Compare commits


147 Commits

Author SHA1 Message Date
Clément Renault
f4903c2fe7 Merge pull request #5824 from meilisearch/curquiza-patch-1
Update release doc (again)
2025-08-12 06:33:40 +00:00
Clément Renault
3d271c25c7 Merge pull request #5815 from meilisearch/curquiza-patch-2
Minor update for release process
2025-08-11 17:14:48 +00:00
Clémentine
1f126a2d8a Update release doc (again) 2025-08-11 18:23:37 +02:00
Tamo
b7f32c5acd Merge pull request #5817 from meilisearch/fix-dumpless-upgrade
fix the dumpless upgrade again
2025-08-11 15:29:54 +00:00
Tamo
8f04529ba2 Merge pull request #5816 from meilisearch/webhook-telemetry
Update webhook telemetry events
2025-08-11 15:13:41 +00:00
Tamo
54b85b8644 fix the dumpless upgrade again 2025-08-11 16:37:09 +02:00
Mubelotix
562c620fec Update webhook telemetry events 2025-08-11 16:21:14 +02:00
Clémentine
68280bad9e Minor update for release process 2025-08-11 14:28:38 +02:00
Clément Renault
33bc86d71a Merge pull request #5810 from meilisearch/curquiza-patch-1
Add category to release draft
2025-08-11 11:57:51 +00:00
curquiza
b265c92852 Thank contributors better 2025-08-11 12:17:10 +02:00
Clémentine
759beed560 Add category in release draft 2025-08-07 18:15:29 +02:00
Tamo
2035f342f0 Merge pull request #5807 from meilisearch/patch-chat-settings
Turn chat settings to `PATCH`
2025-08-06 14:17:30 +00:00
Tamo
27fed758c2 Merge pull request #5806 from meilisearch/update-to-v1-17-0
Update version to v1.17.0
2025-08-06 13:33:51 +00:00
Mubelotix
3ead985caf Fix issue #5772 2025-08-06 15:02:25 +02:00
Mubelotix
e302e9edd3 Add test for task 2025-08-06 15:02:15 +02:00
Tamo
1fdf820931 Update version to v1.17.0 2025-08-06 12:12:52 +02:00
Tamo
b4f2eeac0a Merge pull request #5803 from meilisearch/curquiza-patch-1
Minor docs update about release.md
2025-08-06 09:14:57 +00:00
Tamo
7e3f2ab0c6 Merge pull request #5785 from meilisearch/webhook-api
Webhook api
2025-08-05 18:10:00 +00:00
Tamo
899be9c3ff make sure we NEVER ever write the cli defined webhook to the database or dumps 2025-08-05 18:55:32 +02:00
Clément Renault
444231e812 Merge pull request #5804 from meilisearch/curquiza-patch-2
Minor fix in PR template
2025-08-05 15:11:23 +00:00
Mubelotix
1ff6da63e8 Make errors singular 2025-08-05 16:58:25 +02:00
Mubelotix
b5158e1e83 Fix cli webhook getting stored in dumps 2025-08-05 16:58:25 +02:00
Tamo
3f1e172c6f fix race condition: take the rtxn before entering the thread so we're sure we won't try to retrieve deleted tasks 2025-08-05 16:47:35 +02:00
Tamo
2b5b41790e update the dump so it doesn't contains the null-uuid webhook 2025-08-05 16:21:14 +02:00
Mubelotix
55cd3203fe Merge pull request #5783 from meilisearch/starts-with-optim
Optimize the starts_with filter
2025-08-05 14:10:10 +00:00
Clémentine
45bb13bf43 Minor fix in PR template 2025-08-05 15:42:56 +02:00
Clémentine
095cba8fba Minor docs update about release.md 2025-08-05 15:29:42 +02:00
Mubelotix
3a9b08960a Add test 2025-08-05 13:49:28 +02:00
Mubelotix
c4e7bf2e60 Stabilize STARTS WITH filter 2025-08-05 12:14:25 +02:00
Mubelotix
4f6a48c327 Stop storing the cli webhook in the db 2025-08-05 11:44:53 +02:00
Tamo
4c61a227ca fmt after my suggestion 2025-08-05 11:29:54 +02:00
Tamo
3d2c204f2d Update crates/milli/src/search/facet/filter.rs 2025-08-05 11:26:10 +02:00
Mubelotix
8b27dec25c Test that the cli webhook receives data 2025-08-05 11:19:21 +02:00
Mubelotix
a9c924b433 Turn url back into a setting 2025-08-05 11:16:34 +02:00
Mubelotix
6cb2296644 Update tests 2025-08-05 11:10:48 +02:00
Mubelotix
b2d157a74a Remove dbg
Co-Authored-By: Thomas Campistron <irevoire@hotmail.fr>
2025-08-05 10:49:21 +02:00
Mubelotix
386cf83285 Improve webhook settings 2025-08-05 10:48:39 +02:00
Mubelotix
8ef1a50086 Add hint
Co-Authored-By: Thomas Campistron <irevoire@hotmail.fr>
2025-08-05 10:42:39 +02:00
Mubelotix
84651ffd7d Remove hardcoded buffer size
Co-Authored-By: Thomas Campistron <irevoire@hotmail.fr>
2025-08-05 10:41:28 +02:00
Mubelotix
43c20bb3ed Add missing actions in from_repr
Co-Authored-By: Thomas Campistron <irevoire@hotmail.fr>
2025-08-05 10:39:52 +02:00
Mubelotix
d340013d8b Change error name 2025-08-05 10:35:12 +02:00
Mubelotix
8a44d9faef Merge branch 'main' into webhook-api 2025-08-05 10:32:36 +02:00
Mubelotix
afb367c7f4 Update old comment 2025-08-05 10:29:39 +02:00
Mubelotix
84bcf9785f Merge branch 'main' into starts-with-optim 2025-08-05 10:27:45 +02:00
Mubelotix
fc814b7537 Apply review suggestion 2025-08-05 10:25:14 +02:00
Clémentine
0865d8af6c Merge pull request #5766 from meilisearch/release-process-change
Release process change
2025-08-05 07:07:46 +00:00
Clément Renault
cac884401f Merge pull request #5800 from meilisearch/tmp-release-v1.16.0
Bring back changes to main
2025-08-04 16:34:24 +00:00
Mubelotix
7251cccd03 Make notify_webhooks execute in its own thread 2025-08-04 17:13:05 +02:00
Mubelotix
ddfcacbb62 Add nice error message for users trying to set uuid or isEditable 2025-08-04 16:53:41 +02:00
Mubelotix
3b26d64a5d Edit reserved webhook message 2025-08-04 16:39:34 +02:00
Mubelotix
3b0f576d56 Improve invalid uuid error message 2025-08-04 16:38:00 +02:00
Clément Renault
454f8b36f4 Make clippy happy 2025-08-04 16:36:46 +02:00
Mubelotix
1754745c42 Add URL and header validity checks 2025-08-04 16:26:20 +02:00
Clément Renault
6f30dfa41c Merge remote-tracking branch 'origin/main' into tmp-release-v1.16.0 2025-08-04 16:06:51 +02:00
Tamo
33350248c8 Merge pull request #5773 from meilisearch/snapshotception
Fix snapshotCreation task being included in snapshot
2025-08-04 13:53:43 +00:00
Mubelotix
69c59d3de3 Update security in utoipa 2025-08-04 15:43:37 +02:00
Mubelotix
8dfebbb3e7 Fix tests 2025-08-04 15:37:12 +02:00
Mubelotix
737ad3ec19 Add new api key actions 2025-08-04 15:00:45 +02:00
Mubelotix
4ec4710811 Improve logs 2025-08-04 15:00:26 +02:00
Mubelotix
c5caac95dd Format 2025-08-04 14:51:23 +02:00
Mubelotix
7acbb1e140 Remove PATCH /webhooks 2025-08-04 14:49:27 +02:00
Clémentine
a5e5afd123 Merge pull request #5794 from meilisearch/dependabot/github_actions/sigstore/cosign-installer-3.9.2
Bump sigstore/cosign-installer from 3.8.2 to 3.9.2
2025-08-04 12:39:40 +00:00
Clémentine
c70e9abf70 Merge pull request #5795 from meilisearch/dependabot/github_actions/svenstaro/upload-release-action-2.11.2
Bump svenstaro/upload-release-action from 2.11.1 to 2.11.2
2025-08-04 12:10:03 +00:00
curquiza
f8d70249a7 Update process with Ruleset branch addition 2025-08-04 13:59:11 +02:00
Clémentine
a2c96d40d3 Merge pull request #5798 from meilisearch/update-minidashboard-v0.2.22
Update mini-dashboard v0.2.22
2025-08-04 10:40:12 +00:00
Clément Renault
05dd8e0d62 update mini-dashboard to v0.2.22 2025-08-04 11:14:10 +02:00
Clémentine
4182e631d6 Potential fix for code scanning alert no. 63: Workflow does not contain permissions
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2025-08-04 09:59:54 +02:00
dependabot[bot]
ddea0b1570 Bump svenstaro/upload-release-action from 2.11.1 to 2.11.2
Bumps [svenstaro/upload-release-action](https://github.com/svenstaro/upload-release-action) from 2.11.1 to 2.11.2.
- [Release notes](https://github.com/svenstaro/upload-release-action/releases)
- [Changelog](https://github.com/svenstaro/upload-release-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/svenstaro/upload-release-action/compare/2.11.1...2.11.2)

---
updated-dependencies:
- dependency-name: svenstaro/upload-release-action
  dependency-version: 2.11.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-01 17:05:02 +00:00
dependabot[bot]
beb532e2a7 Bump sigstore/cosign-installer from 3.8.2 to 3.9.2
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.8.2 to 3.9.2.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](3454372f43...d58896d6a1)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-version: 3.9.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-01 17:04:58 +00:00
Mubelotix
e3a6d63b52 Add utoipa types 2025-08-01 08:42:27 +02:00
Mubelotix
ed147f80ac Add test and fix bug 2025-07-31 16:45:30 +02:00
Clément Renault
c37ed05f49 Merge pull request #5790 from meilisearch/adapt-go-ci
Adapt Go CI to recent change in the Go repo
2025-07-31 14:20:33 +00:00
curquiza
c1a5a545b6 Adapt Go CI to recent change in the Go repo 2025-07-31 15:23:45 +02:00
Mubelotix
35537e0b0b Add single_receives_data test 2025-07-31 14:12:09 +02:00
Mubelotix
ee80fc87c9 Add test for patch endpoint 2025-07-31 13:00:43 +02:00
Clémentine
bb43bf122e Update .github/pull_request_template.md
Co-authored-by: Louis Dureuil <louis@meilisearch.com>
2025-07-31 12:55:19 +02:00
Mubelotix
34590297c1 Add patch webhook endpoint 2025-07-31 12:53:57 +02:00
Mubelotix
9e43f7b419 Update tests 2025-07-31 12:44:35 +02:00
Mubelotix
94733a4a18 Add delete endpoint 2025-07-31 12:38:14 +02:00
Mubelotix
ad68245186 Update tests 2025-07-31 12:33:34 +02:00
Mubelotix
29fb4d5e2a Add post webhook route 2025-07-31 12:27:12 +02:00
Mubelotix
ca27bcaac7 Update tests 2025-07-31 11:34:47 +02:00
Mubelotix
53397e28fc Replace name by uuid 2025-07-31 11:19:46 +02:00
Mubelotix
7c2c17129f Add get webhook route 2025-07-31 10:59:06 +02:00
Mubelotix
446fce6c16 Extract logic from route 2025-07-31 10:01:25 +02:00
Mubelotix
f67043801b Add a test for concurrent cli and dump 2025-07-31 09:35:16 +02:00
Mubelotix
fc4c5d2718 Add dump test 2025-07-30 16:16:12 +02:00
Mubelotix
a75b327b37 Add test for webhooks over limits 2025-07-30 15:59:19 +02:00
Mubelotix
c70ae91d34 Add test for reserved webhooks 2025-07-30 15:52:24 +02:00
Mubelotix
e88480c7c4 Fix reserved name check 2025-07-30 15:44:51 +02:00
Mubelotix
b565ec1497 Test cli behavior 2025-07-30 15:44:42 +02:00
Mubelotix
3e77c1d8c8 Add reserved webhook 2025-07-30 15:23:06 +02:00
Mubelotix
dc7af47371 Add new errors 2025-07-30 15:18:43 +02:00
Mubelotix
064d9d5ff8 Add dump support 2025-07-30 15:06:37 +02:00
Mubelotix
93f8b31eec Fix tests 2025-07-30 12:52:01 +02:00
Mubelotix
466e1a7aac Support legacy cli arguments 2025-07-30 12:25:59 +02:00
Mubelotix
cc37eb870f Initial implementation 2025-07-30 12:01:40 +02:00
Mubelotix
5567653c96 Fix network documentation 2025-07-29 16:47:28 +02:00
Mubelotix
5e867f7ce0 Add webhooks api key action 2025-07-29 16:47:20 +02:00
Mubelotix
48a5f4db2d Improve comment 2025-07-28 16:42:33 +02:00
Mubelotix
224892e692 Enable new algorithm every time 2025-07-28 16:28:06 +02:00
Mubelotix
691a9ae4b1 Format 2025-07-28 16:24:11 +02:00
Mubelotix
e8a818f53d Optimize the filter 2025-07-28 16:24:04 +02:00
Mubelotix
478f374b9d Add benchmark 2025-07-28 16:23:26 +02:00
Mubelotix
1f18f0ba77 Update little tiny comments 2025-07-23 14:33:58 +02:00
Mubelotix
44b24652d2 Change strategy to remove task instead of marking it succeeded 2025-07-23 14:30:25 +02:00
Mubelotix
5dcf79233e Remove useless parameter
Co-Authored-By: Tamo <tamo@meilisearch.com>
2025-07-23 11:30:39 +02:00
Mubelotix
846d27354b Format 2025-07-22 15:18:21 +02:00
Mubelotix
c1aa4120ac Update test 2025-07-22 15:18:13 +02:00
Mubelotix
6394efc4c2 Turn dirty fix into beautiful fix 2025-07-22 15:17:26 +02:00
Mubelotix
9716834380 Initial fix 2025-07-22 14:31:42 +02:00
Mubelotix
2f2e42e72d Add test for issue #4653 2025-07-22 12:33:18 +02:00
curquiza
f3b60a1dab Minor update on doc 2025-07-20 22:20:08 +02:00
curquiza
cd0523c3f1 Remove run of SDK test on PR because cannot work 2025-07-20 22:13:07 +02:00
curquiza
7f318ee964 Adapt issue template 2025-07-20 22:11:30 +02:00
curquiza
dc1656da8e Adapt automation 2025-07-20 22:11:14 +02:00
curquiza
dc0bd9f25d Add release drafter 2025-07-20 22:10:35 +02:00
curquiza
52d8007b12 Add pull request template 2025-07-20 22:10:17 +02:00
curquiza
4f8382b159 Remove useless automation 2025-07-20 22:07:59 +02:00
curquiza
c2c82be556 Update documentation 2025-07-20 22:07:23 +02:00
Tamo
421a23ee3d Merge pull request #3265 from LeSuisse/sign-container-image-cosign
Sign container image using Cosign in keyless mode
2025-07-16 08:54:57 +00:00
Thomas Gerbet
191ea340ed Sign container image using Cosign in keyless mode
Cosign keyless mode makes it possible to sign the container image using the
OIDC Identity Tokens provided by GitHub Actions [0][1].
The signature is published to the registry storing the image and to the
public Rekor transparency log instance [2].

Cosign keyless mode has already been adopted by some major projects like
Kubernetes [3].

The image signature can be manually verified using:
```
$ cosign verify \
	--certificate-oidc-issuer='https://token.actions.githubusercontent.com' \
	--certificate-identity-regexp='^https://github.com/meilisearch/meilisearch/.github/workflows/publish-docker-images.yaml' \
	<image_name>
```

See #2179.
Note that a similar approach can be used to sign the release binaries.

[0] https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect
[1] https://docs.sigstore.dev/cosign/signing/signing_with_containers/
[2] https://docs.sigstore.dev/rekor/overview
[3] https://kubernetes.io/docs/tasks/administer-cluster/verify-signed-artifacts/#verifying-image-signatures
2025-07-16 10:04:18 +02:00
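
The commit message above notes that release binaries could be signed the same way; a minimal keyless sketch with `cosign sign-blob` (hypothetical artifact name, flags per Cosign v2, not part of this changeset):

```
# Sign a release artifact in CI (keyless, backed by the GitHub OIDC token) — hypothetical example
cosign sign-blob --yes meilisearch-linux-amd64 \
	--output-certificate meilisearch-linux-amd64.pem \
	--output-signature meilisearch-linux-amd64.sig

# Verify the downloaded artifact locally
cosign verify-blob meilisearch-linux-amd64 \
	--certificate meilisearch-linux-amd64.pem \
	--signature meilisearch-linux-amd64.sig \
	--certificate-oidc-issuer='https://token.actions.githubusercontent.com' \
	--certificate-identity-regexp='^https://github.com/meilisearch/meilisearch/'
```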
Tamo
8d22972d84 Merge pull request #5626 from martin-g/faster-batches-it-tests
tests: Faster batches:: IT tests
2025-07-16 07:01:16 +00:00
Martin Grigorov
8772b5af87 Merge branch 'main' into faster-batches-it-tests 2025-07-15 15:21:32 +03:00
Tamo
df2e7cde53 Merge pull request #5703 from martin-g/all-use-server-wait-task
tests: Use Server::wait_task() instead of Index::wait_task()
2025-07-15 09:18:12 +00:00
Clément Renault
02b2ae6142 Merge pull request #5756 from meilisearch/fix-integration-test
Fix Rails CI
2025-07-15 07:38:06 +00:00
curquiza
f813eb7ca4 Fix 2025-07-13 12:35:54 +02:00
curquiza
d072edaa49 Fix Rails CI 2025-07-13 12:26:56 +02:00
Martin Tzvetanov Grigorov
e3daa907c5 Update redactions
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-11 11:14:39 +03:00
Martin Tzvetanov Grigorov
a39223822a More tests fixes
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-11 11:11:46 +03:00
Martin Grigorov
1eb6cd38ce Merge branch 'main' into faster-batches-it-tests 2025-07-11 10:49:22 +03:00
Martin Tzvetanov Grigorov
eb6ad3ef9c Fix batch id detection
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-11 10:24:25 +03:00
Martin Tzvetanov Grigorov
3bef4f4413 Use Server::wait_task() instead of Index::wait_task()
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-11 10:16:25 +03:00
Martin Tzvetanov Grigorov
9f89881b0d More tests fixes
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-11 10:11:58 +03:00
Martin Tzvetanov Grigorov
126aefc207 Fix more tests
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-10 16:47:04 +03:00
Martin Tzvetanov Grigorov
e7a60555d6 Formatting
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-10 14:35:40 +03:00
Martin Tzvetanov Grigorov
ae912c4c3f Pass the Server as an extra parameter when the Index needs to wait for a task
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-10 14:28:57 +03:00
Martin Tzvetanov Grigorov
13ea29e511 Fix some search+replace issues. Make Server::wait_task() available for Index:: methods
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-10 14:03:16 +03:00
Martin Tzvetanov Grigorov
5342df26fe tests: Use Server::wait_task() instead of Index::wait_task()
The code is mostly duplicated. Server::wait_task() has better handling for errors and more retries.

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-07-10 14:03:15 +03:00
Tamo
61bc95e8d6 Merge pull request #5740 from meilisearch/ignore-flaky-test-2
Ignore yet another flaky test
2025-07-09 13:25:45 +00:00
Louis Dureuil
074744b8a6 Ignore yet-another flaky test 2025-07-08 10:54:39 +02:00
Martin Tzvetanov Grigorov
9e31d6ceff Add batch_uid to all successful and failed tasks too
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:12:48 +03:00
Martin Tzvetanov Grigorov
139ec8c782 Add task.batch_uid() helper method
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:12:48 +03:00
Martin Tzvetanov Grigorov
2691999bd3 Add a helper method for getting the latest batch
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:12:47 +03:00
Martin Tzvetanov Grigorov
48460678df More assertion fixes
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:12:47 +03:00
Martin Tzvetanov Grigorov
cb15e5c67e WIP: More snapshot updates
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:12:46 +03:00
Martin Tzvetanov Grigorov
7380808b26 tests: Faster batches:: IT tests
Use shared server + unique indices where possible

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:12:46 +03:00
107 changed files with 2905 additions and 1033 deletions

.github/ISSUE_TEMPLATE/sprint_issue.md:

@@ -1,28 +1,26 @@
 ---
-name: New sprint issue
-about: ⚠️ Should only be used by the engine team ⚠️
+name: New feature issue
+about: ⚠️ Should only be used by the internal Meili team ⚠️
 title: ''
-labels: 'missing usage in PRD, impacts docs'
+labels: 'impacts docs, impacts integrations'
 assignees: ''
 ---

 Related product team resources: [PRD]() (_internal only_)
-Related product discussion:
-
-## Motivation
-
-<!---Copy/paste the information in PRD or briefly detail the product motivation. Ask product team if any hesitation.-->

 ## Usage

 <!---Link to the public part of the PRD, or to the related product discussion for experimental features-->
+TBD

 ## TODO

 <!---If necessary, create a list with technical/product steps-->

 ### Are you modifying a database?
 - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
 - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
@@ -54,5 +52,5 @@ Related product discussion:
 ## Impacted teams

-<!---Ping the related teams. Ask for the engine manager if any hesitation-->
+<!---Ping the related teams. Ask on Slack if any hesitation-->
-<!---@meilisearch/docs-team when there is any API change, e.g. settings addition-->
+<!---@meilisearch/docs-team and @meilisearch/integration-team when there is any API change, e.g. settings addition-->

.github/pull_request_template.md (new file, +16):

@@ -0,0 +1,16 @@
+## Related issue
+Fixes #...
+
+## Requirements
+
+⚠️ Ensure the following requirements before merging ⚠️
+- [ ] Automated tests have been added.
+- [ ] If some tests cannot be automated, manual rigorous tests should be applied.
+- [ ] ⚠️ If there is any change in the DB:
+  - [ ] Test that any impacted DB still works as expected after using `--experimental-dumpless-upgrade` on a DB created with the last released Meilisearch
+  - [ ] Test that during the upgrade, **search is still available** (artificially make the upgrade longer if needed)
+  - [ ] Set the `db change` label.
+- [ ] If necessary, the feature have been tested in the Cloud production environment (with [prototypes](./documentation/prototypes.md)) and the Cloud UI is ready.
+- [ ] If necessary, the [documentation](https://github.com/meilisearch/documentation) related to the implemented feature in the PR is ready.
+- [ ] If necessary, the [integrations](https://github.com/meilisearch/integration-guides) related to the implemented feature in the PR are ready.

.github/release-draft-template.yml (new file, +33):

@@ -0,0 +1,33 @@
+name-template: 'v$RESOLVED_VERSION'
+tag-template: 'v$RESOLVED_VERSION'
+exclude-labels:
+  - 'skip changelog'
+version-resolver:
+  minor:
+    labels:
+      - 'enhancement'
+  default: patch
+categories:
+  - title: '⚠️ Breaking changes'
+    label: 'breaking-change'
+  - title: '🚀 Enhancements'
+    label: 'enhancement'
+  - title: '🐛 Bug Fixes'
+    label: 'bug'
+  - title: '🔒 Security'
+    label: 'security'
+  - title: '⚙️ Maintenance/misc'
+    label:
+      - 'maintenance'
+      - 'documentation'
+template: |
+  $CHANGES
+
+  ❤️ Huge thanks to our contributors: $CONTRIBUTORS.
+no-changes-template: 'Changes are coming soon 😎'
+sort-direction: 'ascending'
+replacers:
+  - search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
+    replace: ''
+  - search: '/(?:and )?@dependabot(?:\[bot\])?,?/g'
+    replace: ''

.github/templates/dependency-issue.md (new file, +22):

@@ -0,0 +1,22 @@
+This issue is about updating Meilisearch dependencies:
+- [ ] Update Meilisearch dependencies with the help of `cargo +nightly udeps --all-targets` (remove unused dependencies) and `cargo upgrade` (upgrade dependencies versions) - ⚠️ Some repositories may contain subdirectories (like heed, charabia, or deserr). Take care of updating these in the main crate as well. This won't be done automatically by `cargo upgrade`.
+  - [ ] [deserr](https://github.com/meilisearch/deserr)
+  - [ ] [charabia](https://github.com/meilisearch/charabia/)
+  - [ ] [heed](https://github.com/meilisearch/heed/)
+  - [ ] [roaring-rs](https://github.com/RoaringBitmap/roaring-rs/)
+  - [ ] [obkv](https://github.com/meilisearch/obkv)
+  - [ ] [grenad](https://github.com/meilisearch/grenad/)
+  - [ ] [arroy](https://github.com/meilisearch/arroy/)
+  - [ ] [segment](https://github.com/meilisearch/segment)
+  - [ ] [bumparaw-collections](https://github.com/meilisearch/bumparaw-collections)
+  - [ ] [bbqueue](https://github.com/meilisearch/bbqueue)
+  - [ ] Finally, [Meilisearch](https://github.com/meilisearch/MeiliSearch)
+- [ ] If new Rust versions have been released, update the minimal Rust version in use at Meilisearch:
+  - [ ] in this [GitHub Action file](https://github.com/meilisearch/meilisearch/blob/main/.github/workflows/test-suite.yml), by changing the `toolchain` field of the `rustfmt` job to the latest available nightly (of the day before or the current day).
+  - [ ] in every [GitHub Action files](https://github.com/meilisearch/meilisearch/blob/main/.github/workflows), by changing all the `dtolnay/rust-toolchain@` references to use the latest stable version.
+  - [ ] in this [`rust-toolchain.toml`](https://github.com/meilisearch/meilisearch/blob/main/rust-toolchain.toml), by changing the `channel` field to the latest stable version.
+  - [ ] in the [Dockerfile](https://github.com/meilisearch/meilisearch/blob/main/Dockerfile), by changing the base image to `rust:<target_rust_version>-alpine<alpine_version>`. Check that the image exists on [Dockerhub](https://hub.docker.com/_/rust/tags?page=1&name=alpine). Also, build and run the image to check everything still works!
+
+⚠️ This issue should be prioritized to avoid any deprecation and vulnerability issues.
+
+The GitHub action dependencies are managed by [Dependabot](https://github.com/meilisearch/meilisearch/blob/main/.github/dependabot.yml), so no need to update them when solving this issue.
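
The two cargo commands this checklist relies on, as a runnable snippet (assumes the `cargo-udeps` and `cargo-edit` extensions, which provide them, are installed):

```
cargo install cargo-udeps cargo-edit   # one-time prerequisites (assumption: not yet installed)
cargo +nightly udeps --all-targets     # list unused dependencies
cargo upgrade                          # bump dependency versions in Cargo.toml
```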

Deleted file (GitHub Actions workflow "PR Milestone Check"):

@@ -1,100 +0,0 @@
-name: PR Milestone Check
-
-on:
-  pull_request:
-    types: [opened, reopened, edited, synchronize, milestoned, demilestoned]
-    branches:
-      - "main"
-      - "release-v*.*.*"
-
-jobs:
-  check-milestone:
-    name: Check PR Milestone
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-
-      - name: Validate PR milestone
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            // Get PR number directly from the event payload
-            const prNumber = context.payload.pull_request.number;
-
-            // Get PR details
-            const { data: prData } = await github.rest.pulls.get({
-              owner: 'meilisearch',
-              repo: 'meilisearch',
-              pull_number: prNumber
-            });
-
-            // Get base branch name
-            const baseBranch = prData.base.ref;
-            console.log(`Base branch: ${baseBranch}`);
-
-            // Get PR milestone
-            const prMilestone = prData.milestone;
-            if (!prMilestone) {
-              core.setFailed('PR must have a milestone assigned');
-              return;
-            }
-            console.log(`PR milestone: ${prMilestone.title}`);
-
-            // Validate milestone format: vx.y.z
-            const milestoneRegex = /^v\d+\.\d+\.\d+$/;
-            if (!milestoneRegex.test(prMilestone.title)) {
-              core.setFailed(`Milestone "${prMilestone.title}" does not follow the required format vx.y.z`);
-              return;
-            }
-
-            // For main branch PRs, check if the milestone is the highest one
-            if (baseBranch === 'main') {
-              // Get all milestones
-              const { data: milestones } = await github.rest.issues.listMilestones({
-                owner: 'meilisearch',
-                repo: 'meilisearch',
-                state: 'open',
-                sort: 'due_on',
-                direction: 'desc'
-              });
-
-              // Sort milestones by version number (vx.y.z)
-              const sortedMilestones = milestones
-                .filter(m => milestoneRegex.test(m.title))
-                .sort((a, b) => {
-                  const versionA = a.title.substring(1).split('.').map(Number);
-                  const versionB = b.title.substring(1).split('.').map(Number);
-
-                  // Compare major version
-                  if (versionA[0] !== versionB[0]) return versionB[0] - versionA[0];
-                  // Compare minor version
-                  if (versionA[1] !== versionB[1]) return versionB[1] - versionA[1];
-                  // Compare patch version
-                  return versionB[2] - versionA[2];
-                });
-
-              if (sortedMilestones.length === 0) {
-                core.setFailed('No valid milestones found in the repository. Please create at least one milestone with the format vx.y.z');
-                return;
-              }
-
-              const highestMilestone = sortedMilestones[0];
-              console.log(`Highest milestone: ${highestMilestone.title}`);
-
-              if (prMilestone.title !== highestMilestone.title) {
-                core.setFailed(`PRs targeting the main branch must use the highest milestone (${highestMilestone.title}), but this PR uses ${prMilestone.title}`);
-                return;
-              }
-            } else {
-              // For release branches, the milestone should match the branch version
-              const branchVersion = baseBranch.substring(8); // remove 'release-'
-              if (prMilestone.title !== branchVersion) {
-                core.setFailed(`PRs targeting release branch "${baseBranch}" must use the matching milestone "${branchVersion}", but this PR uses "${prMilestone.title}"`);
-                return;
-              }
-            }
-
-            console.log('PR milestone validation passed!');

Changed file (workflow that creates the dependency-update issue):

@@ -15,7 +15,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Download the issue template
-        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/dependency-issue.md > $ISSUE_TEMPLATE
+        run: curl -s https://raw.githubusercontent.com/meilisearch/meilisearch/main/.github/templates/dependency-issue.md > $ISSUE_TEMPLATE
      - name: Create issue
        run: |
          gh issue create \

Changed file (flaky-tests workflow):

@@ -3,7 +3,7 @@ name: Look for flaky tests
 on:
   workflow_dispatch:
   schedule:
-    - cron: "0 12 * * FRI" # Every Friday at 12:00PM
+    - cron: '0 4 * * *' # Every day at 4:00AM

 jobs:
   flaky:

Deleted file (GitHub Actions workflow "Milestone's workflow"):

@@ -1,224 +0,0 @@
-name: Milestone's workflow
-
-# /!\ No git flow are handled here
-
-# For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
-# - the roadmap issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/roadmap-issue.md
-# - the changelog issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/changelog-issue.md
-# - update the ruleset to add the current release version to the list of allowed versions and be able to use the merge queue.
-
-# For each Milestone closed
-# - the `release_version` label is created
-# - this label is applied to all issues/PRs in the Milestone
-
-on:
-  milestone:
-    types: [created, closed]
-
-env:
-  MILESTONE_VERSION: ${{ github.event.milestone.title }}
-  MILESTONE_URL: ${{ github.event.milestone.html_url }}
-  MILESTONE_DUE_ON: ${{ github.event.milestone.due_on }}
-  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
-
-jobs:
-  # -----------------
-  # MILESTONE CREATED
-  # -----------------
-
-  get-release-version:
-    if: github.event.action == 'created'
-    runs-on: ubuntu-latest
-    outputs:
-      is-patch: ${{ steps.check-patch.outputs.is-patch }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Check if this release is a patch release only
-        id: check-patch
-        run: |
-          echo version: $MILESTONE_VERSION
-          if [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.0$ ]]; then
-            echo 'This is NOT a patch release'
-            echo "is-patch=false" >> $GITHUB_OUTPUT
-          elif [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-            echo 'This is a patch release'
-            echo "is-patch=true" >> $GITHUB_OUTPUT
-          else
-            echo "Not a valid format of release, check the Milestone's title."
-            echo 'Should be vX.Y.Z'
-            exit 1
-          fi
-
-  create-roadmap-issue:
-    needs: get-release-version
-    # Create the roadmap issue if the release is not only a patch release
-    if: github.event.action == 'created' && needs.get-release-version.outputs.is-patch == 'false'
-    runs-on: ubuntu-latest
-    env:
-      ISSUE_TEMPLATE: issue-template.md
-    steps:
-      - uses: actions/checkout@v3
-      - name: Download the issue template
-        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/roadmap-issue.md > $ISSUE_TEMPLATE
-      - name: Replace all empty occurrences in the templates
-        run: |
-          # Replace all <<version>> occurrences
-          sed -i "s/<<version>>/$MILESTONE_VERSION/g" $ISSUE_TEMPLATE
-
-          # Replace all <<milestone_id>> occurrences
-          milestone_id=$(echo $MILESTONE_URL | cut -d '/' -f 7)
-          sed -i "s/<<milestone_id>>/$milestone_id/g" $ISSUE_TEMPLATE
-
-          # Replace release date if exists
-          if [[ ! -z $MILESTONE_DUE_ON ]]; then
-            date=$(echo $MILESTONE_DUE_ON | cut -d 'T' -f 1)
-            sed -i "s/Release date\: 20XX-XX-XX/Release date\: $date/g" $ISSUE_TEMPLATE
-          fi
-      - name: Create the issue
-        run: |
-          gh issue create \
-            --title "$MILESTONE_VERSION ROADMAP" \
-            --label 'epic,impacts docs,impacts integrations,impacts cloud' \
-            --body-file $ISSUE_TEMPLATE \
-            --milestone $MILESTONE_VERSION
-
-  create-changelog-issue:
-    needs: get-release-version
-    # Create the changelog issue if the release is not only a patch release
-    if: github.event.action == 'created' && needs.get-release-version.outputs.is-patch == 'false'
-    runs-on: ubuntu-latest
-    env:
-      ISSUE_TEMPLATE: issue-template.md
-    steps:
-      - uses: actions/checkout@v3
-      - name: Download the issue template
-        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/changelog-issue.md > $ISSUE_TEMPLATE
-      - name: Replace all empty occurrences in the templates
-        run: |
-          # Replace all <<version>> occurrences
-          sed -i "s/<<version>>/$MILESTONE_VERSION/g" $ISSUE_TEMPLATE
-
-          # Replace all <<milestone_id>> occurrences
-          milestone_id=$(echo $MILESTONE_URL | cut -d '/' -f 7)
-          sed -i "s/<<milestone_id>>/$milestone_id/g" $ISSUE_TEMPLATE
-      - name: Create the issue
-        run: |
-          gh issue create \
-            --title "Create release changelogs for $MILESTONE_VERSION" \
-            --label 'impacts docs,documentation' \
-            --body-file $ISSUE_TEMPLATE \
-            --milestone $MILESTONE_VERSION \
-            --assignee curquiza
-
-  create-update-version-issue:
-    needs: get-release-version
-    # Create the update-version issue even if the release is a patch release
-    if: github.event.action == 'created'
-    runs-on: ubuntu-latest
-    env:
-      ISSUE_TEMPLATE: issue-template.md
-    steps:
-      - uses: actions/checkout@v3
-      - name: Download the issue template
-        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/update-version-issue.md > $ISSUE_TEMPLATE
-      - name: Create the issue
-        run: |
-          gh issue create \
-            --title "Update version in Cargo.toml for $MILESTONE_VERSION" \
-            --label 'maintenance' \
-            --body-file $ISSUE_TEMPLATE \
-            --milestone $MILESTONE_VERSION
-
-  create-update-openapi-issue:
-    needs: get-release-version
-    # Create the openAPI issue if the release is not only a patch release
-    if: github.event.action == 'created' && needs.get-release-version.outputs.is-patch == 'false'
-    runs-on: ubuntu-latest
-    env:
-      ISSUE_TEMPLATE: issue-template.md
-    steps:
-      - uses: actions/checkout@v3
-      - name: Download the issue template
-        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/update-openapi-issue.md > $ISSUE_TEMPLATE
-      - name: Create the issue
-        run: |
-          gh issue create \
-            --title "Update Open API file for $MILESTONE_VERSION" \
-            --label 'maintenance' \
-            --body-file $ISSUE_TEMPLATE \
-            --milestone $MILESTONE_VERSION
-
-  update-ruleset:
-    runs-on: ubuntu-latest
-    if: github.event.action == 'created'
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install jq
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y jq
-      - name: Update ruleset
-        env:
-          # gh api repos/meilisearch/meilisearch/rulesets --jq '.[] | {name: .name, id: .id}'
-          RULESET_ID: 4253297
-          BRANCH_NAME: ${{ github.event.inputs.branch_name }}
-        run: |
-          echo "RULESET_ID: ${{ env.RULESET_ID }}"
-          echo "BRANCH_NAME: ${{ env.BRANCH_NAME }}"
-
-          # Get current ruleset conditions
-          CONDITIONS=$(gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} --jq '{ conditions: .conditions }')
-
-          # Update the conditions by appending the milestone version
-          UPDATED_CONDITIONS=$(echo $CONDITIONS | jq '.conditions.ref_name.include += ["refs/heads/release-'${{ env.MILESTONE_VERSION }}'"]')
-
-          # Update the ruleset from stdin (-)
-          echo $UPDATED_CONDITIONS |
-            gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} \
-              --method PUT \
-              -H "Accept: application/vnd.github+json" \
-              -H "X-GitHub-Api-Version: 2022-11-28" \
-              --input -
-
-  # ----------------
-  # MILESTONE CLOSED
-  # ----------------
-
-  create-release-label:
-    if: github.event.action == 'closed'
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Create the ${{ env.MILESTONE_VERSION }} label
-        run: |
-          label_description="PRs/issues solved in $MILESTONE_VERSION"
-          if [[ ! -z $MILESTONE_DUE_ON ]]; then
-            date=$(echo $MILESTONE_DUE_ON | cut -d 'T' -f 1)
-            label_description="$label_description released on $date"
-          fi
-
-          gh api repos/meilisearch/meilisearch/labels \
-            --method POST \
-            -H "Accept: application/vnd.github+json" \
-            -f name="$MILESTONE_VERSION" \
-            -f description="$label_description" \
-            -f color='ff5ba3'
-
-  labelize-all-milestone-content:
-    if: github.event.action == 'closed'
-    needs: create-release-label
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Add label ${{ env.MILESTONE_VERSION }} to all PRs in the Milestone
-        run: |
-          prs=$(gh pr list --search milestone:"$MILESTONE_VERSION" --limit 1000 --state all --json number --template '{{range .}}{{tablerow (printf "%v" .number)}}{{end}}')
-          for pr in $prs; do
-            gh pr edit $pr --add-label $MILESTONE_VERSION
-          done
-      - name: Add label ${{ env.MILESTONE_VERSION }} to all issues in the Milestone
-        run: |
-          issues=$(gh issue list --search milestone:"$MILESTONE_VERSION" --limit 1000 --state all --json number --template '{{range .}}{{tablerow (printf "%v" .number)}}{{end}}')
-          for issue in $issues; do
-            gh issue edit $issue --add-label $MILESTONE_VERSION
-          done

Changed file (Debian package publish workflow):

@@ -32,7 +32,7 @@ jobs:
       - name: Build deb package
         run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
-        uses: svenstaro/upload-release-action@2.11.1
+        uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb

Changed file (binary publish workflow):

@@ -51,7 +51,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.11.1
+        uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/meilisearch
@@ -81,7 +81,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.11.1
+        uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/${{ matrix.artifact_name }}
@@ -113,7 +113,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.11.1
+        uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
@@ -178,7 +178,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.11.1
+        uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch

.github/workflows/publish-docker-images.yaml:

@@ -16,6 +16,8 @@ on:
 jobs:
   docker:
     runs-on: docker
+    permissions:
+      id-token: write # This is needed to use Cosign in keyless mode
     steps:
       - uses: actions/checkout@v3
@@ -62,6 +64,9 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

+      - name: Install cosign
+        uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # tag=v3.9.2
+
       - name: Login to Docker Hub
         uses: docker/login-action@v3
         with:
@@ -85,6 +90,7 @@ jobs:
       - name: Build and push
         uses: docker/build-push-action@v6
+        id: build-and-push
         with:
           push: true
           platforms: linux/amd64,linux/arm64
@@ -94,6 +100,17 @@ jobs:
             COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
             GIT_TAG=${{ github.ref_name }}

+      - name: Sign the images with GitHub OIDC Token
+        env:
+          DIGEST: ${{ steps.build-and-push.outputs.digest }}
+          TAGS: ${{ steps.meta.outputs.tags }}
+        run: |
+          images=""
+          for tag in ${TAGS}; do
+            images+="${tag}@${DIGEST} "
+          done
+          cosign sign --yes ${images}
+
       # /!\ Don't touch this without checking with Cloud team
       - name: Send CI information to Cloud team
         # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)

.github/workflows/release-drafter.yml (new file, +20):

@@ -0,0 +1,20 @@
+name: Release Drafter
+
+permissions:
+  contents: read
+  pull-requests: write
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  update_release_draft:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: release-drafter/release-drafter@v6
+        with:
+          config-name: release-draft-template.yml
+        env:
+          GITHUB_TOKEN: ${{ secrets.RELEASE_DRAFTER_TOKEN }}

Changed file (SDK integration-tests workflow):

@@ -9,7 +9,7 @@ on:
         required: false
         default: nightly
   schedule:
-    - cron: "0 6 * * MON" # Every Monday at 6:00AM
+    - cron: '0 6 * * *' # Every day at 6:00am

 env:
   MEILI_MASTER_KEY: 'masterKey'
@@ -114,7 +114,7 @@ jobs:
            dep ensure
          fi
       - name: Run integration tests
-        run: go test -v ./...
+        run: go test --race -v ./integration

   meilisearch-java-tests:
     needs: define-docker-image
@@ -344,15 +344,23 @@ jobs:
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
         ports:
           - '7700:7700'
+    env:
+      RAILS_VERSION: '7.0'
     steps:
       - uses: actions/checkout@v3
         with:
           repository: meilisearch/meilisearch-rails
-      - name: Set up Ruby 3
+      - name: Install SQLite dependencies
+        run: sudo apt-get update && sudo apt-get install -y libsqlite3-dev
+      - name: Set up Ruby
         uses: ruby/setup-ruby@v1
         with:
           ruby-version: 3
           bundler-cache: true
+      - name: Start MongoDB
+        uses: supercharge/mongodb-github-action@1.12.0
+        with:
+          mongodb-version: 8.0
       - name: Run tests
         run: bundle exec rspec

.github/workflows/test-suite.yml:

@@ -3,7 +3,7 @@ name: Test suite
 on:
   workflow_dispatch:
   schedule:
-    # Everyday at 5:00am
+    # Every day at 5:00am
     - cron: "0 5 * * *"
   pull_request:
   merge_group:

Changed file (contributing guide):

@@ -106,7 +106,13 @@ Run `cargo xtask --help` from the root of the repository to find out what is available.
 #### Update the openAPI file if the API changed

 To update the openAPI file in the code, see [sprint_issue.md](https://github.com/meilisearch/meilisearch/blob/main/.github/ISSUE_TEMPLATE/sprint_issue.md#reminders-when-modifying-the-api).
-If you want to update the openAPI file on the [open-api repository](https://github.com/meilisearch/open-api), see [update-openapi-issue.md](https://github.com/meilisearch/engine-team/blob/main/issue-templates/update-openapi-issue.md).
+If you want to update the openAPI file on the [open-api repository](https://github.com/meilisearch/open-api):
+- Pull the latest version of the latest rc of Meilisearch `git checkout release-vX.Y.Z; git pull`
+- Starts Meilisearch with the `swagger` feature flag: `cargo run --features swagger`
+- On a browser, open the following URL: http://localhost:7700/scalar
+- Click the « Download openAPI file »
+- Open a PR replacing [this file](https://github.com/meilisearch/open-api/blob/main/open-api.json) with the one downloaded

 ### Logging
@@ -160,25 +166,37 @@ Some notes on GitHub PRs:
   The draft PRs are recommended when you want to show that you are working on something and make your work visible.
 - The branch related to the PR must be **up-to-date with `main`** before merging. Fortunately, this project uses [GitHub Merge Queues](https://github.blog/news-insights/product-news/github-merge-queue-is-generally-available/) to automatically enforce this requirement without the PR author having to rebase manually.

-## Release Process (for internal team only)
-
-Meilisearch tools follow the [Semantic Versioning Convention](https://semver.org/).
-
-### Automation to rebase and Merge the PRs
+## Merging PRs

 This project uses GitHub Merge Queues that helps us manage pull requests merging.

-### How to Publish a new Release
+Before merging a PR, the maintainer should ensure the following requirements are met
+- Automated tests have been added.
+- If some tests cannot be automated, manual rigorous tests should be applied.
+- ⚠️ If there is an change in the DB: it's mandatory to manually test the `--experimental-dumpless-upgrade` on a DB of the previous Meilisearch minor version (e.g. v1.13 for the v1.14 release).
+- If necessary, the feature have been tested in the Cloud production environment (with [prototypes](./documentation/prototypes.md)) and the Cloud UI is ready.
+- If necessary, the [documentation](https://github.com/meilisearch/documentation) related to the implemented feature in the PR is ready.
+- If necessary, the [integrations](https://github.com/meilisearch/integration-guides) related to the implemented feature in the PR are ready.

-The full Meilisearch release process is described in [this guide](https://github.com/meilisearch/engine-team/blob/main/resources/meilisearch-release.md). Please follow it carefully before doing any release.
+## Publish Process (for internal team only)
+
+Meilisearch tools follow the [Semantic Versioning Convention](https://semver.org/).
+
+### How to publish a new release
+
+The full Meilisearch release process is described in [this guide](./documentation/release.md).

 ### How to publish a prototype

 Depending on the developed feature, you might need to provide a prototyped version of Meilisearch to make it easier to test by the users.

 This happens in two steps:
-- [Release the prototype](https://github.com/meilisearch/engine-team/blob/main/resources/prototypes.md#how-to-publish-a-prototype)
-- [Communicate about it](https://github.com/meilisearch/engine-team/blob/main/resources/prototypes.md#communication)
+- [Release the prototype](./documentation/prototypes.md#how-to-publish-a-prototype)
+- [Communicate about it](./documentation/prototypes.md#communication)
+
+### How to implement and publish an experimental feature
+
+Here is our [guidelines and process](./documentation/experimental-features.md) to implement and publish an experimental feature.

 ### Release assets
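
Condensed into a shell session, the openAPI-export steps added above look roughly like this (version placeholder left as in the guide):

```
git checkout release-vX.Y.Z && git pull
cargo run --features swagger
# then open http://localhost:7700/scalar in a browser, click « Download openAPI file »,
# and open a PR on meilisearch/open-api replacing open-api.json with the download
```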

Cargo.lock (generated, 34 changed lines):

@@ -580,7 +580,7 @@ source = "git+https://github.com/meilisearch/bbqueue#cbb87cc707b5af415ef203bdaf2
 [[package]]
 name = "benchmarks"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "anyhow",
  "bumpalo",
@@ -770,7 +770,7 @@ dependencies = [
 [[package]]
 name = "build-info"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "anyhow",
  "time",
@@ -1774,7 +1774,7 @@ dependencies = [
 [[package]]
 name = "dump"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "anyhow",
  "big_s",
@@ -2006,7 +2006,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
 [[package]]
 name = "file-store"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "tempfile",
  "thiserror 2.0.12",
@@ -2028,7 +2028,7 @@ dependencies = [
 [[package]]
 name = "filter-parser"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "insta",
  "nom",
@@ -2049,7 +2049,7 @@ dependencies = [
 [[package]]
 name = "flatten-serde-json"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2194,7 +2194,7 @@ dependencies = [
 [[package]]
 name = "fuzzers"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "arbitrary",
  "bumpalo",
@@ -2994,7 +2994,7 @@ dependencies = [
 [[package]]
 name = "index-scheduler"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "anyhow",
  "backoff",
@@ -3230,7 +3230,7 @@ dependencies = [
 [[package]]
 name = "json-depth-checker"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -3724,7 +3724,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
 [[package]]
 name = "meili-snap"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "insta",
  "md5",
@@ -3735,7 +3735,7 @@ dependencies = [
 [[package]]
 name = "meilisearch"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -3831,7 +3831,7 @@ dependencies = [
 [[package]]
 name = "meilisearch-auth"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "base64 0.22.1",
  "enum-iterator",
@@ -3850,7 +3850,7 @@ dependencies = [
 [[package]]
 name = "meilisearch-types"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -3885,7 +3885,7 @@ dependencies = [
 [[package]]
 name = "meilitool"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "anyhow",
  "clap",
@@ -3919,7 +3919,7 @@ dependencies = [
 [[package]]
 name = "milli"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "allocator-api2 0.3.0",
  "arroy",
@@ -4471,7 +4471,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
 [[package]]
 name = "permissive-json-pointer"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "big_s",
  "serde_json",
@@ -7259,7 +7259,7 @@ dependencies = [
 [[package]]
 name = "xtask"
-version = "1.16.0"
+version = "1.17.0"
 dependencies = [
  "anyhow",
  "build-info",

Cargo.toml (workspace manifest):

@@ -22,7 +22,7 @@ members = [
 ]

 [workspace.package]
-version = "1.16.0"
+version = "1.17.0"
 authors = [
   "Quentin de Quelen <quentin@dequelen.me>",
   "Clément Renault <clement@meilisearch.com>",

README.md:

@@ -119,6 +119,6 @@ Meilisearch is, and will always be, open-source! If you want to contribute to the project...
 Meilisearch releases and their associated binaries are available on the project's [releases page](https://github.com/meilisearch/meilisearch/releases).

-The binaries are versioned following [SemVer conventions](https://semver.org/). To know more, read our [versioning policy](https://github.com/meilisearch/engine-team/blob/main/resources/versioning-policy.md).
+The binaries are versioned following [SemVer conventions](https://semver.org/). To know more, read our [versioning policy](./documentation/versioning-policy.md).

 Differently from the binaries, crates in this repository are not currently available on [crates.io](https://crates.io/) and do not follow [SemVer conventions](https://semver.org).

Changed file (benchmarks crate Cargo.toml):

@@ -55,3 +55,7 @@ harness = false
 [[bench]]
 name = "sort"
 harness = false
+
+[[bench]]
+name = "filter_starts_with"
+harness = false

New file (benchmark for the `STARTS WITH` filter, +66):

@@ -0,0 +1,66 @@
+mod datasets_paths;
+mod utils;
+
+use criterion::{criterion_group, criterion_main};
+use milli::update::Settings;
+use milli::FilterableAttributesRule;
+use utils::Conf;
+
+#[cfg(not(windows))]
+#[global_allocator]
+static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
+
+fn base_conf(builder: &mut Settings) {
+    let displayed_fields = ["geonameid", "name"].iter().map(|s| s.to_string()).collect();
+    builder.set_displayed_fields(displayed_fields);
+
+    let filterable_fields =
+        ["name"].iter().map(|s| FilterableAttributesRule::Field(s.to_string())).collect();
+    builder.set_filterable_fields(filterable_fields);
+}
+
+#[rustfmt::skip]
+const BASE_CONF: Conf = Conf {
+    dataset: datasets_paths::SMOL_ALL_COUNTRIES,
+    dataset_format: "jsonl",
+    queries: &[
+        "",
+    ],
+    configure: base_conf,
+    primary_key: Some("geonameid"),
+    ..Conf::BASE
+};
+
+fn filter_starts_with(c: &mut criterion::Criterion) {
+    #[rustfmt::skip]
+    let confs = &[
+        utils::Conf {
+            group_name: "1 letter",
+            filter: Some("name STARTS WITH e"),
+            ..BASE_CONF
+        },
+        utils::Conf {
+            group_name: "2 letters",
+            filter: Some("name STARTS WITH es"),
+            ..BASE_CONF
+        },
+        utils::Conf {
+            group_name: "3 letters",
+            filter: Some("name STARTS WITH est"),
+            ..BASE_CONF
+        },
+        utils::Conf {
+            group_name: "6 letters",
+            filter: Some("name STARTS WITH estoni"),
+            ..BASE_CONF
+        }
+    ];
+
+    utils::run_benches(c, confs);
+}
+
+criterion_group!(benches, filter_starts_with);
+criterion_main!(benches);
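
Given the `[[bench]]` entry registered above, the new benchmark would presumably be run from the workspace with (dataset download assumed to be handled by the benchmarks crate's existing setup):

```
cargo bench -p benchmarks --bench filter_starts_with
```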

Changed file (dump crate, v5→v6 compat):

@@ -202,6 +202,10 @@ impl CompatV5ToV6 {
     pub fn network(&self) -> Result<Option<&v6::Network>> {
         Ok(None)
     }
+
+    pub fn webhooks(&self) -> Option<&v6::Webhooks> {
+        None
+    }
 }

 pub enum CompatIndexV5ToV6 {

Changed file (dump crate, reader):

@@ -138,6 +138,13 @@ impl DumpReader {
             DumpReader::Compat(compat) => compat.network(),
         }
     }
+
+    pub fn webhooks(&self) -> Option<&v6::Webhooks> {
+        match self {
+            DumpReader::Current(current) => current.webhooks(),
+            DumpReader::Compat(compat) => compat.webhooks(),
+        }
+    }
 }

 impl From<V6Reader> for DumpReader {
@@ -365,6 +372,7 @@ pub(crate) mod test {
         assert_eq!(dump.features().unwrap().unwrap(), RuntimeTogglableFeatures::default());
         assert_eq!(dump.network().unwrap(), None);
+        assert_eq!(dump.webhooks(), None);
     }

     #[test]
@@ -435,6 +443,43 @@ pub(crate) mod test {
         insta::assert_snapshot!(network.remotes.get("ms-2").as_ref().unwrap().search_api_key.as_ref().unwrap(), @"foo");
     }

+    #[test]
+    fn import_dump_v6_webhooks() {
+        let dump = File::open("tests/assets/v6-with-webhooks.dump").unwrap();
+        let dump = DumpReader::open(dump).unwrap();
+
+        // top level infos
+        insta::assert_snapshot!(dump.date().unwrap(), @"2025-07-31 9:21:30.479544 +00:00:00");
+        insta::assert_debug_snapshot!(dump.instance_uid().unwrap(), @r"
+        Some(
+            cb887dcc-34b3-48d1-addd-9815ae721a81,
+        )
+        ");
+
+        // webhooks
+        let webhooks = dump.webhooks().unwrap();
+        insta::assert_json_snapshot!(webhooks, @r#"
+        {
+          "webhooks": {
+            "627ea538-733d-4545-8d2d-03526eb381ce": {
+              "url": "https://example.com/authorization-less",
+              "headers": {}
+            },
+            "771b0a28-ef28-4082-b984-536f82958c65": {
+              "url": "https://example.com/hook",
+              "headers": {
+                "authorization": "TOKEN"
+              }
+            },
+            "f3583083-f8a7-4cbf-a5e7-fb3f1e28a7e9": {
+              "url": "https://third.com",
+              "headers": {}
+            }
+          }
+        }
+        "#);
+    }
+
     #[test]
     fn import_dump_v5() {
         let dump = File::open("tests/assets/v5.dump").unwrap();

View File

@@ -25,6 +25,7 @@ pub type Key = meilisearch_types::keys::Key;
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings; pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures; pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
pub type Network = meilisearch_types::features::Network; pub type Network = meilisearch_types::features::Network;
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
// ===== Other types to clarify the code of the compat module // ===== Other types to clarify the code of the compat module
// everything related to the tasks // everything related to the tasks
@@ -59,6 +60,7 @@ pub struct V6Reader {
keys: BufReader<File>, keys: BufReader<File>,
features: Option<RuntimeTogglableFeatures>, features: Option<RuntimeTogglableFeatures>,
network: Option<Network>, network: Option<Network>,
webhooks: Option<Webhooks>,
} }
impl V6Reader { impl V6Reader {
@@ -93,8 +95,8 @@ impl V6Reader {
             Err(e) => return Err(e.into()),
         };
 
-        let network_file = match fs::read(dump.path().join("network.json")) {
-            Ok(network_file) => Some(network_file),
+        let network = match fs::read(dump.path().join("network.json")) {
+            Ok(network_file) => Some(serde_json::from_reader(&*network_file)?),
             Err(error) => match error.kind() {
                 // Allows the file to be missing, this will only result in all experimental features disabled.
                 ErrorKind::NotFound => {
@@ -104,10 +106,16 @@ impl V6Reader {
                 _ => return Err(error.into()),
             },
         };
-        let network = if let Some(network_file) = network_file {
-            Some(serde_json::from_reader(&*network_file)?)
-        } else {
-            None
+
+        let webhooks = match fs::read(dump.path().join("webhooks.json")) {
+            Ok(webhooks_file) => Some(serde_json::from_reader(&*webhooks_file)?),
+            Err(error) => match error.kind() {
+                ErrorKind::NotFound => {
+                    debug!("`webhooks.json` not found in dump");
+                    None
+                }
+                _ => return Err(error.into()),
+            },
         };
 
         Ok(V6Reader {
@@ -119,6 +127,7 @@ impl V6Reader {
             features,
             network,
             dump,
+            webhooks,
         })
     }
@@ -229,6 +238,10 @@ impl V6Reader {
     pub fn network(&self) -> Option<&Network> {
         self.network.as_ref()
     }
+
+    pub fn webhooks(&self) -> Option<&Webhooks> {
+        self.webhooks.as_ref()
+    }
 }
 
 pub struct UpdateFile {

View File

@@ -8,6 +8,7 @@ use meilisearch_types::batches::Batch;
 use meilisearch_types::features::{ChatCompletionSettings, Network, RuntimeTogglableFeatures};
 use meilisearch_types::keys::Key;
 use meilisearch_types::settings::{Checked, Settings};
+use meilisearch_types::webhooks::WebhooksDumpView;
 use serde_json::{Map, Value};
 use tempfile::TempDir;
 use time::OffsetDateTime;
@@ -74,6 +75,13 @@ impl DumpWriter {
         Ok(std::fs::write(self.dir.path().join("network.json"), serde_json::to_string(&network)?)?)
     }
 
+    pub fn create_webhooks(&self, webhooks: WebhooksDumpView) -> Result<()> {
+        Ok(std::fs::write(
+            self.dir.path().join("webhooks.json"),
+            serde_json::to_string(&webhooks)?,
+        )?)
+    }
+
     pub fn persist_to(self, mut writer: impl Write) -> Result<()> {
         let gz_encoder = GzEncoder::new(&mut writer, Compression::default());
         let mut tar_encoder = tar::Builder::new(gz_encoder);

Binary file not shown.

View File

@@ -165,9 +165,9 @@ impl<'a> FilterCondition<'a> {
                 | Condition::Exists
                 | Condition::LowerThan(_)
                 | Condition::LowerThanOrEqual(_)
-                | Condition::Between { .. } => None,
-                Condition::Contains { keyword, word: _ }
-                | Condition::StartsWith { keyword, word: _ } => Some(keyword),
+                | Condition::Between { .. }
+                | Condition::StartsWith { .. } => None,
+                Condition::Contains { keyword, word: _ } => Some(keyword),
             },
             FilterCondition::Not(this) => this.use_contains_operator(),
             FilterCondition::Or(seq) | FilterCondition::And(seq) => {
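
Since `StartsWith` now falls through to `None` here, `STARTS WITH` no longer trips the experimental `contains filter` feature check. A minimal usage sketch against the search route, assuming a local instance at `http://localhost:7700` and a `movies` index whose `name` attribute is filterable (all illustrative):

```rust
use serde_json::json;

// Illustrative sketch: the URL, index uid, and attribute are assumptions.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let response = ureq::post("http://localhost:7700/indexes/movies/search")
        // `STARTS WITH` is stable now, so no `/experimental-features` toggle is required.
        .send_json(json!({ "filter": "name STARTS WITH estoni" }))?;
    println!("{}", response.into_string()?);
    Ok(())
}
```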

View File

@@ -85,7 +85,7 @@ impl RoFeatures {
             Ok(())
         } else {
             Err(FeatureNotEnabledError {
-                disabled_action: "Using `CONTAINS` or `STARTS WITH` in a filter",
+                disabled_action: "Using `CONTAINS` in a filter",
                 feature: "contains filter",
                 issue_link: "https://github.com/orgs/meilisearch/discussions/763",
             }
@@ -182,6 +182,7 @@ impl FeatureData {
             ..persisted_features
         }));
 
+        // Once this is stabilized, network should be stored along with webhooks in index-scheduler's persisted database
         let network_db = runtime_features_db.remap_data_type::<SerdeJson<Network>>();
         let network: Network = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();

View File

@@ -71,7 +71,7 @@ pub struct IndexMapper {
     /// Path to the folder where the LMDB environments of each index are.
     base_path: PathBuf,
     /// The map size an index is opened with on the first time.
-    index_base_map_size: usize,
+    pub(crate) index_base_map_size: usize,
     /// The quantity by which the map size of an index is incremented upon reopening, in bytes.
     index_growth_amount: usize,
     /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.

View File

@@ -26,11 +26,11 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
         version,
         queue,
         scheduler,
+        persisted,
 
         index_mapper,
         features: _,
-        webhook_url: _,
-        webhook_authorization_header: _,
+        webhooks: _,
         test_breakpoint_sdr: _,
         planned_failures: _,
         run_loop_iteration: _,
@@ -62,6 +62,13 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
     }
     snap.push_str("\n----------------------------------------------------------------------\n");
 
+    let persisted_db_snapshot = snapshot_persisted_db(&rtxn, persisted);
+    if !persisted_db_snapshot.is_empty() {
+        snap.push_str("### Persisted:\n");
+        snap.push_str(&persisted_db_snapshot);
+        snap.push_str("----------------------------------------------------------------------\n");
+    }
+
     snap.push_str("### All Tasks:\n");
     snap.push_str(&snapshot_all_tasks(&rtxn, queue.tasks.all_tasks));
     snap.push_str("----------------------------------------------------------------------\n");
@@ -200,6 +207,16 @@ pub fn snapshot_date_db(rtxn: &RoTxn, db: Database<BEI128, CboRoaringBitmapCodec
     snap
 }
 
+pub fn snapshot_persisted_db(rtxn: &RoTxn, db: &Database<Str, Str>) -> String {
+    let mut snap = String::new();
+    let iter = db.iter(rtxn).unwrap();
+    for next in iter {
+        let (key, value) = next.unwrap();
+        snap.push_str(&format!("{key}: {value}\n"));
+    }
+    snap
+}
+
 pub fn snapshot_task(task: &Task) -> String {
     let mut snap = String::new();
     let Task {
@@ -311,6 +328,7 @@ pub fn snapshot_status(
     }
     snap
 }
+
 pub fn snapshot_kind(rtxn: &RoTxn, db: Database<SerdeBincode<Kind>, RoaringBitmapCodec>) -> String {
     let mut snap = String::new();
     let iter = db.iter(rtxn).unwrap();
@@ -331,6 +349,7 @@ pub fn snapshot_index_tasks(rtxn: &RoTxn, db: Database<Str, RoaringBitmapCodec>)
     }
     snap
 }
+
 pub fn snapshot_canceled_by(rtxn: &RoTxn, db: Database<BEU32, RoaringBitmapCodec>) -> String {
     let mut snap = String::new();
     let iter = db.iter(rtxn).unwrap();

View File

@@ -65,13 +65,16 @@ use meilisearch_types::milli::vector::{
 use meilisearch_types::milli::{self, Index};
 use meilisearch_types::task_view::TaskView;
 use meilisearch_types::tasks::{KindWithContent, Task};
+use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
 use milli::vector::db::IndexEmbeddingConfig;
 use processing::ProcessingTasks;
 pub use queue::Query;
 use queue::Queue;
 use roaring::RoaringBitmap;
 use scheduler::Scheduler;
+use serde::{Deserialize, Serialize};
 use time::OffsetDateTime;
+use uuid::Uuid;
 use versioning::Versioning;
 
 use crate::index_mapper::IndexMapper;
@@ -80,7 +83,15 @@ use crate::utils::clamp_to_page_size;
 pub(crate) type BEI128 = I128<BE>;
 
 const TASK_SCHEDULER_SIZE_THRESHOLD_PERCENT_INT: u64 = 40;
-const CHAT_SETTINGS_DB_NAME: &str = "chat-settings";
+
+mod db_name {
+    pub const CHAT_SETTINGS: &str = "chat-settings";
+    pub const PERSISTED: &str = "persisted";
+}
+
+mod db_keys {
+    pub const WEBHOOKS: &str = "webhooks";
+}
 
 #[derive(Debug)]
 pub struct IndexSchedulerOptions {
@@ -98,10 +109,10 @@ pub struct IndexSchedulerOptions {
     pub snapshots_path: PathBuf,
     /// The path to the folder containing the dumps.
     pub dumps_path: PathBuf,
-    /// The URL on which we must send the tasks statuses
-    pub webhook_url: Option<String>,
-    /// The value we will send into the Authorization HTTP header on the webhook URL
-    pub webhook_authorization_header: Option<String>,
+    /// The webhook url that was set by the CLI.
+    pub cli_webhook_url: Option<String>,
+    /// The Authorization header to send to the webhook URL that was set by the CLI.
+    pub cli_webhook_authorization: Option<String>,
     /// The maximum size, in bytes, of the task index.
     pub task_db_size: usize,
     /// The size, in bytes, with which a meilisearch index is opened the first time of each meilisearch index.
@@ -171,10 +182,11 @@ pub struct IndexScheduler {
     /// Whether we should use the old document indexer or the new one.
     pub(crate) experimental_no_edition_2024_for_dumps: bool,
 
-    /// The webhook url we should send tasks to after processing every batches.
-    pub(crate) webhook_url: Option<String>,
-    /// The Authorization header to send to the webhook URL.
-    pub(crate) webhook_authorization_header: Option<String>,
+    /// A database to store single-keyed data that is persisted across restarts.
+    persisted: Database<Str, Str>,
+
+    /// Webhook, loaded and stored in the `persisted` database
+    webhooks: Arc<Webhooks>,
 
     /// A map to retrieve the runtime representation of an embedder depending on its configuration.
     ///
@@ -214,8 +226,9 @@ impl IndexScheduler {
             index_mapper: self.index_mapper.clone(),
             cleanup_enabled: self.cleanup_enabled,
             experimental_no_edition_2024_for_dumps: self.experimental_no_edition_2024_for_dumps,
-            webhook_url: self.webhook_url.clone(),
-            webhook_authorization_header: self.webhook_authorization_header.clone(),
+            persisted: self.persisted,
+
+            webhooks: self.webhooks.clone(),
             embedders: self.embedders.clone(),
             #[cfg(test)]
             test_breakpoint_sdr: self.test_breakpoint_sdr.clone(),
@@ -234,6 +247,7 @@ impl IndexScheduler {
             + IndexMapper::nb_db()
             + features::FeatureData::nb_db()
             + 1 // chat-prompts
+            + 1 // persisted
     }
 
     /// Create an index scheduler and start its run loop.
@@ -284,10 +298,18 @@ impl IndexScheduler {
         let version = versioning::Versioning::new(&env, from_db_version)?;
 
         let mut wtxn = env.write_txn()?;
+
         let features = features::FeatureData::new(&env, &mut wtxn, options.instance_features)?;
         let queue = Queue::new(&env, &mut wtxn, &options)?;
         let index_mapper = IndexMapper::new(&env, &mut wtxn, &options, budget)?;
-        let chat_settings = env.create_database(&mut wtxn, Some(CHAT_SETTINGS_DB_NAME))?;
+        let chat_settings = env.create_database(&mut wtxn, Some(db_name::CHAT_SETTINGS))?;
+
+        let persisted = env.create_database(&mut wtxn, Some(db_name::PERSISTED))?;
+        let webhooks_db = persisted.remap_data_type::<SerdeJson<Webhooks>>();
+        let mut webhooks = webhooks_db.get(&wtxn, db_keys::WEBHOOKS)?.unwrap_or_default();
+        webhooks
+            .with_cli(options.cli_webhook_url.clone(), options.cli_webhook_authorization.clone());
+
         wtxn.commit()?;
 
         // allow unreachable_code to get rids of the warning in the case of a test build.
@@ -303,8 +325,8 @@ impl IndexScheduler {
             experimental_no_edition_2024_for_dumps: options
                 .indexer_config
                 .experimental_no_edition_2024_for_dumps,
-            webhook_url: options.webhook_url,
-            webhook_authorization_header: options.webhook_authorization_header,
+            persisted,
+            webhooks: Arc::new(webhooks),
             embedders: Default::default(),
             #[cfg(test)]
@@ -752,86 +774,92 @@ impl IndexScheduler {
         Ok(())
     }
 
-    /// Once the tasks changes have been committed we must send all the tasks that were updated to our webhook if there is one.
-    fn notify_webhook(&self, updated: &RoaringBitmap) -> Result<()> {
-        if let Some(ref url) = self.webhook_url {
-            struct TaskReader<'a, 'b> {
-                rtxn: &'a RoTxn<'a>,
-                index_scheduler: &'a IndexScheduler,
-                tasks: &'b mut roaring::bitmap::Iter<'b>,
-                buffer: Vec<u8>,
-                written: usize,
-            }
-
-            impl Read for TaskReader<'_, '_> {
-                fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result<usize> {
-                    if self.buffer.is_empty() {
-                        match self.tasks.next() {
-                            None => return Ok(0),
-                            Some(task_id) => {
-                                let task = self
-                                    .index_scheduler
-                                    .queue
-                                    .tasks
-                                    .get_task(self.rtxn, task_id)
-                                    .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?
-                                    .ok_or_else(|| {
-                                        io::Error::new(
-                                            io::ErrorKind::Other,
-                                            Error::CorruptedTaskQueue,
-                                        )
-                                    })?;
-
-                                serde_json::to_writer(
-                                    &mut self.buffer,
-                                    &TaskView::from_task(&task),
-                                )?;
-                                self.buffer.push(b'\n');
-                            }
-                        }
-                    }
-
-                    let mut to_write = &self.buffer[self.written..];
-                    let wrote = io::copy(&mut to_write, &mut buf)?;
-                    self.written += wrote as usize;
-
-                    // we wrote everything and must refresh our buffer on the next call
-                    if self.written == self.buffer.len() {
-                        self.written = 0;
-                        self.buffer.clear();
-                    }
-
-                    Ok(wrote as usize)
-                }
-            }
-
-            let rtxn = self.env.read_txn()?;
-
-            let task_reader = TaskReader {
-                rtxn: &rtxn,
-                index_scheduler: self,
-                tasks: &mut updated.into_iter(),
-                buffer: Vec::with_capacity(50), // on average a task is around ~100 bytes
-                written: 0,
-            };
-
-            // let reader = GzEncoder::new(BufReader::new(task_reader), Compression::default());
-            let reader = GzEncoder::new(BufReader::new(task_reader), Compression::default());
-            let request = ureq::post(url)
-                .timeout(Duration::from_secs(30))
-                .set("Content-Encoding", "gzip")
-                .set("Content-Type", "application/x-ndjson");
-            let request = match &self.webhook_authorization_header {
-                Some(header) => request.set("Authorization", header),
-                None => request,
-            };
-
-            if let Err(e) = request.send(reader) {
-                tracing::error!("While sending data to the webhook: {e}");
-            }
-        }
-
-        Ok(())
+    /// Once the tasks changes have been committed we must send all the tasks that were updated to our webhooks
+    fn notify_webhooks(&self, updated: RoaringBitmap) {
+        struct TaskReader<'a, 'b> {
+            rtxn: &'a RoTxn<'a>,
+            index_scheduler: &'a IndexScheduler,
+            tasks: &'b mut roaring::bitmap::Iter<'b>,
+            buffer: Vec<u8>,
+            written: usize,
+        }
+
+        impl Read for TaskReader<'_, '_> {
+            fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result<usize> {
+                if self.buffer.is_empty() {
+                    match self.tasks.next() {
+                        None => return Ok(0),
+                        Some(task_id) => {
+                            let task = self
+                                .index_scheduler
+                                .queue
+                                .tasks
+                                .get_task(self.rtxn, task_id)
+                                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?
+                                .ok_or_else(|| {
+                                    io::Error::new(io::ErrorKind::Other, Error::CorruptedTaskQueue)
+                                })?;
+
+                            serde_json::to_writer(&mut self.buffer, &TaskView::from_task(&task))?;
+                            self.buffer.push(b'\n');
+                        }
+                    }
+                }
+
+                let mut to_write = &self.buffer[self.written..];
+                let wrote = io::copy(&mut to_write, &mut buf)?;
+                self.written += wrote as usize;
+
+                // we wrote everything and must refresh our buffer on the next call
+                if self.written == self.buffer.len() {
+                    self.written = 0;
+                    self.buffer.clear();
+                }
+
+                Ok(wrote as usize)
+            }
+        }
+
+        let webhooks = self.webhooks.get_all();
+        if webhooks.is_empty() {
+            return;
+        }
+        let this = self.private_clone();
+        // We must take the RoTxn before entering the thread::spawn otherwise another batch may be
+        // processed before we had the time to take our txn.
+        let rtxn = match self.env.clone().static_read_txn() {
+            Ok(rtxn) => rtxn,
+            Err(e) => {
+                tracing::error!("Couldn't get an rtxn to notify the webhook: {e}");
+                return;
+            }
+        };
+
+        std::thread::spawn(move || {
+            for (uuid, Webhook { url, headers }) in webhooks.iter() {
+                let task_reader = TaskReader {
+                    rtxn: &rtxn,
+                    index_scheduler: &this,
+                    tasks: &mut updated.iter(),
+                    buffer: Vec::with_capacity(page_size::get()),
+                    written: 0,
+                };
+
+                let reader = GzEncoder::new(BufReader::new(task_reader), Compression::default());
+                let mut request = ureq::post(url)
+                    .timeout(Duration::from_secs(30))
+                    .set("Content-Encoding", "gzip")
+                    .set("Content-Type", "application/x-ndjson");
+                for (header_name, header_value) in headers.iter() {
+                    request = request.set(header_name, header_value);
+                }
+
+                if let Err(e) = request.send(reader) {
+                    tracing::error!("While sending data to the webhook {uuid}: {e}");
+                }
+            }
+        });
     }
 
     pub fn index_stats(&self, index_uid: &str) -> Result<IndexStats> {
@@ -862,6 +890,29 @@ impl IndexScheduler {
         self.features.network()
     }
 
+    pub fn update_runtime_webhooks(&self, runtime: RuntimeWebhooks) -> Result<()> {
+        let webhooks = Webhooks::from_runtime(runtime);
+        let mut wtxn = self.env.write_txn()?;
+        let webhooks_db = self.persisted.remap_data_type::<SerdeJson<Webhooks>>();
+        webhooks_db.put(&mut wtxn, db_keys::WEBHOOKS, &webhooks)?;
+        wtxn.commit()?;
+        self.webhooks.update_runtime(webhooks.into_runtime());
+        Ok(())
+    }
+
+    pub fn webhooks_dump_view(&self) -> WebhooksDumpView {
+        // We must not dump the cli api key
+        WebhooksDumpView { webhooks: self.webhooks.get_runtime() }
+    }
+
+    pub fn webhooks_view(&self) -> WebhooksView {
+        WebhooksView { webhooks: self.webhooks.get_all() }
+    }
+
+    pub fn retrieve_runtime_webhooks(&self) -> RuntimeWebhooks {
+        self.webhooks.get_runtime()
+    }
+
     pub fn embedders(
         &self,
         index_uid: String,
@@ -990,3 +1041,72 @@ pub struct IndexStats {
     /// Internal stats computed from the index.
     pub inner_stats: index_mapper::IndexStats,
 }
+
+/// These structures are not meant to be exposed to the end user; if needed, use the meilisearch-types::webhooks structures instead.
+/// /!\ Every time you deserialize this structure you should fill the cli_webhook later on with the `with_cli` method. /!\
+#[derive(Debug, Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
+struct Webhooks {
+    // The cli webhook should *never* be stored in a database.
+    // It represents a state that only exists for this execution of meilisearch.
+    #[serde(skip)]
+    pub cli: Option<CliWebhook>,
+
+    #[serde(default)]
+    pub runtime: RwLock<RuntimeWebhooks>,
+}
+
+type RuntimeWebhooks = BTreeMap<Uuid, Webhook>;
+
+impl Webhooks {
+    pub fn with_cli(&mut self, url: Option<String>, auth: Option<String>) {
+        if let Some(url) = url {
+            let webhook = CliWebhook { url, auth };
+            self.cli = Some(webhook);
+        }
+    }
+
+    pub fn from_runtime(webhooks: RuntimeWebhooks) -> Self {
+        Self { cli: None, runtime: RwLock::new(webhooks) }
+    }
+
+    pub fn into_runtime(self) -> RuntimeWebhooks {
+        // safe because we own self and it cannot be cloned
+        self.runtime.into_inner().unwrap()
+    }
+
+    pub fn update_runtime(&self, webhooks: RuntimeWebhooks) {
+        *self.runtime.write().unwrap() = webhooks;
+    }
+
+    /// Returns all the webhooks in a unified view. The cli webhook is represented with a uuid set to 0.
+    pub fn get_all(&self) -> BTreeMap<Uuid, Webhook> {
+        self.cli
+            .as_ref()
+            .map(|wh| (Uuid::nil(), Webhook::from(wh)))
+            .into_iter()
+            .chain(self.runtime.read().unwrap().iter().map(|(uuid, wh)| (*uuid, wh.clone())))
+            .collect()
+    }
+
+    /// Returns all the runtime webhooks.
+    pub fn get_runtime(&self) -> BTreeMap<Uuid, Webhook> {
+        self.runtime.read().unwrap().iter().map(|(uuid, wh)| (*uuid, wh.clone())).collect()
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)]
+struct CliWebhook {
+    pub url: String,
+    pub auth: Option<String>,
+}
+
+impl From<&CliWebhook> for Webhook {
+    fn from(webhook: &CliWebhook) -> Self {
+        let mut headers = BTreeMap::new();
+        if let Some(ref auth) = webhook.auth {
+            headers.insert("Authorization".to_string(), auth.to_string());
+        }
+        Self { url: webhook.url.to_string(), headers }
+    }
+}
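
Each payload produced by `notify_webhooks` above is a gzip-compressed NDJSON stream, one serialized `TaskView` per line. A minimal sketch of decoding such a body on the receiving end, assuming the `flate2` and `serde_json` crates (the helper name is illustrative, not part of this PR):

```rust
use std::io::{BufRead, BufReader, Read};

use flate2::read::GzDecoder;

// Illustrative helper: decode a gzip NDJSON webhook body into JSON values.
fn decode_webhook_body(body: impl Read) -> std::io::Result<Vec<serde_json::Value>> {
    let reader = BufReader::new(GzDecoder::new(body));
    let mut tasks = Vec::new();
    for line in reader.lines() {
        let line = line?;
        if line.is_empty() {
            continue;
        }
        // Each non-empty line is one task view.
        tasks.push(serde_json::from_str(&line)?);
    }
    Ok(tasks)
}
```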

View File

@@ -108,6 +108,7 @@ make_enum_progress! {
         DumpTheBatches,
         DumpTheIndexes,
         DumpTheExperimentalFeatures,
+        DumpTheWebhooks,
         CompressTheDump,
     }
 }

View File

@@ -446,8 +446,7 @@ impl IndexScheduler {
             Ok(())
         })?;
 
-        // We shouldn't crash the tick function if we can't send data to the webhook.
-        let _ = self.notify_webhook(&ids);
+        self.notify_webhooks(ids);
 
         #[cfg(test)]
         self.breakpoint(crate::test_utils::Breakpoint::AfterProcessing);

View File

@@ -270,6 +270,11 @@ impl IndexScheduler {
         let network = self.network();
         dump.create_network(network)?;
 
+        // 7. Dump the webhooks
+        progress.update_progress(DumpCreationProgress::DumpTheWebhooks);
+        let webhooks = self.webhooks_dump_view();
+        dump.create_webhooks(webhooks)?;
+
         let dump_uid = started_at.format(format_description!(
             "[year repr:full][month repr:numerical][day padding:zero]-[hour padding:zero][minute padding:zero][second padding:zero][subsecond digits:3]"
         )).unwrap();
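
A dump produced after this change carries a `webhooks.json` entry next to `network.json`. A small sketch to list a dump's entries and check for it, assuming the `tar` and `flate2` crates and an illustrative dump path:

```rust
use std::fs::File;

use flate2::read::GzDecoder;
use tar::Archive;

// Illustrative sketch: the dump path is an assumption.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let file = File::open("dumps/20250731-092130479.dump")?;
    let mut archive = Archive::new(GzDecoder::new(file));
    for entry in archive.entries()? {
        // Expect `webhooks.json` to appear alongside `network.json` and the indexes.
        println!("{}", entry?.path()?.display());
    }
    Ok(())
}
```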

View File

@@ -7,9 +7,73 @@ use meilisearch_types::milli::progress::{Progress, VariableNameStep};
 use meilisearch_types::tasks::{Status, Task};
 use meilisearch_types::{compression, VERSION_FILE_NAME};
 
+use crate::heed::EnvOpenOptions;
 use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress};
+use crate::queue::TaskQueue;
 use crate::{Error, IndexScheduler, Result};
 
+/// # Safety
+///
+/// See [`EnvOpenOptions::open`].
+unsafe fn remove_tasks(
+    tasks: &[Task],
+    dst: &std::path::Path,
+    index_base_map_size: usize,
+) -> Result<()> {
+    let env_options = EnvOpenOptions::new();
+    let mut env_options = env_options.read_txn_without_tls();
+    let env = env_options.max_dbs(TaskQueue::nb_db()).map_size(index_base_map_size).open(dst)?;
+    let mut wtxn = env.write_txn()?;
+    let task_queue = TaskQueue::new(&env, &mut wtxn)?;
+
+    // Destructuring to ensure the code below gets updated if a database gets added in the future.
+    let TaskQueue {
+        all_tasks,
+        status,
+        kind,
+        index_tasks: _, // snapshot creation tasks are not index tasks
+        canceled_by,
+        enqueued_at,
+        started_at,
+        finished_at,
+    } = task_queue;
+
+    for task in tasks {
+        all_tasks.delete(&mut wtxn, &task.uid)?;
+
+        let mut tasks = status.get(&wtxn, &task.status)?.unwrap_or_default();
+        tasks.remove(task.uid);
+        status.put(&mut wtxn, &task.status, &tasks)?;
+
+        let mut tasks = kind.get(&wtxn, &task.kind.as_kind())?.unwrap_or_default();
+        tasks.remove(task.uid);
+        kind.put(&mut wtxn, &task.kind.as_kind(), &tasks)?;
+
+        canceled_by.delete(&mut wtxn, &task.uid)?;
+
+        let timestamp = task.enqueued_at.unix_timestamp_nanos();
+        let mut tasks = enqueued_at.get(&wtxn, &timestamp)?.unwrap_or_default();
+        tasks.remove(task.uid);
+        enqueued_at.put(&mut wtxn, &timestamp, &tasks)?;
+
+        if let Some(task_started_at) = task.started_at {
+            let timestamp = task_started_at.unix_timestamp_nanos();
+            let mut tasks = started_at.get(&wtxn, &timestamp)?.unwrap_or_default();
+            tasks.remove(task.uid);
+            started_at.put(&mut wtxn, &timestamp, &tasks)?;
+        }
+
+        if let Some(task_finished_at) = task.finished_at {
+            let timestamp = task_finished_at.unix_timestamp_nanos();
+            let mut tasks = finished_at.get(&wtxn, &timestamp)?.unwrap_or_default();
+            tasks.remove(task.uid);
+            finished_at.put(&mut wtxn, &timestamp, &tasks)?;
+        }
+    }
+
+    wtxn.commit()?;
+
+    Ok(())
+}
+
 impl IndexScheduler {
     pub(super) fn process_snapshot(
         &self,
@@ -48,14 +112,26 @@ impl IndexScheduler {
         };
         self.env.copy_to_path(dst.join("data.mdb"), compaction_option)?;
 
-        // 2.2 Create a read transaction on the index-scheduler
+        // 2.2 Remove the current snapshot tasks
+        //
+        // This is done to ensure that the tasks are not processed again when the snapshot is imported
+        //
+        // # Safety
+        //
+        // This is safe because we open the env file we just created in a temporary directory.
+        // We are sure it's not being used by any other process nor thread.
+        unsafe {
+            remove_tasks(&tasks, &dst, self.index_mapper.index_base_map_size)?;
+        }
+
+        // 2.3 Create a read transaction on the index-scheduler
         let rtxn = self.env.read_txn()?;
 
-        // 2.3 Create the update files directory
+        // 2.4 Create the update files directory
         let update_files_dir = temp_snapshot_dir.path().join("update_files");
         fs::create_dir_all(&update_files_dir)?;
 
-        // 2.4 Only copy the update files of the enqueued tasks
+        // 2.5 Only copy the update files of the enqueued tasks
         progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
         let enqueued = self.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
         let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 16, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 17, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [4,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.16.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.17.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
 2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
 3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 16, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 17, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 16, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 17, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 ----------------------------------------------------------------------
 ### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 16, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 17, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 ----------------------------------------------------------------------
 ### Status:
@@ -37,7 +37,7 @@ catto [1,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.16.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.17.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 16, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 17, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 ----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.16.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.17.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 16, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 17, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 3 {uid: 3, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.16.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.17.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

View File

@@ -98,8 +98,8 @@ impl IndexScheduler {
             indexes_path: tempdir.path().join("indexes"),
             snapshots_path: tempdir.path().join("snapshots"),
             dumps_path: tempdir.path().join("dumps"),
-            webhook_url: None,
-            webhook_authorization_header: None,
+            cli_webhook_url: None,
+            cli_webhook_authorization: None,
             task_db_size: 1000 * 1000 * 10, // 10 MB, we don't use MiB on purpose.
             index_base_map_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
             enable_mdb_writemap: false,

View File

@@ -39,6 +39,7 @@ pub fn upgrade_index_scheduler(
         (1, 13, _) => 0,
         (1, 14, _) => 0,
         (1, 15, _) => 0,
+        (1, 16, _) => 0,
         (major, minor, patch) => {
             if major > current_major
                 || (major == current_major && minor > current_minor)

View File

@@ -137,6 +137,14 @@ impl HeedAuthStore {
                 Action::ChatsSettingsAll => {
                     actions.extend([Action::ChatsSettingsGet, Action::ChatsSettingsUpdate]);
                 }
+                Action::WebhooksAll => {
+                    actions.extend([
+                        Action::WebhooksGet,
+                        Action::WebhooksUpdate,
+                        Action::WebhooksDelete,
+                        Action::WebhooksCreate,
+                    ]);
+                }
                 other => {
                     actions.insert(*other);
                 }
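
Because `WebhooksAll` expands as above, an API key created with the single `webhooks.*` action grants all four webhook permissions. A hedged sketch of creating such a key over the `/keys` route, where the instance URL and master key are placeholders:

```rust
use serde_json::json;

// Illustrative sketch: URL and master key are assumptions.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let response = ureq::post("http://localhost:7700/keys")
        .set("Authorization", "Bearer MASTER_KEY")
        .send_json(json!({
            "name": "webhook manager",
            // Expands to webhooks.get/update/delete/create, per the match arm above.
            "actions": ["webhooks.*"],
            "indexes": ["*"],
            "expiresAt": null
        }))?;
    println!("{}", response.into_string()?);
    Ok(())
}
```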

View File

@@ -418,7 +418,16 @@ InvalidChatCompletionSearchDescriptionPrompt , InvalidRequest , BAD_REQU
 InvalidChatCompletionSearchQueryParamPrompt    , InvalidRequest       , BAD_REQUEST ;
 InvalidChatCompletionSearchFilterParamPrompt   , InvalidRequest       , BAD_REQUEST ;
 InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest       , BAD_REQUEST ;
-InvalidChatCompletionPreQueryPrompt            , InvalidRequest       , BAD_REQUEST
+InvalidChatCompletionPreQueryPrompt            , InvalidRequest       , BAD_REQUEST ;
+// Webhooks
+InvalidWebhooks                                , InvalidRequest       , BAD_REQUEST ;
+InvalidWebhookUrl                              , InvalidRequest       , BAD_REQUEST ;
+InvalidWebhookHeaders                          , InvalidRequest       , BAD_REQUEST ;
+ImmutableWebhook                               , InvalidRequest       , BAD_REQUEST ;
+InvalidWebhookUuid                             , InvalidRequest       , BAD_REQUEST ;
+WebhookNotFound                                , InvalidRequest       , NOT_FOUND   ;
+ImmutableWebhookUuid                           , InvalidRequest       , BAD_REQUEST ;
+ImmutableWebhookIsEditable                     , InvalidRequest       , BAD_REQUEST
 }
 
 impl ErrorCode for JoinError {

View File

@@ -365,6 +365,21 @@ pub enum Action {
     #[serde(rename = "*.get")]
     #[deserr(rename = "*.get")]
     AllGet,
+    #[serde(rename = "webhooks.get")]
+    #[deserr(rename = "webhooks.get")]
+    WebhooksGet,
+    #[serde(rename = "webhooks.update")]
+    #[deserr(rename = "webhooks.update")]
+    WebhooksUpdate,
+    #[serde(rename = "webhooks.delete")]
+    #[deserr(rename = "webhooks.delete")]
+    WebhooksDelete,
+    #[serde(rename = "webhooks.create")]
+    #[deserr(rename = "webhooks.create")]
+    WebhooksCreate,
+    #[serde(rename = "webhooks.*")]
+    #[deserr(rename = "webhooks.*")]
+    WebhooksAll,
 }
 
 impl Action {
@@ -416,6 +431,11 @@ impl Action {
             NETWORK_GET => Some(Self::NetworkGet),
             NETWORK_UPDATE => Some(Self::NetworkUpdate),
             ALL_GET => Some(Self::AllGet),
+            WEBHOOKS_GET => Some(Self::WebhooksGet),
+            WEBHOOKS_UPDATE => Some(Self::WebhooksUpdate),
+            WEBHOOKS_DELETE => Some(Self::WebhooksDelete),
+            WEBHOOKS_CREATE => Some(Self::WebhooksCreate),
+            WEBHOOKS_ALL => Some(Self::WebhooksAll),
             _otherwise => None,
         }
     }
@@ -428,7 +448,9 @@ impl Action {
         match self {
             // Any action that expands to others must return false, as it wouldn't be able to expand recursively.
             All | AllGet | DocumentsAll | IndexesAll | ChatsAll | TasksAll | SettingsAll
-            | StatsAll | MetricsAll | DumpsAll | SnapshotsAll | ChatsSettingsAll => false,
+            | StatsAll | MetricsAll | DumpsAll | SnapshotsAll | ChatsSettingsAll | WebhooksAll => {
+                false
+            }
 
             Search => true,
             DocumentsAdd => false,
@@ -463,6 +485,10 @@ impl Action {
             ChatsDelete => false,
             ChatsSettingsGet => true,
             ChatsSettingsUpdate => false,
+            WebhooksGet => true,
+            WebhooksUpdate => false,
+            WebhooksDelete => false,
+            WebhooksCreate => false,
         }
     }
@@ -522,6 +548,12 @@ pub mod actions {
     pub const CHATS_SETTINGS_ALL: u8 = ChatsSettingsAll.repr();
     pub const CHATS_SETTINGS_GET: u8 = ChatsSettingsGet.repr();
     pub const CHATS_SETTINGS_UPDATE: u8 = ChatsSettingsUpdate.repr();
+
+    pub const WEBHOOKS_GET: u8 = WebhooksGet.repr();
+    pub const WEBHOOKS_UPDATE: u8 = WebhooksUpdate.repr();
+    pub const WEBHOOKS_DELETE: u8 = WebhooksDelete.repr();
+    pub const WEBHOOKS_CREATE: u8 = WebhooksCreate.repr();
+    pub const WEBHOOKS_ALL: u8 = WebhooksAll.repr();
 }
 
 #[cfg(test)]
@@ -577,6 +609,11 @@ pub(crate) mod test {
         assert!(ChatsSettingsGet.repr() == 42 && CHATS_SETTINGS_GET == 42);
         assert!(ChatsSettingsUpdate.repr() == 43 && CHATS_SETTINGS_UPDATE == 43);
         assert!(AllGet.repr() == 44 && ALL_GET == 44);
+        assert!(WebhooksGet.repr() == 45 && WEBHOOKS_GET == 45);
+        assert!(WebhooksUpdate.repr() == 46 && WEBHOOKS_UPDATE == 46);
+        assert!(WebhooksDelete.repr() == 47 && WEBHOOKS_DELETE == 47);
+        assert!(WebhooksCreate.repr() == 48 && WEBHOOKS_CREATE == 48);
+        assert!(WebhooksAll.repr() == 49 && WEBHOOKS_ALL == 49);
     }
 
     #[test]
View File

@@ -15,6 +15,7 @@ pub mod star_or;
 pub mod task_view;
 pub mod tasks;
 pub mod versioning;
+pub mod webhooks;
 
 pub use milli::{heed, Index};
 use uuid::Uuid;
 pub use versioning::VERSION_FILE_NAME;

View File

@@ -0,0 +1,28 @@
use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct Webhook {
pub url: String,
#[serde(default)]
pub headers: BTreeMap<String, String>,
}
#[derive(Debug, Serialize, Default, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct WebhooksView {
#[serde(default)]
pub webhooks: BTreeMap<Uuid, Webhook>,
}
// Same as the WebhooksView instead it should never contains the CLI webhooks.
// It's the right structure to use in the dump
#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct WebhooksDumpView {
#[serde(default)]
pub webhooks: BTreeMap<Uuid, Webhook>,
}
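
A short sketch of how these views serialize, assuming `serde_json` (with uuid's serde support enabled, as in this crate); the entry itself is made up:

```rust
use std::collections::BTreeMap;

use meilisearch_types::webhooks::{Webhook, WebhooksView};
use uuid::Uuid;

fn main() -> Result<(), serde_json::Error> {
    let mut webhooks = BTreeMap::new();
    // Hypothetical entry; `headers` defaults to empty when deserializing.
    webhooks.insert(
        Uuid::nil(),
        Webhook { url: "https://example.com/hook".to_string(), headers: BTreeMap::new() },
    );
    // Serializes with camelCase keys, e.g. {"webhooks":{"00000000-…":{"url":…,"headers":{}}}}
    println!("{}", serde_json::to_string_pretty(&WebhooksView { webhooks })?);
    Ok(())
}
```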

View File

@@ -170,5 +170,5 @@ german = ["meilisearch-types/german"]
 turkish = ["meilisearch-types/turkish"]
 
 [package.metadata.mini-dashboard]
-assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.21/build.zip"
-sha1 = "94f56a8e24e2e3a1bc1bd7d9ceaa23464a5e241a"
+assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.22/build.zip"
+sha1 = "b70b2036b5f167da9ea0b637da8b320c7ea88254"

View File

@@ -223,8 +223,8 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
         indexes_path: opt.db_path.join("indexes"),
         snapshots_path: opt.snapshot_dir.clone(),
         dumps_path: opt.dump_dir.clone(),
-        webhook_url: opt.task_webhook_url.as_ref().map(|url| url.to_string()),
-        webhook_authorization_header: opt.task_webhook_authorization_header.clone(),
+        cli_webhook_url: opt.task_webhook_url.as_ref().map(|url| url.to_string()),
+        cli_webhook_authorization: opt.task_webhook_authorization_header.clone(),
         task_db_size: opt.max_task_db_size.as_u64() as usize,
         index_base_map_size: opt.max_index_size.as_u64() as usize,
         enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
@@ -491,7 +491,12 @@ fn import_dump(
         let _ = std::fs::write(db_path.join("instance-uid"), instance_uid.to_string().as_bytes());
     };
 
-    // 2. Import the `Key`s.
+    // 2. Import the webhooks
+    if let Some(webhooks) = dump_reader.webhooks() {
+        index_scheduler.update_runtime_webhooks(webhooks.webhooks.clone())?;
+    }
+
+    // 3. Import the `Key`s.
     let mut keys = Vec::new();
     auth.raw_delete_all_keys()?;
     for key in dump_reader.keys()? {
@@ -500,20 +505,20 @@ fn import_dump(
         keys.push(key);
     }
 
-    // 3. Import the `ChatCompletionSettings`s.
+    // 4. Import the `ChatCompletionSettings`s.
     for result in dump_reader.chat_completions_settings()? {
         let (name, settings) = result?;
         index_scheduler.put_chat_settings(&name, &settings)?;
     }
 
-    // 4. Import the runtime features and network
+    // 5. Import the runtime features and network
     let features = dump_reader.features()?.unwrap_or_default();
     index_scheduler.put_runtime_features(features)?;
 
     let network = dump_reader.network()?.cloned().unwrap_or_default();
     index_scheduler.put_network(network)?;
 
-    // 4.1 Use all cpus to process dump if `max_indexing_threads` not configured
+    // 5.1 Use all cpus to process dump if `max_indexing_threads` not configured
     let backup_config;
     let base_config = index_scheduler.indexer_config();
@@ -530,7 +535,7 @@ fn import_dump(
     // /!\ The tasks must be imported AFTER importing the indexes or else the scheduler might
     // try to process tasks while we're trying to import the indexes.
 
-    // 5. Import the indexes.
+    // 6. Import the indexes.
     for index_reader in dump_reader.indexes()? {
         let mut index_reader = index_reader?;
         let metadata = index_reader.metadata();
@@ -543,12 +548,12 @@ fn import_dump(
         let mut wtxn = index.write_txn()?;
 
         let mut builder = milli::update::Settings::new(&mut wtxn, &index, indexer_config);
-        // 5.1 Import the primary key if there is one.
+        // 6.1 Import the primary key if there is one.
         if let Some(ref primary_key) = metadata.primary_key {
            builder.set_primary_key(primary_key.to_string());
         }
 
-        // 5.2 Import the settings.
+        // 6.2 Import the settings.
         tracing::info!("Importing the settings.");
         let settings = index_reader.settings()?;
         apply_settings_to_builder(&settings, &mut builder);
@@ -560,8 +565,8 @@ fn import_dump(
         let rtxn = index.read_txn()?;
 
         if index_scheduler.no_edition_2024_for_dumps() {
-            // 5.3 Import the documents.
-            // 5.3.1 We need to recreate the grenad+obkv format accepted by the index.
+            // 6.3 Import the documents.
+            // 6.3.1 We need to recreate the grenad+obkv format accepted by the index.
             tracing::info!("Importing the documents.");
             let file = tempfile::tempfile()?;
             let mut builder = DocumentsBatchBuilder::new(BufWriter::new(file));
@@ -572,7 +577,7 @@ fn import_dump(
             // This flush the content of the batch builder.
             let file = builder.into_inner()?.into_inner()?;
 
-            // 5.3.2 We feed it to the milli index.
+            // 6.3.2 We feed it to the milli index.
             let reader = BufReader::new(file);
             let reader = DocumentsBatchReader::from_reader(reader)?;
@@ -651,15 +656,15 @@ fn import_dump(
         index_scheduler.refresh_index_stats(&uid)?;
     }
 
-    // 6. Import the queue
+    // 7. Import the queue
     let mut index_scheduler_dump = index_scheduler.register_dumped_task()?;
 
-    // 6.1. Import the batches
+    // 7.1. Import the batches
     for ret in dump_reader.batches()? {
         let batch = ret?;
         index_scheduler_dump.register_dumped_batch(batch)?;
     }
 
-    // 6.2. Import the tasks
+    // 7.2. Import the tasks
     for ret in dump_reader.tasks()? {
         let (task, file) = ret?;
         index_scheduler_dump.register_dumped_task(task, file)?;

View File

@@ -206,11 +206,13 @@ pub struct Opt {
pub env: String,
/// Called whenever a task finishes so a third party can be notified.
+/// See also the dedicated API `/webhooks`.
#[clap(long, env = MEILI_TASK_WEBHOOK_URL)]
pub task_webhook_url: Option<Url>,
/// The Authorization header to send on the webhook URL whenever
/// a task finishes so a third party can be notified.
+/// See also the dedicated API `/webhooks`.
#[clap(long, env = MEILI_TASK_WEBHOOK_AUTHORIZATION_HEADER)]
pub task_webhook_authorization_header: Option<String>,


@@ -511,7 +511,7 @@ make_setting_routes!(
},
{
route: "/chat",
-update_verb: put,
+update_verb: patch,
value_type: ChatSettings,
err_type: meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsIndexChat,


@@ -41,6 +41,7 @@ use crate::routes::indexes::IndexView;
use crate::routes::multi_search::SearchResults;
use crate::routes::network::{Network, Remote};
use crate::routes::swap_indexes::SwapIndexesPayload;
+use crate::routes::webhooks::{WebhookResults, WebhookSettings, WebhookWithMetadata};
use crate::search::{
FederatedSearch, FederatedSearchResult, Federation, FederationOptions, MergeFacets,
SearchQueryWithIndex, SearchResultWithIndex, SimilarQuery, SimilarResult,
@@ -70,6 +71,7 @@ mod swap_indexes;
pub mod tasks;
#[cfg(test)]
mod tasks_test;
+mod webhooks;
#[derive(OpenApi)]
#[openapi(
@@ -89,6 +91,7 @@ mod tasks_test;
(path = "/experimental-features", api = features::ExperimentalFeaturesApi), (path = "/experimental-features", api = features::ExperimentalFeaturesApi),
(path = "/export", api = export::ExportApi), (path = "/export", api = export::ExportApi),
(path = "/network", api = network::NetworkApi), (path = "/network", api = network::NetworkApi),
(path = "/webhooks", api = webhooks::WebhooksApi),
), ),
paths(get_health, get_version, get_stats), paths(get_health, get_version, get_stats),
tags( tags(
@@ -99,7 +102,7 @@ mod tasks_test;
url = "/", url = "/",
description = "Local server", description = "Local server",
)), )),
components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export)) components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata))
)] )]
pub struct MeilisearchApi; pub struct MeilisearchApi;
@@ -120,7 +123,8 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
.service(web::scope("/experimental-features").configure(features::configure)) .service(web::scope("/experimental-features").configure(features::configure))
.service(web::scope("/network").configure(network::configure)) .service(web::scope("/network").configure(network::configure))
.service(web::scope("/export").configure(export::configure)) .service(web::scope("/export").configure(export::configure))
.service(web::scope("/chats").configure(chats::configure)); .service(web::scope("/chats").configure(chats::configure))
.service(web::scope("/webhooks").configure(webhooks::configure));
#[cfg(feature = "swagger")] #[cfg(feature = "swagger")]
{ {


@@ -51,7 +51,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
get,
path = "",
tag = "Network",
-security(("Bearer" = ["network.get", "network.*", "*"])),
+security(("Bearer" = ["network.get", "*"])),
responses(
(status = OK, description = "Known nodes are returned", body = Network, content_type = "application/json", example = json!(
{
@@ -168,7 +168,7 @@ impl Aggregate for PatchNetworkAnalytics {
path = "",
tag = "Network",
request_body = Network,
-security(("Bearer" = ["network.update", "network.*", "*"])),
+security(("Bearer" = ["network.update", "*"])),
responses(
(status = OK, description = "New network state is returned", body = Network, content_type = "application/json", example = json!(
{


@@ -0,0 +1,474 @@
use std::collections::BTreeMap;
use std::str::FromStr;
use actix_http::header::{
HeaderName, HeaderValue, InvalidHeaderName as ActixInvalidHeaderName,
InvalidHeaderValue as ActixInvalidHeaderValue,
};
use actix_web::web::{self, Data, Path};
use actix_web::{HttpRequest, HttpResponse};
use core::convert::Infallible;
use deserr::actix_web::AwebJson;
use deserr::{DeserializeError, Deserr, ValuePointerRef};
use index_scheduler::IndexScheduler;
use meilisearch_types::deserr::{immutable_field_error, DeserrJsonError};
use meilisearch_types::error::deserr_codes::{
BadRequest, InvalidWebhookHeaders, InvalidWebhookUrl,
};
use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::keys::actions;
use meilisearch_types::milli::update::Setting;
use meilisearch_types::webhooks::Webhook;
use serde::Serialize;
use tracing::debug;
use url::Url;
use utoipa::{OpenApi, ToSchema};
use uuid::Uuid;
use crate::analytics::{Aggregate, Analytics};
use crate::extractors::authentication::policies::ActionPolicy;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use WebhooksError::*;
#[derive(OpenApi)]
#[openapi(
paths(get_webhooks, get_webhook, post_webhook, patch_webhook, delete_webhook),
tags((
name = "Webhooks",
description = "The `/webhooks` route allows you to register endpoints to be called once tasks are processed.",
external_docs(url = "https://www.meilisearch.com/docs/reference/api/webhooks"),
)),
)]
pub struct WebhooksApi;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(
web::resource("")
.route(web::get().to(get_webhooks))
.route(web::post().to(SeqHandler(post_webhook))),
)
.service(
web::resource("/{uuid}")
.route(web::get().to(get_webhook))
.route(web::patch().to(SeqHandler(patch_webhook)))
.route(web::delete().to(SeqHandler(delete_webhook))),
);
}
#[derive(Debug, Deserr, ToSchema)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields = deny_immutable_fields_webhook)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub(super) struct WebhookSettings {
#[schema(value_type = Option<String>, example = "https://your.site/on-tasks-completed")]
#[deserr(default, error = DeserrJsonError<InvalidWebhookUrl>)]
#[serde(default)]
url: Setting<String>,
#[schema(value_type = Option<BTreeMap<String, String>>, example = json!({"Authorization":"Bearer a-secret-token"}))]
#[deserr(default, error = DeserrJsonError<InvalidWebhookHeaders>)]
#[serde(default)]
headers: Setting<BTreeMap<String, Setting<String>>>,
}
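// `Setting` is three-state: a value present in the JSON body deserializes to
// `Setting::Set`, an explicit `null` to `Setting::Reset`, and an absent field to
// `Setting::NotSet` (an assumption based on the deserr defaults used here). This
// is what lets a PATCH distinguish "remove this header" (`null`) from "leave
// this header unchanged" (omitted), as the hint in `TooManyHeaders` suggests.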
fn deny_immutable_fields_webhook(
field: &str,
accepted: &[&str],
location: ValuePointerRef,
) -> DeserrJsonError {
match field {
"uuid" => immutable_field_error(field, accepted, Code::ImmutableWebhookUuid),
"isEditable" => immutable_field_error(field, accepted, Code::ImmutableWebhookIsEditable),
_ => deserr::take_cf_content(DeserrJsonError::<BadRequest>::error::<Infallible>(
None,
deserr::ErrorKind::UnknownKey { key: field, accepted },
location,
)),
}
}
#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub(super) struct WebhookWithMetadata {
uuid: Uuid,
is_editable: bool,
#[schema(value_type = WebhookSettings)]
#[serde(flatten)]
webhook: Webhook,
}
impl WebhookWithMetadata {
pub fn from(uuid: Uuid, webhook: Webhook) -> Self {
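// The nil UUID is reserved for the webhook defined through the command-line
// options; it is the only one reported as non-editable, and the PATCH and
// DELETE handlers below reject it with `ImmutableWebhook`.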
Self { uuid, is_editable: uuid != Uuid::nil(), webhook }
}
}
#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub(super) struct WebhookResults {
results: Vec<WebhookWithMetadata>,
}
#[utoipa::path(
get,
path = "",
tag = "Webhooks",
security(("Bearer" = ["webhooks.get", "webhooks.*", "*.get", "*"])),
responses(
(status = OK, description = "Webhooks are returned", body = WebhookResults, content_type = "application/json", example = json!({
"results": [
{
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
"Authorization": "Bearer a-secret-token"
},
"isEditable": true
},
{
"uuid": "550e8400-e29b-41d4-a716-446655440001",
"url": "https://another.site/on-tasks-completed",
"isEditable": true
}
]
})),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "The Authorization header is missing. It must use the bearer authorization method.",
"code": "missing_authorization_header",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#missing_authorization_header"
}
)),
)
)]
async fn get_webhooks(
index_scheduler: GuardedData<ActionPolicy<{ actions::WEBHOOKS_GET }>, Data<IndexScheduler>>,
) -> Result<HttpResponse, ResponseError> {
let webhooks = index_scheduler.webhooks_view();
let results = webhooks
.webhooks
.into_iter()
.map(|(uuid, webhook)| WebhookWithMetadata::from(uuid, webhook))
.collect::<Vec<_>>();
let results = WebhookResults { results };
debug!(returns = ?results, "Get webhooks");
Ok(HttpResponse::Ok().json(results))
}
#[derive(Serialize, Default)]
pub struct PatchWebhooksAnalytics;
impl Aggregate for PatchWebhooksAnalytics {
fn event_name(&self) -> &'static str {
"Webhooks Updated"
}
fn aggregate(self: Box<Self>, _new: Box<Self>) -> Box<Self> {
self
}
fn into_event(self: Box<Self>) -> serde_json::Value {
serde_json::to_value(*self).unwrap_or_default()
}
}
#[derive(Serialize, Default)]
pub struct PostWebhooksAnalytics;
impl Aggregate for PostWebhooksAnalytics {
fn event_name(&self) -> &'static str {
"Webhooks Created"
}
fn aggregate(self: Box<Self>, _new: Box<Self>) -> Box<Self> {
self
}
fn into_event(self: Box<Self>) -> serde_json::Value {
serde_json::to_value(*self).unwrap_or_default()
}
}
#[derive(Debug, thiserror::Error)]
enum WebhooksError {
#[error("The URL for the webhook `{0}` is missing.")]
MissingUrl(Uuid),
#[error("Defining too many webhooks would crush the server. Please limit the number of webhooks to 20. You may use a third-party proxy server to dispatch events to more than 20 endpoints.")]
TooManyWebhooks,
#[error("Too many headers for the webhook `{0}`. Please limit the number of headers to 200. Hint: To remove an already defined header set its value to `null`")]
TooManyHeaders(Uuid),
#[error("Webhook `{0}` is immutable. The webhook defined from the command line cannot be modified using the API.")]
ImmutableWebhook(Uuid),
#[error("Webhook `{0}` not found.")]
WebhookNotFound(Uuid),
#[error("Invalid header name `{0}`: {1}")]
InvalidHeaderName(String, ActixInvalidHeaderName),
#[error("Invalid header value `{0}`: {1}")]
InvalidHeaderValue(String, ActixInvalidHeaderValue),
#[error("Invalid URL `{0}`: {1}")]
InvalidUrl(String, url::ParseError),
#[error("Invalid UUID: {0}")]
InvalidUuid(uuid::Error),
}
impl ErrorCode for WebhooksError {
fn error_code(&self) -> meilisearch_types::error::Code {
match self {
MissingUrl(_) => meilisearch_types::error::Code::InvalidWebhookUrl,
TooManyWebhooks => meilisearch_types::error::Code::InvalidWebhooks,
TooManyHeaders(_) => meilisearch_types::error::Code::InvalidWebhookHeaders,
ImmutableWebhook(_) => meilisearch_types::error::Code::ImmutableWebhook,
WebhookNotFound(_) => meilisearch_types::error::Code::WebhookNotFound,
InvalidHeaderName(_, _) => meilisearch_types::error::Code::InvalidWebhookHeaders,
InvalidHeaderValue(_, _) => meilisearch_types::error::Code::InvalidWebhookHeaders,
InvalidUrl(_, _) => meilisearch_types::error::Code::InvalidWebhookUrl,
InvalidUuid(_) => meilisearch_types::error::Code::InvalidWebhookUuid,
}
}
}
fn patch_webhook_inner(
uuid: &Uuid,
old_webhook: Webhook,
new_webhook: WebhookSettings,
) -> Result<Webhook, WebhooksError> {
let Webhook { url: old_url, mut headers } = old_webhook;
let url = match new_webhook.url {
Setting::Set(url) => url,
Setting::NotSet => old_url,
Setting::Reset => return Err(MissingUrl(uuid.to_owned())),
};
match new_webhook.headers {
Setting::Set(new_headers) => {
for (name, value) in new_headers {
match value {
Setting::Set(value) => {
headers.insert(name, value);
}
Setting::NotSet => continue,
Setting::Reset => {
headers.remove(&name);
continue;
}
}
}
}
Setting::Reset => headers.clear(),
Setting::NotSet => (),
};
if headers.len() > 200 {
return Err(TooManyHeaders(uuid.to_owned()));
}
Ok(Webhook { url, headers })
}
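// An illustrative sketch (not part of the original code) of the merge rules
// above, assuming a stored webhook whose headers are {"A": "1", "B": "2"}:
//   patch {"headers": {"B": null}}  -> {"A": "1"}                     (Reset removes one)
//   patch {"headers": {"C": "3"}}   -> {"A": "1", "B": "2", "C": "3"} (Set inserts)
//   patch {"headers": null}         -> {}                             (Reset clears all)
//   patch omitting `headers`        -> {"A": "1", "B": "2"}           (NotSet: untouched)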
fn check_changed(uuid: Uuid, webhook: &Webhook) -> Result<(), WebhooksError> {
if uuid.is_nil() {
return Err(ImmutableWebhook(uuid));
}
if webhook.url.is_empty() {
return Err(MissingUrl(uuid));
}
if webhook.headers.len() > 200 {
return Err(TooManyHeaders(uuid));
}
for (header, value) in &webhook.headers {
HeaderName::from_bytes(header.as_bytes())
.map_err(|e| InvalidHeaderName(header.to_owned(), e))?;
HeaderValue::from_str(value).map_err(|e| InvalidHeaderValue(header.to_owned(), e))?;
}
if let Err(e) = Url::parse(&webhook.url) {
return Err(InvalidUrl(webhook.url.to_owned(), e));
}
Ok(())
}
#[utoipa::path(
get,
path = "/{uuid}",
tag = "Webhooks",
security(("Bearer" = ["webhooks.get", "webhooks.*", "*.get", "*"])),
responses(
(status = 200, description = "Webhook found", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
"Authorization": "Bearer a-secret"
},
"isEditable": true
})),
(status = 404, description = "Webhook not found", body = ResponseError, content_type = "application/json"),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json"),
),
params(
("uuid" = Uuid, Path, description = "The universally unique identifier of the webhook")
)
)]
async fn get_webhook(
index_scheduler: GuardedData<ActionPolicy<{ actions::WEBHOOKS_GET }>, Data<IndexScheduler>>,
uuid: Path<String>,
) -> Result<HttpResponse, ResponseError> {
let uuid = Uuid::from_str(&uuid.into_inner()).map_err(InvalidUuid)?;
let mut webhooks = index_scheduler.webhooks_view();
let webhook = webhooks.webhooks.remove(&uuid).ok_or(WebhookNotFound(uuid))?;
let webhook = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?webhook, "Get webhook");
Ok(HttpResponse::Ok().json(webhook))
}
#[utoipa::path(
post,
path = "",
tag = "Webhooks",
request_body = WebhookSettings,
security(("Bearer" = ["webhooks.create", "webhooks.*", "*"])),
responses(
(status = 201, description = "Webhook created successfully", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
"Authorization": "Bearer a-secret-token"
},
"isEditable": true
})),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json"),
(status = 400, description = "Bad request", body = ResponseError, content_type = "application/json"),
)
)]
async fn post_webhook(
index_scheduler: GuardedData<ActionPolicy<{ actions::WEBHOOKS_CREATE }>, Data<IndexScheduler>>,
webhook_settings: AwebJson<WebhookSettings, DeserrJsonError>,
req: HttpRequest,
analytics: Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let webhook_settings = webhook_settings.into_inner();
debug!(parameters = ?webhook_settings, "Post webhook");
let uuid = Uuid::new_v4();
if webhook_settings.headers.as_ref().set().is_some_and(|h| h.len() > 200) {
return Err(TooManyHeaders(uuid).into());
}
let mut webhooks = index_scheduler.retrieve_runtime_webhooks();
if webhooks.len() >= 20 {
return Err(TooManyWebhooks.into());
}
let webhook = Webhook {
url: webhook_settings.url.set().ok_or(MissingUrl(uuid))?,
headers: webhook_settings
.headers
.set()
.map(|h| h.into_iter().map(|(k, v)| (k, v.set().unwrap_or_default())).collect())
.unwrap_or_default(),
};
check_changed(uuid, &webhook)?;
webhooks.insert(uuid, webhook.clone());
index_scheduler.update_runtime_webhooks(webhooks)?;
analytics.publish(PostWebhooksAnalytics, &req);
let response = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?response, "Post webhook");
Ok(HttpResponse::Created().json(response))
}
#[utoipa::path(
patch,
path = "/{uuid}",
tag = "Webhooks",
request_body = WebhookSettings,
security(("Bearer" = ["webhooks.update", "webhooks.*", "*"])),
responses(
(status = 200, description = "Webhook updated successfully", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
"Authorization": "Bearer a-secret-token"
},
"isEditable": true
})),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json"),
(status = 400, description = "Bad request", body = ResponseError, content_type = "application/json"),
),
params(
("uuid" = Uuid, Path, description = "The universally unique identifier of the webhook")
)
)]
async fn patch_webhook(
index_scheduler: GuardedData<ActionPolicy<{ actions::WEBHOOKS_UPDATE }>, Data<IndexScheduler>>,
uuid: Path<String>,
webhook_settings: AwebJson<WebhookSettings, DeserrJsonError>,
req: HttpRequest,
analytics: Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let uuid = Uuid::from_str(&uuid.into_inner()).map_err(InvalidUuid)?;
let webhook_settings = webhook_settings.into_inner();
debug!(parameters = ?(uuid, &webhook_settings), "Patch webhook");
if uuid.is_nil() {
return Err(ImmutableWebhook(uuid).into());
}
let mut webhooks = index_scheduler.retrieve_runtime_webhooks();
let old_webhook = webhooks.remove(&uuid).ok_or(WebhookNotFound(uuid))?;
let webhook = patch_webhook_inner(&uuid, old_webhook, webhook_settings)?;
check_changed(uuid, &webhook)?;
webhooks.insert(uuid, webhook.clone());
index_scheduler.update_runtime_webhooks(webhooks)?;
analytics.publish(PatchWebhooksAnalytics, &req);
let response = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?response, "Patch webhook");
Ok(HttpResponse::Ok().json(response))
}
#[utoipa::path(
delete,
path = "/{uuid}",
tag = "Webhooks",
security(("Bearer" = ["webhooks.delete", "webhooks.*", "*"])),
responses(
(status = 204, description = "Webhook deleted successfully"),
(status = 404, description = "Webhook not found", body = ResponseError, content_type = "application/json"),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json"),
),
params(
("uuid" = Uuid, Path, description = "The universally unique identifier of the webhook")
)
)]
async fn delete_webhook(
index_scheduler: GuardedData<ActionPolicy<{ actions::WEBHOOKS_DELETE }>, Data<IndexScheduler>>,
uuid: Path<String>,
) -> Result<HttpResponse, ResponseError> {
let uuid = Uuid::from_str(&uuid.into_inner()).map_err(InvalidUuid)?;
debug!(parameters = ?uuid, "Delete webhook");
if uuid.is_nil() {
return Err(ImmutableWebhook(uuid).into());
}
let mut webhooks = index_scheduler.retrieve_runtime_webhooks();
webhooks.remove(&uuid).ok_or(WebhookNotFound(uuid))?;
index_scheduler.update_runtime_webhooks(webhooks)?;
debug!(returns = "No Content", "Delete webhook");
Ok(HttpResponse::NoContent().finish())
}


@@ -421,7 +421,7 @@ async fn error_add_api_key_invalid_parameters_actions() {
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r#"
{
-"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`",
+"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"


@@ -304,7 +304,7 @@ async fn access_authorized_stats_restricted_index() {
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
// create key with access on `products` index only.
let content = json!({
@@ -344,7 +344,7 @@ async fn access_authorized_stats_no_index_restriction() {
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
// create key with access on all indexes.
let content = json!({
@@ -384,7 +384,7 @@ async fn list_authorized_indexes_restricted_index() {
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
// create key with access on `products` index only.
let content = json!({
@@ -425,7 +425,7 @@ async fn list_authorized_indexes_no_index_restriction() {
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
// create key with access on all indexes.
let content = json!({
@@ -507,10 +507,10 @@ async fn access_authorized_index_patterns() {
server.use_api_key(MASTER_KEY);
-// refer to products_1 with modified api key.
+// refer to products_1 with a modified api key.
let index_1 = server.index("products_1");
-index_1.wait_task(task_id).await;
+server.wait_task(task_id).await;
let (response, code) = index_1.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
@@ -578,19 +578,19 @@ async fn raise_error_non_authorized_index_patterns() {
assert_eq!(202, code, "{:?}", &response);
let task2_id = response["taskUid"].as_u64().unwrap();
-// Adding document to test index. Should Fail with 403 -- invalid_api_key
+// Adding a document to test index. Should Fail with 403 -- invalid_api_key
let (response, code) = test_index.add_documents(documents, None).await;
assert_eq!(403, code, "{:?}", &response);
server.use_api_key(MASTER_KEY);
-// refer to products_1 with modified api key.
+// refer to products_1 with a modified api key.
let product_1_index = server.index("products_1");
-// refer to products_2 with modified api key.
-let product_2_index = server.index("products_2");
-product_1_index.wait_task(task1_id).await;
-product_2_index.wait_task(task2_id).await;
+// refer to products_2 with a modified api key.
+// let product_2_index = server.index("products_2");
+server.wait_task(task1_id).await;
+server.wait_task(task2_id).await;
let (response, code) = product_1_index.get_task(task1_id).await;
assert_eq!(200, code, "{:?}", &response);
@@ -603,7 +603,7 @@ async fn raise_error_non_authorized_index_patterns() {
#[actix_rt::test]
async fn pattern_indexes() {
-// Create server with master key
+// Create a server with master key
let mut server = Server::new_auth().await;
server.use_admin_key(MASTER_KEY).await;
@@ -650,7 +650,7 @@ async fn list_authorized_tasks_restricted_index() {
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
// create key with access on `products` index only.
let content = json!({
@@ -690,7 +690,7 @@ async fn list_authorized_tasks_no_index_restriction() {
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
// create key with access on all indexes.
let content = json!({
@@ -757,7 +757,7 @@ async fn error_creating_index_without_action() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-let response = index.wait_task(task_id).await;
+let response = server.wait_task(task_id).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_error.clone());
@@ -768,7 +768,7 @@ async fn error_creating_index_without_action() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-let response = index.wait_task(task_id).await;
+let response = server.wait_task(task_id).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_error.clone());
@@ -778,7 +778,7 @@ async fn error_creating_index_without_action() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-let response = index.wait_task(task_id).await;
+let response = server.wait_task(task_id).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_error.clone());
@@ -830,7 +830,7 @@ async fn lazy_create_index() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
@@ -844,7 +844,7 @@ async fn lazy_create_index() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
@@ -856,7 +856,7 @@ async fn lazy_create_index() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
@@ -911,7 +911,7 @@ async fn lazy_create_index_from_pattern() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
@@ -929,7 +929,7 @@ async fn lazy_create_index_from_pattern() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
@@ -949,7 +949,7 @@ async fn lazy_create_index_from_pattern() {
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
-index.wait_task(task_id).await;
+server.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);


@@ -93,7 +93,7 @@ async fn create_api_key_bad_actions() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
-"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`",
+"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"


@@ -100,11 +100,11 @@ macro_rules! compute_authorized_search {
let index = server.index("sales"); let index = server.index("sales");
let documents = DOCUMENTS.clone(); let documents = DOCUMENTS.clone();
let (task1,_status_code) = index.add_documents(documents, None).await; let (task1,_status_code) = index.add_documents(documents, None).await;
index.wait_task(task1.uid()).await.succeeded(); server.wait_task(task1.uid()).await.succeeded();
let (task2,_status_code) = index let (task2,_status_code) = index
.update_settings(json!({"filterableAttributes": ["color"]})) .update_settings(json!({"filterableAttributes": ["color"]}))
.await; .await;
index.wait_task(task2.uid()).await.succeeded(); server.wait_task(task2.uid()).await.succeeded();
drop(index); drop(index);
for key_content in ACCEPTED_KEYS.iter() { for key_content in ACCEPTED_KEYS.iter() {
@@ -147,7 +147,7 @@ macro_rules! compute_forbidden_search {
let index = server.index("sales"); let index = server.index("sales");
let documents = DOCUMENTS.clone(); let documents = DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await; let (task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
drop(index); drop(index);
for key_content in $parent_keys.iter() { for key_content in $parent_keys.iter() {


@@ -268,21 +268,21 @@ macro_rules! compute_authorized_single_search {
let index = server.index("sales"); let index = server.index("sales");
let documents = DOCUMENTS.clone(); let documents = DOCUMENTS.clone();
let (add_task,_status_code) = index.add_documents(documents, None).await; let (add_task,_status_code) = index.add_documents(documents, None).await;
index.wait_task(add_task.uid()).await.succeeded(); server.wait_task(add_task.uid()).await.succeeded();
let (update_task,_status_code) = index let (update_task,_status_code) = index
.update_settings(json!({"filterableAttributes": ["color"]})) .update_settings(json!({"filterableAttributes": ["color"]}))
.await; .await;
index.wait_task(update_task.uid()).await.succeeded(); server.wait_task(update_task.uid()).await.succeeded();
drop(index); drop(index);
let index = server.index("products"); let index = server.index("products");
let documents = NESTED_DOCUMENTS.clone(); let documents = NESTED_DOCUMENTS.clone();
let (add_task2,_status_code) = index.add_documents(documents, None).await; let (add_task2,_status_code) = index.add_documents(documents, None).await;
index.wait_task(add_task2.uid()).await.succeeded(); server.wait_task(add_task2.uid()).await.succeeded();
let (update_task2,_status_code) = index let (update_task2,_status_code) = index
.update_settings(json!({"filterableAttributes": ["doggos"]})) .update_settings(json!({"filterableAttributes": ["doggos"]}))
.await; .await;
index.wait_task(update_task2.uid()).await.succeeded(); server.wait_task(update_task2.uid()).await.succeeded();
drop(index); drop(index);
@@ -339,21 +339,21 @@ macro_rules! compute_authorized_multiple_search {
let index = server.index("sales"); let index = server.index("sales");
let documents = DOCUMENTS.clone(); let documents = DOCUMENTS.clone();
let (task,_status_code) = index.add_documents(documents, None).await; let (task,_status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
let (task,_status_code) = index let (task,_status_code) = index
.update_settings(json!({"filterableAttributes": ["color"]})) .update_settings(json!({"filterableAttributes": ["color"]}))
.await; .await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
drop(index); drop(index);
let index = server.index("products"); let index = server.index("products");
let documents = NESTED_DOCUMENTS.clone(); let documents = NESTED_DOCUMENTS.clone();
let (task,_status_code) = index.add_documents(documents, None).await; let (task,_status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
let (task,_status_code) = index let (task,_status_code) = index
.update_settings(json!({"filterableAttributes": ["doggos"]})) .update_settings(json!({"filterableAttributes": ["doggos"]}))
.await; .await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
drop(index); drop(index);
@@ -423,21 +423,21 @@ macro_rules! compute_forbidden_single_search {
let index = server.index("sales"); let index = server.index("sales");
let documents = DOCUMENTS.clone(); let documents = DOCUMENTS.clone();
let (task,_status_code) = index.add_documents(documents, None).await; let (task,_status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
let (task,_status_code) = index let (task,_status_code) = index
.update_settings(json!({"filterableAttributes": ["color"]})) .update_settings(json!({"filterableAttributes": ["color"]}))
.await; .await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
drop(index); drop(index);
let index = server.index("products"); let index = server.index("products");
let documents = NESTED_DOCUMENTS.clone(); let documents = NESTED_DOCUMENTS.clone();
let (task,_status_code) = index.add_documents(documents, None).await; let (task,_status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
let (task,_status_code) = index let (task,_status_code) = index
.update_settings(json!({"filterableAttributes": ["doggos"]})) .update_settings(json!({"filterableAttributes": ["doggos"]}))
.await; .await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
drop(index); drop(index);
assert_eq!($parent_keys.len(), $failed_query_indexes.len(), "keys != query_indexes"); assert_eq!($parent_keys.len(), $failed_query_indexes.len(), "keys != query_indexes");
@@ -499,21 +499,21 @@ macro_rules! compute_forbidden_multiple_search {
let index = server.index("sales"); let index = server.index("sales");
let documents = DOCUMENTS.clone(); let documents = DOCUMENTS.clone();
let (task,_status_code) = index.add_documents(documents, None).await; let (task,_status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
let (task,_status_code) = index let (task,_status_code) = index
.update_settings(json!({"filterableAttributes": ["color"]})) .update_settings(json!({"filterableAttributes": ["color"]}))
.await; .await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
drop(index); drop(index);
let index = server.index("products"); let index = server.index("products");
let documents = NESTED_DOCUMENTS.clone(); let documents = NESTED_DOCUMENTS.clone();
let (task,_status_code) = index.add_documents(documents, None).await; let (task,_status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
let (task,_status_code) = index let (task,_status_code) = index
.update_settings(json!({"filterableAttributes": ["doggos"]})) .update_settings(json!({"filterableAttributes": ["doggos"]}))
.await; .await;
index.wait_task(task.uid()).await.succeeded(); server.wait_task(task.uid()).await.succeeded();
drop(index); drop(index);
assert_eq!($parent_keys.len(), $failed_query_indexes.len(), "keys != query_indexes"); assert_eq!($parent_keys.len(), $failed_query_indexes.len(), "keys != query_indexes");

File diff suppressed because it is too large.


@@ -1,15 +1,13 @@
use std::fmt::Write;
use std::marker::PhantomData;
use std::panic::{catch_unwind, resume_unwind, UnwindSafe};
-use std::time::Duration;
use actix_web::http::StatusCode;
-use tokio::time::sleep;
use urlencoding::encode as urlencode;
use super::encoder::Encoder;
use super::service::Service;
-use super::{Owned, Shared, Value};
+use super::{Owned, Server, Shared, Value};
use crate::json;
pub struct Index<'a, State = Owned> {
@@ -33,7 +31,7 @@ impl<'a> Index<'a, Owned> {
Index { uid: self.uid.clone(), service: self.service, encoder, marker: PhantomData }
}
-pub async fn load_test_set(&self) -> u64 {
+pub async fn load_test_set<State>(&self, waiter: &Server<State>) -> u64 {
let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref()));
let (response, code) = self
.service
@@ -44,12 +42,12 @@ impl<'a> Index<'a, Owned> {
)
.await;
assert_eq!(code, 202);
-let update_id = response["taskUid"].as_i64().unwrap();
-self.wait_task(update_id as u64).await;
-update_id as u64
+let update_id = response["taskUid"].as_u64().unwrap();
+waiter.wait_task(update_id).await;
+update_id
}
-pub async fn load_test_set_ndjson(&self) -> u64 {
+pub async fn load_test_set_ndjson<State>(&self, waiter: &Server<State>) -> u64 {
let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref()));
let (response, code) = self
.service
@@ -60,9 +58,9 @@ impl<'a> Index<'a, Owned> {
)
.await;
assert_eq!(code, 202);
-let update_id = response["taskUid"].as_i64().unwrap();
-self.wait_task(update_id as u64).await;
-update_id as u64
+let update_id = response["taskUid"].as_u64().unwrap();
+waiter.wait_task(update_id).await;
+update_id
}
pub async fn create(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
@@ -251,6 +249,11 @@ impl<'a> Index<'a, Owned> {
self.service.put_encoded(url, settings, self.encoder).await
}
+pub async fn update_settings_chat(&self, settings: Value) -> (Value, StatusCode) {
+let url = format!("/indexes/{}/settings/chat", urlencode(self.uid.as_ref()));
+self.service.patch_encoded(url, settings, self.encoder).await
+}
pub async fn delete_settings(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}/settings", urlencode(self.uid.as_ref()));
self.service.delete(url).await
@@ -267,10 +270,14 @@ impl Index<'_, Shared> {
/// You cannot modify the content of a shared index, thus the delete_document_by_filter call
/// must fail. If the task successfully enqueues itself, we'll wait for the task to finish,
/// and if it succeeds the function will panic.
-pub async fn delete_document_by_filter_fail(&self, body: Value) -> (Value, StatusCode) {
+pub async fn delete_document_by_filter_fail<State>(
+&self,
+body: Value,
+waiter: &Server<State>,
+) -> (Value, StatusCode) {
let (mut task, code) = self._delete_document_by_filter(body).await;
if code.is_success() {
-task = self.wait_task(task.uid()).await;
+task = waiter.wait_task(task.uid()).await;
if task.is_success() {
panic!(
"`delete_document_by_filter_fail` succeeded: {}",
@@ -281,10 +288,10 @@ impl Index<'_, Shared> {
(task, code)
}
-pub async fn delete_index_fail(&self) -> (Value, StatusCode) {
+pub async fn delete_index_fail<State>(&self, waiter: &Server<State>) -> (Value, StatusCode) {
let (mut task, code) = self._delete().await;
if code.is_success() {
-task = self.wait_task(task.uid()).await;
+task = waiter.wait_task(task.uid()).await;
if task.is_success() {
panic!(
"`delete_index_fail` succeeded: {}",
@@ -295,10 +302,14 @@ impl Index<'_, Shared> {
(task, code)
}
-pub async fn update_index_fail(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
+pub async fn update_index_fail<State>(
+&self,
+primary_key: Option<&str>,
+waiter: &Server<State>,
+) -> (Value, StatusCode) {
let (mut task, code) = self._update(primary_key).await;
if code.is_success() {
-task = self.wait_task(task.uid()).await;
+task = waiter.wait_task(task.uid()).await;
if task.is_success() {
panic!(
"`update_index_fail` succeeded: {}",
@@ -364,23 +375,6 @@ impl<State> Index<'_, State> {
self.service.delete(url).await
}
-pub async fn wait_task(&self, update_id: u64) -> Value {
-// try several times to get status, or panic to not wait forever
-let url = format!("/tasks/{}", update_id);
-for _ in 0..100 {
-let (response, status_code) = self.service.get(&url).await;
-assert_eq!(200, status_code, "response: {}", response);
-if response["status"] == "succeeded" || response["status"] == "failed" {
-return response;
-}
-// wait 0.5 second.
-sleep(Duration::from_millis(500)).await;
-}
-panic!("Timeout waiting for update id");
-}
pub async fn get_task(&self, update_id: u64) -> (Value, StatusCode) {
let url = format!("/tasks/{}", update_id);
self.service.get(url).await


@@ -3,10 +3,8 @@ pub mod index;
pub mod server;
pub mod service;
-use std::{
-collections::BTreeMap,
-fmt::{self, Display},
-};
+use std::collections::BTreeMap;
+use std::fmt::{self, Display};
use actix_http::StatusCode;
#[allow(unused)]
@@ -17,10 +15,8 @@ use serde::{Deserialize, Serialize};
#[allow(unused)]
pub use server::{default_settings, Server};
use tokio::sync::OnceCell;
-use wiremock::{
-matchers::{method, path},
-Mock, MockServer, Request, ResponseTemplate,
-};
+use wiremock::matchers::{method, path};
+use wiremock::{Mock, MockServer, Request, ResponseTemplate};
use crate::common::index::Index;
@@ -46,6 +42,15 @@ impl Value {
self["uid"].as_u64().is_some() || self["taskUid"].as_u64().is_some() self["uid"].as_u64().is_some() || self["taskUid"].as_u64().is_some()
} }
#[track_caller]
pub fn batch_uid(&self) -> u32 {
if let Some(batch_uid) = self["batchUid"].as_u64() {
batch_uid as u32
} else {
panic!("Didn't find `batchUid` in: {self}");
}
}
/// Return `true` if the `status` field is set to `succeeded`. /// Return `true` if the `status` field is set to `succeeded`.
/// Panic if the `status` field doesn't exists. /// Panic if the `status` field doesn't exists.
#[track_caller] #[track_caller]
@@ -189,7 +194,7 @@ pub async fn shared_empty_index() -> &'static Index<'static, Shared> {
let server = Server::new_shared();
let index = server._index("EMPTY_INDEX").to_shared();
let (response, _code) = index._create(None).await;
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
index
})
.await
@@ -237,13 +242,13 @@ pub async fn shared_index_with_documents() -> &'static Index<'static, Shared> {
let index = server._index("SHARED_DOCUMENTS").to_shared(); let index = server._index("SHARED_DOCUMENTS").to_shared();
let documents = DOCUMENTS.clone(); let documents = DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None).await; let (response, _code) = index._add_documents(documents, None).await;
index.wait_task(response.uid()).await.succeeded(); server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index let (response, _code) = index
._update_settings( ._update_settings(
json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}), json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}),
) )
.await; .await;
index.wait_task(response.uid()).await.succeeded(); server.wait_task(response.uid()).await.succeeded();
index index
}).await }).await
} }
@@ -280,13 +285,13 @@ pub async fn shared_index_with_score_documents() -> &'static Index<'static, Shar
let index = server._index("SHARED_SCORE_DOCUMENTS").to_shared(); let index = server._index("SHARED_SCORE_DOCUMENTS").to_shared();
let documents = SCORE_DOCUMENTS.clone(); let documents = SCORE_DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None).await; let (response, _code) = index._add_documents(documents, None).await;
index.wait_task(response.uid()).await.succeeded(); server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index let (response, _code) = index
._update_settings( ._update_settings(
json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}), json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}),
) )
.await; .await;
index.wait_task(response.uid()).await.succeeded(); server.wait_task(response.uid()).await.succeeded();
index index
}).await }).await
} }
@@ -357,13 +362,13 @@ pub async fn shared_index_with_nested_documents() -> &'static Index<'static, Sha
let index = server._index("SHARED_NESTED_DOCUMENTS").to_shared(); let index = server._index("SHARED_NESTED_DOCUMENTS").to_shared();
let documents = NESTED_DOCUMENTS.clone(); let documents = NESTED_DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None).await; let (response, _code) = index._add_documents(documents, None).await;
index.wait_task(response.uid()).await.succeeded(); server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index let (response, _code) = index
._update_settings( ._update_settings(
json!({"filterableAttributes": ["father", "doggos", "cattos"], "sortableAttributes": ["doggos"]}), json!({"filterableAttributes": ["father", "doggos", "cattos"], "sortableAttributes": ["doggos"]}),
) )
.await; .await;
index.wait_task(response.uid()).await.succeeded(); server.wait_task(response.uid()).await.succeeded();
index index
}).await }).await
} }
@@ -457,7 +462,7 @@ pub async fn shared_index_with_test_set() -> &'static Index<'static, Shared> {
)
.await;
assert_eq!(code, 202);
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
index
})
.await
@@ -504,14 +509,14 @@ pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared
let server = Server::new_shared();
let index = server._index("SHARED_GEO_DOCUMENTS").to_shared();
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await;
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
json!({"filterableAttributes": ["_geo"], "sortableAttributes": ["_geo"]}),
)
.await;
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
index
})
.await
@@ -609,7 +614,7 @@ pub async fn init_fragments_index() -> (Server<Owned>, String, crate::common::Va
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
assert_eq!(code, StatusCode::ACCEPTED); assert_eq!(code, StatusCode::ACCEPTED);
let _task = index.wait_task(value.uid()).await.succeeded(); let _task = server.wait_task(value.uid()).await.succeeded();
let uid = index.uid.clone(); let uid = index.uid.clone();
(server, uid, settings) (server, uid, settings)
@@ -674,7 +679,7 @@ pub async fn init_fragments_index_composite() -> (Server<Owned>, String, crate::
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
assert_eq!(code, StatusCode::ACCEPTED); assert_eq!(code, StatusCode::ACCEPTED);
index.wait_task(value.uid()).await.succeeded(); server.wait_task(value.uid()).await.succeeded();
let uid = index.uid.clone(); let uid = index.uid.clone();
(server, uid, settings) (server, uid, settings)
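These shared fixtures are built once and reused across tests, which is why every wait now goes through the server rather than the index. A minimal sketch of a test consuming one of them, read-only since the fixture is shared (`shared_index_with_documents` and `Index::search` both appear elsewhere in this compare; the query string is illustrative):

// Sketch only: read-only use of a shared fixture.
#[actix_rt::test]
async fn search_on_shared_fixture_sketch() {
    let index = shared_index_with_documents().await;
    index
        .search(json!({ "q": "glass" }), |response, code| {
            assert_eq!(code, 200);
            // The fixture is pre-populated, so hits should be an array.
            assert!(response["hits"].as_array().is_some());
        })
        .await;
}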

View File

@@ -182,6 +182,25 @@ impl Server<Owned> {
self.service.patch("/network", value).await
}
+pub async fn create_webhook(&self, value: Value) -> (Value, StatusCode) {
+self.service.post("/webhooks", value).await
+}
+pub async fn get_webhook(&self, uuid: impl AsRef<str>) -> (Value, StatusCode) {
+let url = format!("/webhooks/{}", uuid.as_ref());
+self.service.get(url).await
+}
+pub async fn delete_webhook(&self, uuid: impl AsRef<str>) -> (Value, StatusCode) {
+let url = format!("/webhooks/{}", uuid.as_ref());
+self.service.delete(url).await
+}
+pub async fn patch_webhook(&self, uuid: impl AsRef<str>, value: Value) -> (Value, StatusCode) {
+let url = format!("/webhooks/{}", uuid.as_ref());
+self.service.patch(url, value).await
+}
pub async fn get_metrics(&self) -> (Value, StatusCode) {
self.service.get("/metrics").await
}

@@ -409,12 +428,12 @@ impl<State> Server<State> {
pub async fn wait_task(&self, update_id: u64) -> Value {
// try several times to get status, or panic to not wait forever
-let url = format!("/tasks/{}", update_id);
-let max_attempts = 400; // 200 seconds total, 0.5s per attempt
+let url = format!("/tasks/{update_id}");
+let max_attempts = 400; // 200 seconds in total, 0.5secs per attempt
for i in 0..max_attempts {
-let (response, status_code) = self.service.get(&url).await;
-assert_eq!(200, status_code, "response: {}", response);
+let (response, status_code) = self.service.get(url.clone()).await;
+assert_eq!(200, status_code, "response: {response}");
if response["status"] == "succeeded" || response["status"] == "failed" {
return response;

@@ -447,6 +466,10 @@ impl<State> Server<State> {
pub async fn get_network(&self) -> (Value, StatusCode) {
self.service.get("/network").await
}
+pub async fn get_webhooks(&self) -> (Value, StatusCode) {
+self.service.get("/webhooks").await
+}
}
pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
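Together with `get_webhooks` above, these helpers give tests full CRUD coverage of the `/webhooks` routes. A sketch of a round trip, assuming only the helper signatures shown in this hunk (the URL and header values are illustrative assumptions):

// Sketch only: create/patch/list/delete over the new webhook helpers.
#[actix_web::test]
async fn webhook_roundtrip_sketch() {
    let server = Server::new().await;
    let (created, code) = server.create_webhook(json!({ "url": "http://localhost:8080" })).await;
    assert_eq!(code, 201);
    let uuid = created["uuid"].as_str().unwrap().to_string();
    // Add a header through PATCH, then check the webhook is listed.
    let (_, code) =
        server.patch_webhook(&uuid, json!({ "headers": { "authorization": "TOKEN" } })).await;
    assert_eq!(code, 200);
    let (webhooks, _) = server.get_webhooks().await;
    assert!(webhooks["results"].as_array().unwrap().iter().any(|w| w["uuid"] == uuid.as_str()));
    // Clean up.
    let (_, code) = server.delete_webhook(&uuid).await;
    assert_eq!(code, 204);
}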

View File

@@ -1318,7 +1318,7 @@ async fn add_no_documents() {
async fn add_larger_dataset() {
let server = Server::new_shared();
let index = server.unique_index();
-let update_id = index.load_test_set().await;
+let update_id = index.load_test_set(server).await;
let (response, code) = index.get_task(update_id).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");

@@ -1333,7 +1333,7 @@ async fn add_larger_dataset() {
// x-ndjson add large test
let index = server.unique_index();
-let update_id = index.load_test_set_ndjson().await;
+let update_id = index.load_test_set_ndjson(server).await;
let (response, code) = index.get_task(update_id).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");

View File

@@ -7,7 +7,8 @@ use crate::json;
async fn delete_one_document_unexisting_index() {
let server = Server::new_shared();
let index = shared_does_not_exists_index().await;
-let (task, code) = index.delete_document_by_filter_fail(json!({"filter": "a = b"})).await;
+let (task, code) =
+index.delete_document_by_filter_fail(json!({"filter": "a = b"}), server).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.failed();

View File

@@ -559,7 +559,7 @@ async fn delete_document_by_filter() {
let index = shared_does_not_exists_index().await;
// index does not exists
let (response, _code) =
-index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"})).await;
+index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"}), server).await;
snapshot!(response, @r###"
{
"uid": "[uid]",

@@ -589,7 +589,7 @@ async fn delete_document_by_filter() {
// no filterable are set
let index = shared_empty_index().await;
let (response, _code) =
-index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"})).await;
+index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"}), server).await;
snapshot!(response, @r###"
{
"uid": "[uid]",

@@ -619,7 +619,7 @@ async fn delete_document_by_filter() {
// not filterable while there is a filterable attribute
let index = shared_index_with_documents().await;
let (response, code) =
-index.delete_document_by_filter_fail(json!({ "filter": "catto = jorts"})).await;
+index.delete_document_by_filter_fail(json!({ "filter": "catto = jorts"}), server).await;
snapshot!(code, @"202 Accepted");
let response = server.wait_task(response.uid()).await.failed();
snapshot!(response, @r###"

View File

@@ -87,7 +87,7 @@ async fn get_document() {
async fn get_document_sorted() {
let server = Server::new_shared();
let index = server.unique_index();
-index.load_test_set().await;
+index.load_test_set(server).await;
let (task, _status_code) =
index.update_settings_sortable_attributes(json!(["age", "email", "gender", "name"])).await;

@@ -639,7 +639,7 @@ async fn get_document_s_nested_attributes_to_retrieve() {
async fn get_documents_displayed_attributes_is_ignored() {
let server = Server::new_shared();
let index = server.unique_index();
-index.load_test_set().await;
+index.load_test_set(server).await;
index.update_settings(json!({"displayedAttributes": ["gender"]})).await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;

View File

@@ -2366,7 +2366,7 @@ async fn generate_and_import_dump_containing_vectors() {
))
.await;
snapshot!(code, @"202 Accepted");
-let response = index.wait_task(response.uid()).await;
+let response = server.wait_task(response.uid()).await;
snapshot!(response);
let (response, code) = index
.add_documents(

@@ -2381,12 +2381,12 @@ async fn generate_and_import_dump_containing_vectors() {
)
.await;
snapshot!(code, @"202 Accepted");
-let response = index.wait_task(response.uid()).await;
+let response = server.wait_task(response.uid()).await;
snapshot!(response);
let (response, code) = server.create_dump().await;
snapshot!(code, @"202 Accepted");
-let response = index.wait_task(response.uid()).await;
+let response = server.wait_task(response.uid()).await;
snapshot!(response["status"], @r###""succeeded""###);
// ========= We made a dump, now we should clear the DB and try to import our dump

View File

@@ -161,9 +161,9 @@ async fn test_create_multiple_indexes() {
let (task2, _) = index2.create(None).await;
let (task3, _) = index3.create(None).await;
-index1.wait_task(task1.uid()).await.succeeded();
-index2.wait_task(task2.uid()).await.succeeded();
-index3.wait_task(task3.uid()).await.succeeded();
+server.wait_task(task1.uid()).await.succeeded();
+server.wait_task(task2.uid()).await.succeeded();
+server.wait_task(task3.uid()).await.succeeded();
assert_eq!(index1.get().await.1, 200);
assert_eq!(index2.get().await.1, 200);

View File

@@ -26,7 +26,7 @@ async fn create_and_delete_index() {
async fn error_delete_unexisting_index() {
let server = Server::new_shared();
let index = shared_does_not_exists_index().await;
-let (task, code) = index.delete_index_fail().await;
+let (task, code) = index.delete_index_fail(server).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.failed();

View File

@@ -60,8 +60,8 @@ async fn list_multiple_indexes() {
let index_with_key = server.unique_index();
let (response_with_key, _status_code) = index_with_key.create(Some("key")).await;
-index_without_key.wait_task(response_without_key.uid()).await.succeeded();
-index_with_key.wait_task(response_with_key.uid()).await.succeeded();
+server.wait_task(response_without_key.uid()).await.succeeded();
+server.wait_task(response_with_key.uid()).await.succeeded();
let (response, code) = server.list_indexes(None, Some(1000)).await;
assert_eq!(code, 200);

@@ -81,8 +81,9 @@ async fn get_and_paginate_indexes() {
let server = Server::new().await;
const NB_INDEXES: usize = 50;
for i in 0..NB_INDEXES {
-server.index(format!("test_{i:02}")).create(None).await;
-server.index(format!("test_{i:02}")).wait_task(i as u64).await;
+let (task, code) = server.index(format!("test_{i:02}")).create(None).await;
+assert_eq!(code, 202);
+server.wait_task(task.uid()).await;
}
// basic
View File

@@ -72,7 +72,7 @@ async fn error_update_existing_primary_key() {
let server = Server::new_shared();
let index = shared_index_with_documents().await;
-let (update_task, code) = index.update_index_fail(Some("primary")).await;
+let (update_task, code) = index.update_index_fail(Some("primary"), server).await;
assert_eq!(code, 202);
let response = server.wait_task(update_task.uid()).await.failed();

@@ -91,7 +91,7 @@ async fn error_update_existing_primary_key() {
async fn error_update_unexisting_index() {
let server = Server::new_shared();
let index = shared_does_not_exists_index().await;
-let (task, code) = index.update_index_fail(Some("my-primary-key")).await;
+let (task, code) = index.update_index_fail(Some("my-primary-key"), server).await;
assert_eq!(code, 202);

View File

@@ -1270,27 +1270,27 @@ async fn search_with_contains_without_enabling_the_feature() {
index
.search(json!({ "filter": "doggo CONTAINS kefir" }), |response, code| {
snapshot!(code, @"400 Bad Request");
-snapshot!(json_string!(response), @r###"
+snapshot!(json_string!(response), @r#"
{
-"message": "Using `CONTAINS` or `STARTS WITH` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n7:15 doggo CONTAINS kefir",
+"message": "Using `CONTAINS` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n7:15 doggo CONTAINS kefir",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
-"###);
+"#);
})
.await;
index
.search(json!({ "filter": "doggo != echo AND doggo CONTAINS kefir" }), |response, code| {
snapshot!(code, @"400 Bad Request");
-snapshot!(json_string!(response), @r###"
+snapshot!(json_string!(response), @r#"
{
-"message": "Using `CONTAINS` or `STARTS WITH` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n25:33 doggo != echo AND doggo CONTAINS kefir",
+"message": "Using `CONTAINS` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n25:33 doggo != echo AND doggo CONTAINS kefir",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
-"###);
+"#);
})
.await;

@@ -1299,24 +1299,24 @@ async fn search_with_contains_without_enabling_the_feature() {
index.search_post(json!({ "filter": ["doggo != echo", "doggo CONTAINS kefir"] })).await;
snapshot!(code, @"400 Bad Request");
-snapshot!(json_string!(response), @r###"
+snapshot!(json_string!(response), @r#"
{
-"message": "Using `CONTAINS` or `STARTS WITH` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n7:15 doggo CONTAINS kefir",
+"message": "Using `CONTAINS` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n7:15 doggo CONTAINS kefir",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
-"###);
+"#);
let (response, code) =
index.search_post(json!({ "filter": ["doggo != echo", ["doggo CONTAINS kefir"]] })).await;
snapshot!(code, @"400 Bad Request");
-snapshot!(json_string!(response), @r###"
+snapshot!(json_string!(response), @r#"
{
-"message": "Using `CONTAINS` or `STARTS WITH` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n7:15 doggo CONTAINS kefir",
+"message": "Using `CONTAINS` in a filter requires enabling the `contains filter` experimental feature. See https://github.com/orgs/meilisearch/discussions/763\n7:15 doggo CONTAINS kefir",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
-"###);
+"#);
}
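After this change only `CONTAINS` is gated; `STARTS WITH` is stabilized by the same pull request. A test that wants the operator first flips the experimental flag, roughly as follows (`set_features` appears elsewhere in this compare; the `containsFilter` key is an assumption based on the feature's name, and the documents are illustrative):

// Sketch only: with the flag on, the filter that returned 400 above goes through.
#[actix_rt::test]
async fn contains_with_feature_enabled_sketch() {
    let server = Server::new().await;
    let (_, code) = server.set_features(json!({ "containsFilter": true })).await;
    assert_eq!(code, 200);
    let index = server.unique_index();
    let (task, _) = index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.add_documents(json!([{ "id": 1, "doggo": "kefir" }]), None).await;
    server.wait_task(task.uid()).await.succeeded();
    index
        .search(json!({ "filter": "doggo CONTAINS kefir" }), |_response, code| {
            assert_eq!(code, 200);
        })
        .await;
}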

View File

@@ -158,11 +158,11 @@ async fn remote_sharding() {
let index1 = ms1.index("test");
let index2 = ms2.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index2.add_documents(json!(documents[3..5]), None).await;
-index2.wait_task(task.uid()).await.succeeded();
+ms2.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -454,9 +454,9 @@ async fn error_unregistered_remote() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -572,9 +572,9 @@ async fn error_no_weighted_score() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -705,9 +705,9 @@ async fn error_bad_response() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -842,9 +842,9 @@ async fn error_bad_request() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -972,10 +972,10 @@ async fn error_bad_request_facets_by_index() {
let index0 = ms0.index("test0");
let index1 = ms1.index("test1");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -1113,13 +1113,13 @@ async fn error_bad_request_facets_by_index_facet() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index0.update_settings_filterable_attributes(json!(["id"])).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -1224,6 +1224,7 @@ async fn error_bad_request_facets_by_index_facet() {
}
#[actix_rt::test]
+#[ignore]
async fn error_remote_does_not_answer() {
let ms0 = Server::new().await;
let ms1 = Server::new().await;

@@ -1262,9 +1263,9 @@ async fn error_remote_does_not_answer() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -1463,9 +1464,9 @@ async fn error_remote_404() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -1658,9 +1659,9 @@ async fn error_remote_sharding_auth() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
ms1.clear_api_key();

@@ -1818,9 +1819,9 @@ async fn remote_sharding_auth() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
ms1.clear_api_key();

@@ -1973,9 +1974,9 @@ async fn error_remote_500() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -2152,9 +2153,9 @@ async fn error_remote_500_once() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

@@ -2335,9 +2336,9 @@ async fn error_remote_timeout() {
let index0 = ms0.index("test");
let index1 = ms1.index("test");
let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await;
-index0.wait_task(task.uid()).await.succeeded();
+ms0.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await;
-index1.wait_task(task.uid()).await.succeeded();
+ms1.wait_task(task.uid()).await.succeeded();
// wrap servers
let ms0 = Arc::new(ms0);

View File

@@ -0,0 +1,66 @@
use crate::common::Server;
use crate::json;
use meili_snap::{json_string, snapshot};
#[actix_rt::test]
async fn set_reset_chat_issue_5772() {
let server = Server::new().await;
let index = server.unique_index();
let (_, code) = server
.set_features(json!({
"chatCompletions": true,
}))
.await;
snapshot!(code, @r#"200 OK"#);
let (task1, _code) = index.update_settings_chat(json!({
"description": "test!",
"documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
"documentTemplateMaxBytes": 400,
"searchParameters": {
"limit": 15,
"sort": [],
"attributesToSearchOn": []
}
})).await;
server.wait_task(task1.uid()).await.succeeded();
let (response, _) = index.settings().await;
snapshot!(json_string!(response["chat"]), @r#"
{
"description": "test!",
"documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
"documentTemplateMaxBytes": 400,
"searchParameters": {
"limit": 15,
"sort": [],
"attributesToSearchOn": []
}
}
"#);
let (task2, _status_code) = index.update_settings_chat(json!({
"description": "test!",
"documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
"documentTemplateMaxBytes": 400,
"searchParameters": {
"limit": 16
}
})).await;
server.wait_task(task2.uid()).await.succeeded();
let (response, _) = index.settings().await;
snapshot!(json_string!(response["chat"]), @r#"
{
"description": "test!",
"documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
"documentTemplateMaxBytes": 400,
"searchParameters": {
"limit": 16,
"sort": [],
"attributesToSearchOn": []
}
}
"#);
}
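The regression test above pins the behavior reported in #5772: now that the route is a `PATCH`, the second call sends only `searchParameters.limit`, and the snapshot shows `sort` and `attributesToSearchOn` surviving from the first update. Distilled into plain assertions (a sketch; payload values are illustrative):

// Sketch only: a partial PATCH must merge with, not replace, stored chat settings.
#[actix_rt::test]
async fn chat_settings_patch_merges_sketch() {
    let server = Server::new().await;
    let (_, code) = server.set_features(json!({ "chatCompletions": true })).await;
    assert_eq!(code, 200);
    let index = server.unique_index();
    let (task, _) = index
        .update_settings_chat(json!({ "searchParameters": { "limit": 15, "sort": [] } }))
        .await;
    server.wait_task(task.uid()).await.succeeded();
    // Only `limit` is sent; `sort` must survive the merge.
    let (task, _) =
        index.update_settings_chat(json!({ "searchParameters": { "limit": 16 } })).await;
    server.wait_task(task.uid()).await.succeeded();
    let (settings, _) = index.settings().await;
    assert_eq!(settings["chat"]["searchParameters"]["limit"], 16);
    assert!(settings["chat"]["searchParameters"]["sort"].as_array().unwrap().is_empty());
}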

View File

@@ -186,7 +186,7 @@ test_setting_routes!(
},
{
setting: chat,
-update_verb: put,
+update_verb: patch,
default_value: {
"description": "",
"documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",

View File

@@ -1,3 +1,4 @@
+mod chat;
mod distinct;
mod errors;
mod get_settings;

View File

@@ -298,7 +298,7 @@ async fn similar_bad_filter() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({ "id": 287947, "filter": true, "embedder": "manual" })).await;

@@ -335,7 +335,7 @@ async fn filter_invalid_syntax_object() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
index
.similar(json!({"id": 287947, "filter": "title & Glass", "embedder": "manual"}), |response, code| {

@@ -373,7 +373,7 @@ async fn filter_invalid_syntax_array() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
index
.similar(json!({"id": 287947, "filter": ["title & Glass"], "embedder": "manual"}), |response, code| {

@@ -411,7 +411,7 @@ async fn filter_invalid_syntax_string() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let expected_response = json!({
"message": "Found unexpected characters at the end of the filter: `XOR title = Glass`. You probably forgot an `OR` or an `AND` rule.\n15:32 title = Glass XOR title = Glass",

@@ -451,7 +451,7 @@ async fn filter_invalid_attribute_array() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
index
.similar(

@@ -492,7 +492,7 @@ async fn filter_invalid_attribute_string() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
index
.similar(

@@ -533,7 +533,7 @@ async fn filter_reserved_geo_attribute_array() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let expected_response = json!({
"message": "`_geo` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:13 _geo = Glass",

@@ -573,7 +573,7 @@ async fn filter_reserved_geo_attribute_string() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let expected_response = json!({
"message": "`_geo` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:13 _geo = Glass",

@@ -613,7 +613,7 @@ async fn filter_reserved_attribute_array() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let expected_response = json!({
"message": "`_geoDistance` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:21 _geoDistance = Glass",

@@ -653,7 +653,7 @@ async fn filter_reserved_attribute_string() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let expected_response = json!({
"message": "`_geoDistance` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:21 _geoDistance = Glass",

@@ -693,7 +693,7 @@ async fn filter_reserved_geo_point_array() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let expected_response = json!({
"message": "`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:18 _geoPoint = Glass",

@@ -733,7 +733,7 @@ async fn filter_reserved_geo_point_string() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let expected_response = json!({
"message": "`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:18 _geoPoint = Glass",

@@ -825,7 +825,7 @@ async fn similar_bad_embedder() {
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await;
+server.wait_task(value.uid()).await;
let expected_response = json!({
"message": "Cannot find embedder with name `auto`.",

View File

@@ -51,12 +51,12 @@ async fn perform_snapshot() {
}))
.await;
-index.load_test_set().await;
+index.load_test_set(&server).await;
let (task, code) = server.index("test1").create(Some("prim")).await;
meili_snap::snapshot!(code, @"202 Accepted");
-index.wait_task(task.uid()).await.succeeded();
+server.wait_task(task.uid()).await.succeeded();
// wait for the _next task_ to process, aka the snapshot that should be enqueued at some point

@@ -122,19 +122,15 @@ async fn perform_on_demand_snapshot() {
let server = Server::new_with_options(options).await.unwrap();
let index = server.index("catto");
-index
-.update_settings(json! ({
-"searchableAttributes": [],
-}))
-.await;
-index.load_test_set().await;
+index.update_settings(json! ({ "searchableAttributes": [] })).await;
+index.load_test_set(&server).await;
let (task, _status_code) = server.index("doggo").create(Some("bone")).await;
-index.wait_task(task.uid()).await.succeeded();
+server.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = server.index("doggo").create(Some("bone")).await;
-index.wait_task(task.uid()).await.failed();
+server.wait_task(task.uid()).await.failed();
let (task, code) = server.create_snapshot().await;
snapshot!(code, @"202 Accepted");

@@ -147,7 +143,7 @@ async fn perform_on_demand_snapshot() {
"enqueuedAt": "[date]"
}
"###);
-let task = index.wait_task(task.uid()).await;
+let task = server.wait_task(task.uid()).await;
snapshot!(json_string!(task, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 4,

@@ -203,3 +199,70 @@ async fn perform_on_demand_snapshot() {
server.index("doggo").settings(),
);
}
#[actix_rt::test]
#[cfg_attr(target_os = "windows", ignore)]
async fn snapshotception_issue_4653() {
let temp = tempfile::tempdir().unwrap();
let snapshot_dir = tempfile::tempdir().unwrap();
let options =
Opt { snapshot_dir: snapshot_dir.path().to_owned(), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let (task, code) = server.create_snapshot().await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(task, { ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": null,
"status": "enqueued",
"type": "snapshotCreation",
"enqueuedAt": "[date]"
}
"###);
server.wait_task(task.uid()).await.succeeded();
let temp = tempfile::tempdir().unwrap();
let snapshot_path = snapshot_dir.path().to_owned().join("db.snapshot");
let options = Opt { import_snapshot: Some(snapshot_path), ..default_settings(temp.path()) };
let snapshot_server = Server::new_with_options(options).await.unwrap();
// The snapshot should have been taken without the snapshot creation task
let (tasks, code) = snapshot_server.tasks().await;
snapshot!(code, @"200 OK");
snapshot!(tasks, @r#"
{
"results": [],
"total": 0,
"limit": 20,
"from": null,
"next": null
}
"#);
// Ensure the task is not present in the snapshot
let (task, code) = snapshot_server.get_task(0).await;
snapshot!(code, @"404 Not Found");
snapshot!(task, @r#"
{
"message": "Task `0` not found.",
"code": "task_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#task_not_found"
}
"#);
// Ensure the batch is also not present
let (batch, code) = snapshot_server.get_batch(0).await;
snapshot!(code, @"404 Not Found");
snapshot!(batch, @r#"
{
"message": "Batch `0` not found.",
"code": "batch_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#batch_not_found"
}
"#);
}

View File

@@ -32,7 +32,7 @@ async fn stats() {
let (task, code) = index.create(Some("id")).await;
assert_eq!(code, 202);
-index.wait_task(task.uid()).await.succeeded();
+server.wait_task(task.uid()).await.succeeded();
let (response, code) = server.stats().await;

@@ -58,7 +58,7 @@ async fn stats() {
assert_eq!(code, 202, "{response}");
assert_eq!(response["taskUid"], 1);
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let timestamp = OffsetDateTime::now_utc();
let (response, code) = server.stats().await;

@@ -107,7 +107,7 @@ async fn add_remove_embeddings() {
let (response, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (stats, _code) = index.stats().await;
snapshot!(json_string!(stats, {

@@ -135,7 +135,7 @@ async fn add_remove_embeddings() {
let (response, code) = index.update_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (stats, _code) = index.stats().await;
snapshot!(json_string!(stats, {

@@ -163,7 +163,7 @@ async fn add_remove_embeddings() {
let (response, code) = index.update_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (stats, _code) = index.stats().await;
snapshot!(json_string!(stats, {

@@ -192,7 +192,7 @@ async fn add_remove_embeddings() {
let (response, code) = index.update_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (stats, _code) = index.stats().await;
snapshot!(json_string!(stats, {

@@ -245,7 +245,7 @@ async fn add_remove_embedded_documents() {
let (response, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (stats, _code) = index.stats().await;
snapshot!(json_string!(stats, {

@@ -269,7 +269,7 @@ async fn add_remove_embedded_documents() {
// delete one embedded document, remaining 1 embedded documents for 3 embeddings in total
let (response, code) = index.delete_document(0).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (stats, _code) = index.stats().await;
snapshot!(json_string!(stats, {

@@ -305,7 +305,7 @@ async fn update_embedder_settings() {
let (response, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(response.uid()).await.succeeded();
+server.wait_task(response.uid()).await.succeeded();
let (stats, _code) = index.stats().await;
snapshot!(json_string!(stats, {

View File

@@ -2,16 +2,18 @@
//! post requests. The webhook handle starts a server and forwards all the
//! received requests into a channel for you to handle.
+use std::path::PathBuf;
use std::sync::Arc;
use actix_http::body::MessageBody;
use actix_web::dev::{ServiceFactory, ServiceResponse};
use actix_web::web::{Bytes, Data};
use actix_web::{post, App, HttpRequest, HttpResponse, HttpServer};
-use meili_snap::snapshot;
+use meili_snap::{json_string, snapshot};
use meilisearch::Opt;
use tokio::sync::mpsc;
use url::Url;
+use uuid::Uuid;
use crate::common::{self, default_settings, Server};
use crate::json;

@@ -68,21 +70,55 @@ async fn create_webhook_server() -> WebhookHandle {
}
#[actix_web::test]
-async fn test_basic_webhook() {
-let WebhookHandle { server_handle, url, mut receiver } = create_webhook_server().await;
+async fn cli_only() {
let db_path = tempfile::tempdir().unwrap();
let server = Server::new_with_options(Opt {
-task_webhook_url: Some(Url::parse(&url).unwrap()),
+task_webhook_url: Some(Url::parse("https://example-cli.com/").unwrap()),
+task_webhook_authorization_header: Some(String::from("Bearer a-secret-token")),
..default_settings(db_path.path())
})
.await
.unwrap();
-let index = server.index("tamo");
+let (webhooks, code) = server.get_webhooks().await;
+snapshot!(code, @"200 OK");
+snapshot!(webhooks, @r#"
+{
+"results": [
+{
+"uuid": "00000000-0000-0000-0000-000000000000",
+"isEditable": false,
+"url": "https://example-cli.com/",
+"headers": {
+"Authorization": "Bearer a-secret-token"
+}
+}
+]
+}
+"#);
+}
+#[actix_web::test]
+async fn single_receives_data() {
+let WebhookHandle { server_handle, url, mut receiver } = create_webhook_server().await;
+let server = Server::new().await;
+let (value, code) = server.create_webhook(json!({ "url": url })).await;
+snapshot!(code, @"201 Created");
+snapshot!(json_string!(value, { ".uuid" => "[uuid]", ".url" => "[ignored]" }), @r#"
+{
+"uuid": "[uuid]",
+"isEditable": true,
+"url": "[ignored]",
+"headers": {}
+}
+"#);
// May be flaky: we're relying on the fact that while the first document addition is processed, the other
// operations will be received and will be batched together. If it doesn't happen it's not a problem
// the rest of the test won't assume anything about the number of tasks per batch.
+let index = server.index("tamo");
for i in 0..5 {
let (_, _status) = index.add_documents(json!({ "id": i, "doggo": "bone" }), None).await;
}
@@ -127,3 +163,496 @@ async fn test_basic_webhook() {
server_handle.abort();
}
#[actix_web::test]
async fn multiple_receive_data() {
let WebhookHandle { server_handle: handle1, url: url1, receiver: mut receiver1 } =
create_webhook_server().await;
let WebhookHandle { server_handle: handle2, url: url2, receiver: mut receiver2 } =
create_webhook_server().await;
let WebhookHandle { server_handle: handle3, url: url3, receiver: mut receiver3 } =
create_webhook_server().await;
let db_path = tempfile::tempdir().unwrap();
let server = Server::new_with_options(Opt {
task_webhook_url: Some(Url::parse(&url3).unwrap()),
..default_settings(db_path.path())
})
.await
.unwrap();
for url in [url1, url2] {
let (value, code) = server.create_webhook(json!({ "url": url })).await;
snapshot!(code, @"201 Created");
snapshot!(json_string!(value, { ".uuid" => "[uuid]", ".url" => "[ignored]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "[ignored]",
"headers": {}
}
"#);
}
let index = server.index("tamo");
let (_, status) = index.add_documents(json!({ "id": 1, "doggo": "bone" }), None).await;
snapshot!(status, @"202 Accepted");
let mut count1 = 0;
let mut count2 = 0;
let mut count3 = 0;
while count1 == 0 || count2 == 0 || count3 == 0 {
tokio::select! {
msg = receiver1.recv() => { if msg.is_some() { count1 += 1; } },
msg = receiver2.recv() => { if msg.is_some() { count2 += 1; } },
msg = receiver3.recv() => { if msg.is_some() { count3 += 1; } },
}
}
assert_eq!(count1, 1);
assert_eq!(count2, 1);
assert_eq!(count3, 1);
handle1.abort();
handle2.abort();
handle3.abort();
}
#[actix_web::test]
async fn cli_with_dumps() {
let db_path = tempfile::tempdir().unwrap();
let server = Server::new_with_options(Opt {
task_webhook_url: Some(Url::parse("http://defined-in-test-cli.com").unwrap()),
task_webhook_authorization_header: Some(String::from(
"Bearer a-secret-token-defined-in-test-cli",
)),
import_dump: Some(PathBuf::from("../dump/tests/assets/v6-with-webhooks.dump")),
..default_settings(db_path.path())
})
.await
.unwrap();
let (webhooks, code) = server.get_webhooks().await;
snapshot!(code, @"200 OK");
snapshot!(webhooks, @r#"
{
"results": [
{
"uuid": "00000000-0000-0000-0000-000000000000",
"isEditable": false,
"url": "http://defined-in-test-cli.com/",
"headers": {
"Authorization": "Bearer a-secret-token-defined-in-test-cli"
}
},
{
"uuid": "627ea538-733d-4545-8d2d-03526eb381ce",
"isEditable": true,
"url": "https://example.com/authorization-less",
"headers": {}
},
{
"uuid": "771b0a28-ef28-4082-b984-536f82958c65",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "TOKEN"
}
},
{
"uuid": "f3583083-f8a7-4cbf-a5e7-fb3f1e28a7e9",
"isEditable": true,
"url": "https://third.com",
"headers": {}
}
]
}
"#);
}
#[actix_web::test]
async fn reserved_names() {
let db_path = tempfile::tempdir().unwrap();
let server = Server::new_with_options(Opt {
task_webhook_url: Some(Url::parse("https://example-cli.com/").unwrap()),
task_webhook_authorization_header: Some(String::from("Bearer a-secret-token")),
..default_settings(db_path.path())
})
.await
.unwrap();
let (value, code) = server
.patch_webhook(Uuid::nil().to_string(), json!({ "url": "http://localhost:8080" }))
.await;
snapshot!(value, @r#"
{
"message": "Webhook `[uuid]` is immutable. The webhook defined from the command line cannot be modified using the API.",
"code": "immutable_webhook",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_webhook"
}
"#);
snapshot!(code, @"400 Bad Request");
let (value, code) = server.delete_webhook(Uuid::nil().to_string()).await;
snapshot!(value, @r#"
{
"message": "Webhook `[uuid]` is immutable. The webhook defined from the command line cannot be modified using the API.",
"code": "immutable_webhook",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_webhook"
}
"#);
snapshot!(code, @"400 Bad Request");
}
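The nil UUID (`00000000-0000-0000-0000-000000000000`) is the reserved slot under which the CLI-defined webhook is exposed, which is why it is listed with `isEditable: false` earlier in this file. A hypothetical guard illustrating the rule both requests run into (invented helper, not the actual route code):

use uuid::Uuid;

#[derive(Debug)]
enum WebhookError {
    ImmutableWebhook(Uuid),
}

// Hypothetical: PATCH and DELETE both refuse the reserved CLI webhook slot.
fn check_editable(uuid: Uuid, is_editable: bool) -> Result<(), WebhookError> {
    if uuid.is_nil() || !is_editable {
        return Err(WebhookError::ImmutableWebhook(uuid));
    }
    Ok(())
}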
#[actix_web::test]
async fn over_limits() {
let server = Server::new().await;
// Too many webhooks
let mut uuids = Vec::new();
for _ in 0..20 {
let (value, code) = server.create_webhook(json!({ "url": "http://localhost:8080" })).await;
snapshot!(code, @"201 Created");
uuids.push(value.get("uuid").unwrap().as_str().unwrap().to_string());
}
let (value, code) = server.create_webhook(json!({ "url": "http://localhost:8080" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Defining too many webhooks would crush the server. Please limit the number of webhooks to 20. You may use a third-party proxy server to dispatch events to more than 20 endpoints.",
"code": "invalid_webhooks",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhooks"
}
"#);
// Reset webhooks
for uuid in uuids {
let (_value, code) = server.delete_webhook(&uuid).await;
snapshot!(code, @"204 No Content");
}
// Test too many headers
let (value, code) = server.create_webhook(json!({ "url": "http://localhost:8080" })).await;
snapshot!(code, @"201 Created");
let uuid = value.get("uuid").unwrap().as_str().unwrap();
for i in 0..200 {
let header_name = format!("header_{i}");
let (_value, code) =
server.patch_webhook(uuid, json!({ "headers": { header_name: "" } })).await;
snapshot!(code, @"200 OK");
}
let (value, code) =
server.patch_webhook(uuid, json!({ "headers": { "header_200": "" } })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Too many headers for the webhook `[uuid]`. Please limit the number of headers to 200. Hint: To remove an already defined header set its value to `null`",
"code": "invalid_webhook_headers",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_headers"
}
"#);
}
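The two limits exercised here are 20 webhooks per instance and 200 headers per webhook, both taken from the error messages above. A sketch of the kind of pre-flight check the handler presumably performs (the constants are real, the helper is invented):

const MAX_WEBHOOKS: usize = 20;
const MAX_HEADERS_PER_WEBHOOK: usize = 200;

// Invented helper mirroring the two 400 responses above.
fn check_limits(webhook_count: usize, header_count: usize) -> Result<(), &'static str> {
    if webhook_count > MAX_WEBHOOKS {
        return Err("too many webhooks");
    }
    if header_count > MAX_HEADERS_PER_WEBHOOK {
        return Err("too many headers");
    }
    Ok(())
}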
#[actix_web::test]
async fn post_get_delete() {
let server = Server::new().await;
let (value, code) = server
.create_webhook(json!({
"url": "https://example.com/hook",
"headers": { "authorization": "TOKEN" }
}))
.await;
snapshot!(code, @"201 Created");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "TOKEN"
}
}
"#);
let uuid = value.get("uuid").unwrap().as_str().unwrap();
let (value, code) = server.get_webhook(uuid).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "TOKEN"
}
}
"#);
let (_value, code) = server.delete_webhook(uuid).await;
snapshot!(code, @"204 No Content");
let (_value, code) = server.get_webhook(uuid).await;
snapshot!(code, @"404 Not Found");
}
#[actix_web::test]
async fn create_and_patch() {
let server = Server::new().await;
let (value, code) =
server.create_webhook(json!({ "headers": { "authorization": "TOKEN" } })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "The URL for the webhook `[uuid]` is missing.",
"code": "invalid_webhook_url",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_url"
}
"#);
let (value, code) = server.create_webhook(json!({ "url": "https://example.com/hook" })).await;
snapshot!(code, @"201 Created");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {}
}
"#);
let uuid = value.get("uuid").unwrap().as_str().unwrap();
let (value, code) =
server.patch_webhook(&uuid, json!({ "headers": { "authorization": "TOKEN" } })).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "TOKEN"
}
}
"#);
let (value, code) =
server.patch_webhook(&uuid, json!({ "headers": { "authorization2": "TOKEN" } })).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "TOKEN",
"authorization2": "TOKEN"
}
}
"#);
let (value, code) =
server.patch_webhook(&uuid, json!({ "headers": { "authorization": null } })).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization2": "TOKEN"
}
}
"#);
let (value, code) = server.patch_webhook(&uuid, json!({ "url": null })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"message": "The URL for the webhook `[uuid]` is missing.",
"code": "invalid_webhook_url",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_url"
}
"#);
}
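The test shows that `PATCH` merges headers key by key and that an explicit JSON `null` removes a header, matching the hint in the `over_limits` error message. A sketch of that merge rule in isolation (hypothetical helper, not the actual handler):

use std::collections::BTreeMap;
use serde_json::Value;

// Merge a `headers` patch into the stored headers: null deletes, strings replace.
fn merge_headers(current: &mut BTreeMap<String, String>, patch: &serde_json::Map<String, Value>) {
    for (name, value) in patch {
        match value {
            Value::Null => {
                current.remove(name);
            }
            Value::String(s) => {
                current.insert(name.clone(), s.clone());
            }
            _ => { /* the real API would reject non-string header values */ }
        }
    }
}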
#[actix_web::test]
async fn invalid_url_and_headers() {
let server = Server::new().await;
// Test invalid URL format
let (value, code) = server.create_webhook(json!({ "url": "not-a-valid-url" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Invalid URL `not-a-valid-url`: relative URL without a base",
"code": "invalid_webhook_url",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_url"
}
"#);
// Test invalid header name (containing spaces)
let (value, code) = server
.create_webhook(json!({
"url": "https://example.com/hook",
"headers": { "invalid header name": "value" }
}))
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Invalid header name `invalid header name`: invalid HTTP header name",
"code": "invalid_webhook_headers",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_headers"
}
"#);
// Test invalid header value (containing control characters)
let (value, code) = server
.create_webhook(json!({
"url": "https://example.com/hook",
"headers": { "authorization": "token\nwith\nnewlines" }
}))
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Invalid header value `authorization`: failed to parse header value",
"code": "invalid_webhook_headers",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_headers"
}
"#);
}
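Both messages are worded exactly like the parse errors of the `http` crate (`invalid HTTP header name`, `failed to parse header value`), so the validation is plausibly a thin wrapper around it. A sketch of equivalent validation under that assumption:

use http::header::{HeaderName, HeaderValue};

// Produces error strings shaped like the API responses above.
fn validate_header(name: &str, value: &str) -> Result<(), String> {
    HeaderName::try_from(name).map_err(|e| format!("Invalid header name `{name}`: {e}"))?;
    HeaderValue::try_from(value).map_err(|e| format!("Invalid header value `{name}`: {e}"))?;
    Ok(())
}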
#[actix_web::test]
async fn invalid_uuid() {
let server = Server::new().await;
// Test get webhook with invalid UUID
let (value, code) = server.get_webhook("invalid-uuid").await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Invalid UUID: invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `i` at 1",
"code": "invalid_webhook_uuid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_uuid"
}
"#);
// Test update webhook with invalid UUID
let (value, code) =
server.patch_webhook("invalid-uuid", json!({ "url": "https://example.com/hook" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Invalid UUID: invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `i` at 1",
"code": "invalid_webhook_uuid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_uuid"
}
"#);
// Test delete webhook with invalid UUID
let (value, code) = server.delete_webhook("invalid-uuid").await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Invalid UUID: invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `i` at 1",
"code": "invalid_webhook_uuid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_webhook_uuid"
}
"#);
}
#[actix_web::test]
async fn forbidden_fields() {
let server = Server::new().await;
// Test creating webhook with uuid field
let custom_uuid = Uuid::new_v4();
let (value, code) = server
.create_webhook(json!({
"url": "https://example.com/hook",
"uuid": custom_uuid.to_string(),
"headers": { "authorization": "TOKEN" }
}))
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Immutable field `uuid`: expected one of `url`, `headers`",
"code": "immutable_webhook_uuid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_webhook_uuid"
}
"#);
// Test creating webhook with isEditable field
let (value, code) = server
.create_webhook(json!({
"url": "https://example.com/hook2",
"isEditable": false,
"headers": { "authorization": "TOKEN" }
}))
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Immutable field `isEditable`: expected one of `url`, `headers`",
"code": "immutable_webhook_is_editable",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_webhook_is_editable"
}
"#);
// Test patching webhook with uuid field
let (value, code) = server
.patch_webhook(
"uuid-whatever",
json!({
"uuid": Uuid::new_v4(),
"headers": { "new-header": "value" }
}),
)
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Immutable field `uuid`: expected one of `url`, `headers`",
"code": "immutable_webhook_uuid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_webhook_uuid"
}
"#);
// Test patching webhook with isEditable field
let (value, code) = server
.patch_webhook(
"uuid-whatever",
json!({
"isEditable": false,
"headers": { "another-header": "value" }
}),
)
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"message": "Immutable field `isEditable`: expected one of `url`, `headers`",
"code": "immutable_webhook_is_editable",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_webhook_is_editable"
}
"#);
}

View File

@@ -43,7 +43,7 @@ async fn version_too_old() {
std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
-snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.16.0");
+snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.17.0");
}
#[actix_rt::test]
@@ -58,7 +58,7 @@ async fn version_requires_downgrade() {
std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
-snapshot!(err, @"Database version 1.16.1 is higher than the Meilisearch version 1.16.0. Downgrade is not supported");
+snapshot!(err, @"Database version 1.17.1 is higher than the Meilisearch version 1.17.0. Downgrade is not supported");
}
#[actix_rt::test]

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null, "progress": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"stats": { "stats": {
"totalNbTasks": 1, "totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null, "progress": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"stats": { "stats": {
"totalNbTasks": 1, "totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null, "progress": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"stats": { "stats": {
"totalNbTasks": 1, "totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"error": null, "error": null,
"duration": "[duration]", "duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"error": null, "error": null,
"duration": "[duration]", "duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"error": null, "error": null,
"duration": "[duration]", "duration": "[duration]",

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null, "progress": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"stats": { "stats": {
"totalNbTasks": 1, "totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "v1.16.0" "upgradeTo": "v1.17.0"
}, },
"error": null, "error": null,
"duration": "[duration]", "duration": "[duration]",

View File

@@ -88,7 +88,7 @@ async fn binary_quantize_before_sending_documents() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
// Make sure the documents are binary quantized
let (documents, _code) = index
@@ -161,7 +161,7 @@ async fn binary_quantize_after_sending_documents() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
@@ -305,7 +305,7 @@ async fn binary_quantize_clear_documents() {
server.wait_task(response.uid()).await.succeeded();
let (value, _code) = index.clear_all_documents().await;
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
// Make sure the documents DB has been cleared
let (documents, _code) = index

View File

@@ -149,7 +149,7 @@ async fn replace_document() {
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
// Make sure kefir now has 2 vectors
let (documents, code) = index

View File

@@ -43,7 +43,7 @@ async fn add_remove_user_provided() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let (documents, _code) = index
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
@@ -96,7 +96,7 @@ async fn add_remove_user_provided() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let (documents, _code) = index
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
@@ -139,7 +139,7 @@ async fn add_remove_user_provided() {
let (value, code) = index.delete_document(0).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
let (documents, _code) = index
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
@@ -188,7 +188,7 @@ async fn user_provide_mismatched_embedding_dimension() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -219,7 +219,7 @@ async fn user_provide_mismatched_embedding_dimension() {
]);
let (response, code) = index.add_documents(new_document, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(response.uid()).await;
+let task = server.wait_task(response.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -271,7 +271,7 @@ async fn generate_default_user_provided_documents(server: &Server) -> Index {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
index
}
@@ -286,7 +286,7 @@ async fn user_provided_embeddings_error() {
json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [0, 0, 0] }}}); json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [0, 0, 0] }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -316,7 +316,7 @@ async fn user_provided_embeddings_error() {
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": {}}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": {}}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -347,7 +347,7 @@ async fn user_provided_embeddings_error() {
json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": "yes please" }}}); json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": "yes please" }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -376,7 +376,7 @@ async fn user_provided_embeddings_error() {
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": true, "regenerate": true }}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": true, "regenerate": true }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -405,7 +405,7 @@ async fn user_provided_embeddings_error() {
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [true], "regenerate": true }}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [true], "regenerate": true }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -434,7 +434,7 @@ async fn user_provided_embeddings_error() {
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [[true]], "regenerate": false }}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [[true]], "regenerate": false }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -463,20 +463,20 @@ async fn user_provided_embeddings_error() {
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [23, 0.1, -12], "regenerate": true }}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [23, 0.1, -12], "regenerate": true }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task["status"], @r###""succeeded""###); snapshot!(task["status"], @r###""succeeded""###);
let documents = let documents =
json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false }}}); json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task["status"], @r###""succeeded""###); snapshot!(task["status"], @r###""succeeded""###);
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [0.1, [0.2, 0.3]] }}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [0.1, [0.2, 0.3]] }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -505,7 +505,7 @@ async fn user_provided_embeddings_error() {
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [[0.1, 0.2], 0.3] }}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [[0.1, 0.2], 0.3] }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -534,7 +534,7 @@ async fn user_provided_embeddings_error() {
let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [[0.1, true], 0.3] }}}); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [[0.1, true], 0.3] }}});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -575,7 +575,7 @@ async fn user_provided_vectors_error() {
let documents = json!([{"id": 40, "name": "kefir"}, {"id": 41, "name": "intel"}, {"id": 42, "name": "max"}, {"id": 43, "name": "venus"}, {"id": 44, "name": "eva"}]); let documents = json!([{"id": 40, "name": "kefir"}, {"id": 41, "name": "intel"}, {"id": 42, "name": "max"}, {"id": 43, "name": "venus"}, {"id": 44, "name": "eva"}]);
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -605,7 +605,7 @@ async fn user_provided_vectors_error() {
let documents = json!({"id": 42, "name": "kefir", "_vector": { "manaul": [0, 0, 0] }}); let documents = json!({"id": 42, "name": "kefir", "_vector": { "manaul": [0, 0, 0] }});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -635,7 +635,7 @@ async fn user_provided_vectors_error() {
let documents = json!({"id": 42, "name": "kefir", "_vectors": { "manaul": [0, 0, 0] }}); let documents = json!({"id": 42, "name": "kefir", "_vectors": { "manaul": [0, 0, 0] }});
let (value, code) = index.add_documents(documents, None).await; let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
let task = index.wait_task(value.uid()).await; let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###" snapshot!(task, @r###"
{ {
"uid": "[uid]", "uid": "[uid]",
@@ -668,7 +668,7 @@ async fn clear_documents() {
let index = generate_default_user_provided_documents(&server).await;
let (value, _code) = index.clear_all_documents().await;
-index.wait_task(value.uid()).await.succeeded();
+server.wait_task(value.uid()).await.succeeded();
// Make sure the documents DB has been cleared
let (documents, _code) = index
@@ -724,7 +724,7 @@ async fn add_remove_one_vector_4588() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, name: "document-added");
let documents = json!([
@@ -732,7 +732,7 @@ async fn add_remove_one_vector_4588() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, name: "document-deleted");
let (documents, _code) = index

View File

@@ -117,7 +117,7 @@ async fn test_both_apis() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",

View File

@@ -370,7 +370,7 @@ async fn it_works() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -601,7 +601,7 @@ async fn tokenize_long_text() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -657,7 +657,7 @@ async fn bad_api_key() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
@@ -805,7 +805,7 @@ async fn bad_model() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
@@ -883,7 +883,7 @@ async fn bad_dimensions() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
@@ -992,7 +992,7 @@ async fn smaller_dimensions() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -1224,7 +1224,7 @@ async fn small_embedding_model() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -1455,7 +1455,7 @@ async fn legacy_embedding_model() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -1687,7 +1687,7 @@ async fn it_still_works() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -1916,7 +1916,7 @@ async fn timeout() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",

View File

@@ -1099,7 +1099,7 @@ async fn add_vector_and_user_provided() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -1616,7 +1616,7 @@ async fn server_returns_multiple() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -1722,7 +1722,7 @@ async fn server_single_input_returns_in_array() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",
@@ -1828,7 +1828,7 @@ async fn server_raw() {
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
-let task = index.wait_task(value.uid()).await;
+let task = server.wait_task(value.uid()).await;
snapshot!(task, @r###"
{
"uid": "[uid]",

View File

@@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::collections::BTreeSet;
use std::fmt::{Debug, Display};
use std::ops::Bound::{self, Excluded, Included, Unbounded};
@@ -14,10 +15,9 @@ use super::facet_range_search;
use crate::constants::RESERVED_GEO_FIELD_NAME;
use crate::error::{Error, UserError};
use crate::filterable_attributes_rules::{filtered_matching_patterns, matching_features};
-use crate::heed_codec::facet::{
-    FacetGroupKey, FacetGroupKeyCodec, FacetGroupValue, FacetGroupValueCodec,
-};
+use crate::heed_codec::facet::{FacetGroupKey, FacetGroupKeyCodec, FacetGroupValueCodec};
use crate::index::db_name::FACET_ID_STRING_DOCIDS;
+use crate::search::facet::facet_range_search::find_docids_of_facet_within_bounds;
use crate::{
distance_between_two_points, lat_lng_to_xyz, FieldId, FieldsIdsMap,
FilterableAttributesFeatures, FilterableAttributesRule, Index, InternalError, Result,
@@ -416,20 +416,56 @@ impl<'a> Filter<'a> {
return Ok(docids);
}
Condition::StartsWith { keyword: _, word } => {
+// The idea here is that "STARTS WITH baba" is the same as "baba <= value < babb".
+// We just incremented the last letter to find the upper bound.
+// The upper bound may not be valid utf8, but lmdb doesn't care as it works over bytes.
let value = crate::normalize_facet(word.value());
-let base = FacetGroupKey { field_id, level: 0, left_bound: value.as_str() };
-let docids = strings_db
-    .prefix_iter(rtxn, &base)?
-    .map(|result| -> Result<RoaringBitmap> {
-        match result {
-            Ok((_facet_group_key, FacetGroupValue { bitmap, .. })) => Ok(bitmap),
-            Err(_e) => Err(InternalError::from(SerializationError::Decoding {
-                db_name: Some(FACET_ID_STRING_DOCIDS),
-            })
-            .into()),
-        }
-    })
-    .union()?;
+let mut value2 = value.as_bytes().to_owned();
+let last = match value2.last_mut() {
+    Some(last) => last,
+    None => {
+        // The prefix is empty, so all documents that have the field will match.
+        return index
+            .exists_faceted_documents_ids(rtxn, field_id)
+            .map_err(|e| e.into());
+    }
+};
+if *last == u8::MAX {
+    // u8::MAX is a forbidden UTF-8 byte, we're guaranteed it cannot be sent through a filter to meilisearch, but just in case, we're going to return something
+    tracing::warn!(
+        "Found non utf-8 character in filter. That shouldn't be possible"
+    );
+    return Ok(RoaringBitmap::new());
+}
+*last += 1;
+// This is very similar to `heed::Bytes` but its `EItem` is `&[u8]` instead of `[u8]`
+struct BytesRef;
+impl<'a> BytesEncode<'a> for BytesRef {
+    type EItem = &'a [u8];
+    fn bytes_encode(
+        item: &'a Self::EItem,
+    ) -> std::result::Result<Cow<'a, [u8]>, heed::BoxedError> {
+        Ok(Cow::Borrowed(item))
+    }
+}
+let mut docids = RoaringBitmap::new();
+let bytes_db =
+    index.facet_id_string_docids.remap_key_type::<FacetGroupKeyCodec<BytesRef>>();
+find_docids_of_facet_within_bounds::<BytesRef>(
+    rtxn,
+    bytes_db,
+    field_id,
+    &Included(value.as_bytes()),
+    &Excluded(value2.as_slice()),
+    universe,
+    &mut docids,
+)?;
return Ok(docids);
}
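To make the trick concrete: the rewrite turns a prefix match into a half-open scan over raw bytes, so `STARTS WITH baba` becomes the range `["baba", "babb")`. A standalone sketch of the bound computation with the same three cases as the hunk above (names invented):

enum PrefixBound {
    All,            // empty prefix: every document with the field matches
    Nothing,        // last byte is 0xFF, which valid UTF-8 never contains
    Below(Vec<u8>), // exclusive upper bound: the prefix with its last byte + 1
}

fn prefix_upper_bound(prefix: &str) -> PrefixBound {
    let mut upper = prefix.as_bytes().to_owned();
    match upper.last_mut() {
        None => PrefixBound::All,
        Some(last) if *last == u8::MAX => PrefixBound::Nothing,
        Some(last) => {
            *last += 1;
            PrefixBound::Below(upper)
        }
    }
}

Incrementing the final byte is only safe because LMDB compares keys bytewise; the resulting bound (for example after bumping the trailing continuation byte of `café`) need not be valid UTF-8, which is exactly what the code comment calls out.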

View File

@@ -17,7 +17,7 @@ pub fn setup_search_index_with_criteria(criteria: &[Criterion]) -> Index {
let path = tempfile::tempdir().unwrap();
let options = EnvOpenOptions::new();
let mut options = options.read_txn_without_tls();
-options.map_size(10 * 1024 * 1024); // 10 MB
+options.map_size(10 * 1024 * 1024); // 10 MiB
let index = Index::new(options, &path, true).unwrap();
let mut wtxn = index.write_txn().unwrap();

View File

@@ -119,6 +119,7 @@ pub struct FacetsUpdate<'i> {
min_level_size: u8,
data_size: u64,
}

impl<'i> FacetsUpdate<'i> {
pub fn new(
index: &'i Index,

View File

@@ -8,6 +8,7 @@ use v1_12::{V1_12_3_To_V1_13_0, V1_12_To_V1_12_3};
use v1_13::{V1_13_0_To_V1_13_1, V1_13_1_To_Latest_V1_13};
use v1_14::Latest_V1_13_To_Latest_V1_14;
use v1_15::Latest_V1_14_To_Latest_V1_15;
use v1_16::Latest_V1_16_To_V1_17_0;
use crate::constants::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use crate::progress::{Progress, VariableNameStep};
@@ -34,6 +35,7 @@ const UPGRADE_FUNCTIONS: &[&dyn UpgradeIndex] = &[
&Latest_V1_13_To_Latest_V1_14 {},
&Latest_V1_14_To_Latest_V1_15 {},
&Latest_V1_15_To_V1_16_0 {},
&Latest_V1_16_To_V1_17_0 {},
// This is the last upgrade function, it will be called when the index is up to date.
// any other upgrade function should be added before this one.
&ToCurrentNoOp {},
@@ -62,6 +64,7 @@ const fn start(from: (u32, u32, u32)) -> Option<usize> {
// We must handle the current version in the match because in case of a failure some index may have been upgraded but not other.
(1, 15, _) => function_index!(6),
(1, 16, _) => function_index!(7),
(1, 17, _) => function_index!(8),
// We deliberately don't add a placeholder with (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) here to force manually
// considering dumpless upgrade.
(_major, _minor, _patch) => return None,
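The dispatch pattern above pairs a fixed, ordered chain of upgrade steps with a version-to-index lookup, so a database several versions behind replays every step it is missing, while the chain ends in `ToCurrentNoOp` so an up-to-date database maps to the final no-op. A compressed sketch of the idea (trait and helper invented; the real code goes through `UPGRADE_FUNCTIONS` and `function_index!`):

type Version = (u32, u32, u32);

// Invented trait: each step brings an index up to its target version.
trait UpgradeStep {
    fn target(&self) -> Version;
    fn run(&self) -> Result<(), String>;
}

fn upgrade_from(from: Version, steps: &[&dyn UpgradeStep]) -> Result<(), String> {
    // First step whose target is newer than the on-disk version; an up-to-date
    // database finds nothing to run. The real `start` instead returns None for
    // unhandled versions, to force a manual review of dumpless upgrades.
    let start = steps.iter().position(|s| s.target() > from).unwrap_or(steps.len());
    for step in &steps[start..] {
        step.run()?;
    }
    Ok(())
}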

Some files were not shown because too many files have changed in this diff.