Mirror of https://github.com/meilisearch/meilisearch.git, synced 2025-11-25 14:07:11 +00:00

Compare commits: update-ver ... change-net (298 commits)
Commit SHAs in this comparison:

9bd3482230, d6e4e414d7, c0617efe76, 8316c36648, 572bae9da1, 2a330dce83, d62a6b6f0d, 58b8630862,
0703767fc6, e0c97325d6, 0f3ef8de73, 7313cefd74, 7fb4404928, 8405f0bf9c, 3a7f9b56fe, 61034e2e2e,
108d6d3344, 35bd00f6a1, 69059d67ef, e13783103f, f719665c4e, 638f284614, 32ac98ed95, 46aee695ca,
716c67f858, fec10bb2d6, 3dac2cf73e, 03eca800e6, 28fa2e960e, a3b9220f84, c09d48edf2, ae4ab0ebbb,
900a9a6d59, fc560e6730, e2a06470b7, ada27323f2, 607a1c2395, b56956ea0c, 3d21290f7f, 4edd4c06bc,
566baddc6b, febe3186ce, 5dd42c1871, 8670793e6e, 41a04aa3ab, 88f841bc05, d19892d2ea, c0905d6650,
576d7d94b1, f4f1334b62, aaff6c3685, 42d2af4c84, 6be91c824c, 6ee0537db8, 3fbeff4308, 375546b61a,
25a1d50763, 4fe073cc1a, 5cd3d36d20, d7ad76ea1e, e82bb93221, 000cb93aad, ad4f5514b9, 8d29a29867,
d7de819d11, e43d67591c, 134237d1eb, 26d9070aa7, 06b3ca9eb5, 7dc1c03a36, 0b74722a73, 0f80249b70,
a9b8a60320, fd795c513b, ce136ec0c1, 4d4f6d2c20, 4cc8fb2c5c, 5d47590f3e, 16461a9145, 17810394b8,
15690b9e22, a8cd81c7f4, 6376571df0, cfb040e647, f54773781a, 0fccd0ca1f, 226c102bab, 2940bbb75c,
13df964564, 35b24a28aa, 0faf495173, c32c74671d, b05bcf2c13, 90cc5263f6, 424d0e277e, 34eba61c0d,
687260bc13, 0a3ab8e171, 6b6e69b07a, a25111f32e, b144d9ab2b, c3cefbc170, 8e2aeb6739, 9c06545ae3,
e1c859c0f7, c4848e6cc0, 454581dbc9, bc5100dddd, 118c6da64d, a989f52657, a8cc66899c, c9cc748f42,
4ccce18d7b, 00d1006cd9, 5cad65cca5, 7fe9d07247, 8933d87031, 231f86decf, 381de52fc5, 026b95afbb,
210da70faf, 5fc7872ab3, 1f0a6e8a44, b2f2807a94, 952394710c, da6fffdf6d, b5f0c19406, 0fd66a5317,
cb4dd3b88c, 0ade376b00, 32785cb2d0, fb7ccc0db3, 69a84fbfe6, 31cb960992, 6d9e0c4bce, a8e9597f49,
f4147a60a3, 5139dd273e, 72c63d3929, 97ea9e9937, 4645813ea8, fb68f1241c, f5f2f7c6f2, 6340412219,
6e4dfa0168, 5cf66856ae, 0d4b78a217, aef07f4bfa, 0b3f983d27, 52d55ccd8e, 6d92c94bb3, 30110a0488,
47cee7e1ea, 493d67ffd4, 2b2559016a, 6176b143bb, f9d0d1ddd6, e50f970ab8, 27550dafad, a7cd6853db,
f51f7832a7, a38a57acb6, affcaef556, 7acac2f560, b68431367f, 79d3d1606c, 580bfb06b4, 062c9c6971,
07ed5c57e4, a94a13c9b0, 938ef77ee5, 9dcdde592c, 7de44ad2b7, 820854ba5c, 496de5563a, 0a86b1e11e,
795045c03a, b541b7bed3, 6fb3cf95e4, cbd2bdf0fa, 601785692f, 65c212d1fd, 85feb3a26c, d550b90c60,
385acbbcd2, 484dbf8c06, 9c6c0af076, e33fbcf7b2, d352f33d16, 3682b92ee8, ef10c1fb23, bd97a7cc19,
56c7f54804, 15d34c33e8, 42ac869c5c, 6e0152921f, 069d25dce6, 9929f798d3, 80ff438402, e62a807b60,
907055ed08, 8b18adee95, 53223ace47, a579ea2596, e13541818a, c974f0ab0a, 36cac8acf7, 5507a73b23,
a608e57c3c, 398efa3c55, 307ea38c2a, cdeca59587, 8529e2161a, b80869f2be, 666ae1a3e7, f6559258ce,
b5ba0e42b3, b0479eb996, 2bab375001, 81020c7d6d, 4068c58417, a904ce109a, ecea247e5d, ae5bd9d0e3,
ae2d0a67a4, 0f1c78b185, 300f5ce0f4, 3240d89e81, 63a649fd7d, b9e014c044, 9021cb4258, de52fe91f5,
97ecbb53ff, 14b1a3300b, 3c84010403, a69af611e3, c5b325de30, 100a6f96e4, 3c583ce7a4, 0881810780,
3f20c1aa5d, 5df125cbb7, 74992560b0, c385cf985b, 2121819c66, 2f33cd5f0a, 2f5101a1e4, be045a7636,
60acdf8574, 93864009cc, 223df5a433, 3580b3a4ef, 66b6e47494, 6c3dd83ae5, 10567b150c, a439f57d70,
d243504296, a7fe2abca4, 26da478b5b, 13d38d59bf, 4264abda23, dbb670a9ee, a92e36ab83, ad06828685,
8f1b697b91, bb4d573862, aa5a1f333a, 776e55d209, 3362fb8476, 6d93b36279, 982e989886, 0014ed3114,
ab07e9480e, 00e957051e, f244439b4f, 30fd546c12, a930977460, a3b8c2b71f, 39f808714d, 8adf6141e0,
df3f282e4d, d81855015b, feb53104e5, 881c37393f, 9e98a25e45, fb73b83abe, 29b74424ad, b4cafec8b3,
d43cd40807, 0301d8f239, 2d45124d9b, 40e7284d70, 4d8d34cc93, 5cced0af02, 9c60e9689f, 2052537681,
a9bb64c55a, 45da2257ec
.github/workflows/bench-manual.yml (vendored): 4 changed lines

```diff
@@ -17,8 +17,8 @@ jobs:
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/bench-pr.yml (vendored): 4 changed lines

```diff
@@ -60,13 +60,13 @@ jobs:
         with:
           repo_token: ${{ env.GH_TOKEN }}
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         if: success()
         with:
           fetch-depth: 0 # fetch full history to be able to get main commit sha
           ref: ${{ steps.comment-branch.outputs.head_ref }}
 
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/bench-push-indexing.yml (vendored): 4 changed lines

```diff
@@ -11,8 +11,8 @@ jobs:
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/benchmarks-manual.yml (vendored): 4 changed lines

```diff
@@ -17,8 +17,8 @@ jobs:
     runs-on: benchmarks
     timeout-minutes: 4320 # 72h
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/benchmarks-pr.yml (vendored): 4 changed lines

```diff
@@ -44,7 +44,7 @@ jobs:
           exit 1
         fi
 
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -61,7 +61,7 @@ jobs:
         with:
           repo_token: ${{ env.GH_TOKEN }}
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         if: success()
         with:
           fetch-depth: 0 # fetch full history to be able to get main commit sha
```
```diff
@@ -15,8 +15,8 @@ jobs:
     runs-on: benchmarks
     timeout-minutes: 4320 # 72h
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
```diff
@@ -14,8 +14,8 @@ jobs:
     name: Run and upload benchmarks
     runs-on: benchmarks
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
```diff
@@ -14,8 +14,8 @@ jobs:
     name: Run and upload benchmarks
     runs-on: benchmarks
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
```diff
@@ -14,8 +14,8 @@ jobs:
     name: Run and upload benchmarks
     runs-on: benchmarks
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/db-change-missing.yml (vendored): 2 changed lines

```diff
@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Check db change labels
         id: check_labels
         env:
```
.github/workflows/dependency-issue.yml (vendored): 2 changed lines

```diff
@@ -13,7 +13,7 @@ jobs:
       ISSUE_TEMPLATE: issue-template.md
       GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - name: Download the issue template
         run: curl -s https://raw.githubusercontent.com/meilisearch/meilisearch/main/.github/templates/dependency-issue.md > $ISSUE_TEMPLATE
       - name: Create issue
```
.github/workflows/flaky-tests.yml (vendored): 4 changed lines

```diff
@@ -12,12 +12,12 @@ jobs:
       # Use ubuntu-22.04 to compile with glibc 2.35
       image: ubuntu:22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
       - name: Run cargo flaky in the dumps
```
.github/workflows/fuzzer-indexing.yml (vendored): 4 changed lines

```diff
@@ -11,8 +11,8 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 4320 # 72h
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/latest-git-tag.yml (vendored): 4 changed lines

```diff
@@ -10,7 +10,7 @@ jobs:
     name: Check the version validity
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - name: Check release validity
         if: github.event_name == 'release'
         run: bash .github/scripts/check-release.sh
@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: check-version
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - uses: rickstaa/action-create-tag@v1
         with:
           tag: "latest"
```
.github/workflows/publish-apt-brew-pkg.yml (vendored): 6 changed lines

```diff
@@ -9,7 +9,7 @@ jobs:
     name: Check the version validity
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - name: Check release validity
         run: bash .github/scripts/check-release.sh
 
@@ -25,10 +25,10 @@ jobs:
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-deb
         run: cargo install cargo-deb
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - name: Build deb package
         run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
```
.github/workflows/publish-docker-images.yml (vendored): 2 changed lines

```diff
@@ -19,7 +19,7 @@ jobs:
     permissions:
       id-token: write # This is needed to use Cosign in keyless mode
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
 
       # If we are running a cron or manual job ('schedule' or 'workflow_dispatch' event), it means we are publishing the `nightly` tag, so not considered stable.
       # If we have pushed a tag, and the tag has the v<nmumber>.<number>.<number> format, it means we are publishing an official release, so considered stable.
```
```diff
@@ -1,4 +1,4 @@
-name: Publish binaries to GitHub release
+name: Publish assets to GitHub release
 
 on:
   workflow_dispatch:
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     # No need to check the version for dry run (cron)
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       # Check if the tag has the v<nmumber>.<number>.<number> format.
       # If yes, it means we are publishing an official release.
       # If no, we are releasing a RC, so no need to check the version.
@@ -40,12 +40,12 @@ jobs:
       # Use ubuntu-22.04 to compile with glibc 2.35
       image: ubuntu:22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Build
         run: cargo build --release --locked
       # No need to upload binaries for dry run (cron)
@@ -74,8 +74,8 @@ jobs:
         artifact_name: meilisearch.exe
         asset_name: meilisearch-windows-amd64.exe
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Build
         run: cargo build --release --locked
       # No need to upload binaries for dry run (cron)
@@ -99,9 +99,9 @@ jobs:
         asset_name: meilisearch-macos-apple-silicon
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v5
       - name: Installing Rust toolchain
-        uses: dtolnay/rust-toolchain@1.85
+        uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
           target: ${{ matrix.target }}
@@ -136,7 +136,7 @@ jobs:
         asset_name: meilisearch-linux-aarch64
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v5
       - name: Install needed dependencies
         run: |
           apt-get update -y && apt upgrade -y
@@ -148,7 +148,7 @@ jobs:
           add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
           apt-get update -y && apt-get install -y docker-ce
       - name: Installing Rust toolchain
-        uses: dtolnay/rust-toolchain@1.85
+        uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
           target: ${{ matrix.target }}
@@ -184,3 +184,28 @@ jobs:
           file: target/${{ matrix.target }}/release/meilisearch
           asset_name: ${{ matrix.asset_name }}
           tag: ${{ github.ref }}
+
+  publish-openapi-file:
+    name: Publish OpenAPI file
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v5
+      - name: Setup Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - name: Generate OpenAPI file
+        run: |
+          cd crates/openapi-generator
+          cargo run --release -- --pretty --output ../../meilisearch.json
+      - name: Upload OpenAPI to Release
+        # No need to upload for dry run (cron)
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.11.2
+        with:
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          file: ./meilisearch.json
+          asset_name: meilisearch-openapi.json
+          tag: ${{ github.ref }}
```
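The new `publish-openapi-file` job invokes `crates/openapi-generator` with `--pretty` and `--output`. As a rough sketch of what a generator CLI with those two flags looks like; the flag handling below is hypothetical, inferred only from the invocation in the workflow, the real crate's internals may differ, and `serde_json` is assumed as a dependency:

```rust
use std::{fs, path::PathBuf};

fn main() {
    // Hand-rolled parsing for the two flags the workflow passes.
    let mut pretty = false;
    let mut output = PathBuf::from("meilisearch.json");
    let mut args = std::env::args().skip(1);
    while let Some(arg) = args.next() {
        match arg.as_str() {
            "--pretty" => pretty = true,
            "--output" => output = PathBuf::from(args.next().expect("--output needs a path")),
            other => panic!("unknown flag: {other}"),
        }
    }

    // Stand-in for the real OpenAPI document the generator emits.
    let doc = serde_json::json!({ "openapi": "3.1.0", "info": { "title": "Meilisearch" } });
    let body = if pretty {
        serde_json::to_string_pretty(&doc).unwrap()
    } else {
        serde_json::to_string(&doc).unwrap()
    };
    fs::write(&output, body).expect("failed to write OpenAPI file");
}
```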
.github/workflows/sdks-tests.yml (vendored): 30 changed lines

```diff
@@ -22,7 +22,7 @@ jobs:
     outputs:
       docker-image: ${{ steps.define-image.outputs.docker-image }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
       - name: Define the Docker image we need to use
         id: define-image
         run: |
@@ -46,7 +46,7 @@ jobs:
       MEILISEARCH_VERSION: ${{ needs.define-docker-image.outputs.docker-image }}
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-dotnet
       - name: Setup .NET Core
@@ -75,7 +75,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-dart
       - uses: dart-lang/setup-dart@v1
@@ -103,7 +103,7 @@ jobs:
         uses: actions/setup-go@v5
         with:
           go-version: stable
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-go
       - name: Get dependencies
@@ -129,11 +129,11 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-java
       - name: Set up Java
-        uses: actions/setup-java@v4
+        uses: actions/setup-java@v5
         with:
           java-version: 8
           distribution: 'zulu'
@@ -156,7 +156,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-js
       - name: Setup node
@@ -191,7 +191,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-php
       - name: Install PHP
@@ -220,7 +220,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-python
       - name: Set up Python
@@ -245,7 +245,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-ruby
       - name: Set up Ruby 3
@@ -270,7 +270,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-rust
       - name: Build
@@ -291,7 +291,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-swift
       - name: Run tests
@@ -314,7 +314,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-js-plugins
       - name: Setup node
@@ -347,7 +347,7 @@ jobs:
     env:
       RAILS_VERSION: '7.0'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-rails
       - name: Install SQLite dependencies
@@ -377,7 +377,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v5
         with:
           repository: meilisearch/meilisearch-symfony
       - name: Install PHP
```
|||||||
30
.github/workflows/test-suite.yml
vendored
30
.github/workflows/test-suite.yml
vendored
@@ -21,13 +21,13 @@ jobs:
|
|||||||
# Use ubuntu-22.04 to compile with glibc 2.35
|
# Use ubuntu-22.04 to compile with glibc 2.35
|
||||||
image: ubuntu:22.04
|
image: ubuntu:22.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- name: Install needed dependencies
|
- name: Install needed dependencies
|
||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y curl
|
apt-get update && apt-get install -y curl
|
||||||
apt-get install build-essential -y
|
apt-get install build-essential -y
|
||||||
- name: Setup test with Rust stable
|
- name: Setup test with Rust stable
|
||||||
uses: dtolnay/rust-toolchain@1.85
|
uses: dtolnay/rust-toolchain@1.89
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.8.0
|
uses: Swatinem/rust-cache@v2.8.0
|
||||||
- name: Run cargo check without any default features
|
- name: Run cargo check without any default features
|
||||||
@@ -49,10 +49,10 @@ jobs:
|
|||||||
matrix:
|
matrix:
|
||||||
os: [macos-13, windows-2022]
|
os: [macos-13, windows-2022]
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.8.0
|
uses: Swatinem/rust-cache@v2.8.0
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
- name: Run cargo check without any default features
|
- name: Run cargo check without any default features
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
@@ -72,12 +72,12 @@ jobs:
|
|||||||
image: ubuntu:22.04
|
image: ubuntu:22.04
|
||||||
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- name: Install needed dependencies
|
- name: Install needed dependencies
|
||||||
run: |
|
run: |
|
||||||
apt-get update
|
apt-get update
|
||||||
apt-get install --assume-yes build-essential curl
|
apt-get install --assume-yes build-essential curl
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
- name: Run cargo build with almost all features
|
- name: Run cargo build with almost all features
|
||||||
run: |
|
run: |
|
||||||
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
||||||
@@ -91,7 +91,7 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
|
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- name: Install Ollama
|
- name: Install Ollama
|
||||||
run: |
|
run: |
|
||||||
curl -fsSL https://ollama.com/install.sh | sudo -E sh
|
curl -fsSL https://ollama.com/install.sh | sudo -E sh
|
||||||
@@ -124,12 +124,12 @@ jobs:
|
|||||||
image: ubuntu:22.04
|
image: ubuntu:22.04
|
||||||
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- name: Install needed dependencies
|
- name: Install needed dependencies
|
||||||
run: |
|
run: |
|
||||||
apt-get update
|
apt-get update
|
||||||
apt-get install --assume-yes build-essential curl
|
apt-get install --assume-yes build-essential curl
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
- name: Run cargo tree without default features and check lindera is not present
|
- name: Run cargo tree without default features and check lindera is not present
|
||||||
run: |
|
run: |
|
||||||
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
|
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
|
||||||
@@ -148,12 +148,12 @@ jobs:
|
|||||||
# Use ubuntu-22.04 to compile with glibc 2.35
|
# Use ubuntu-22.04 to compile with glibc 2.35
|
||||||
image: ubuntu:22.04
|
image: ubuntu:22.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- name: Install needed dependencies
|
- name: Install needed dependencies
|
||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y curl
|
apt-get update && apt-get install -y curl
|
||||||
apt-get install build-essential -y
|
apt-get install build-essential -y
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.8.0
|
uses: Swatinem/rust-cache@v2.8.0
|
||||||
- name: Run tests in debug
|
- name: Run tests in debug
|
||||||
@@ -166,8 +166,8 @@ jobs:
|
|||||||
name: Run Clippy
|
name: Run Clippy
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
components: clippy
|
components: clippy
|
||||||
@@ -183,8 +183,8 @@ jobs:
|
|||||||
name: Run Rustfmt
|
name: Run Rustfmt
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
toolchain: nightly-2024-07-09
|
toolchain: nightly-2024-07-09
|
||||||
|
|||||||
```diff
@@ -17,8 +17,8 @@ jobs:
     name: Update version in Cargo.toml
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
       - name: Install sd
@@ -41,5 +41,4 @@ jobs:
           --title "Update version for the next release ($NEW_VERSION) in Cargo.toml" \
           --body '⚠️ This PR is automatically generated. Check the new version is the expected one and Cargo.lock has been updated before merging.' \
           --label 'skip changelog' \
-          --milestone $NEW_VERSION \
           --base $GITHUB_REF_NAME
```
````diff
@@ -107,12 +107,18 @@ Run `cargo xtask --help` from the root of the repository to find out what is available.
 
 To update the openAPI file in the code, see [sprint_issue.md](https://github.com/meilisearch/meilisearch/blob/main/.github/ISSUE_TEMPLATE/sprint_issue.md#reminders-when-modifying-the-api).
 
-If you want to update the openAPI file on the [open-api repository](https://github.com/meilisearch/open-api):
-- Pull the latest version of the latest rc of Meilisearch `git checkout release-vX.Y.Z; git pull`
+If you want to generate OpenAPI file manually:
+
+With swagger:
 - Starts Meilisearch with the `swagger` feature flag: `cargo run --features swagger`
 - On a browser, open the following URL: http://localhost:7700/scalar
 - Click the « Download openAPI file »
-- Open a PR replacing [this file](https://github.com/meilisearch/open-api/blob/main/open-api.json) with the one downloaded
+
+With the internal crate:
+```bash
+cd crates/openapi-generator
+cargo run --release -- --pretty --output meilisearch.json
+```
 
 ### Logging
 
````
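Whichever route produced `meilisearch.json`, a quick check that the output parses and looks like an OpenAPI document can catch generator regressions early. A minimal sketch, assuming `serde_json` as a dependency and the file in the current directory:

```rust
use std::fs;

fn main() {
    let raw = fs::read_to_string("meilisearch.json").expect("generator output missing");
    let doc: serde_json::Value = serde_json::from_str(&raw).expect("output is not valid JSON");
    // Every OpenAPI document carries a top-level `openapi` version string.
    assert!(doc.get("openapi").and_then(|v| v.as_str()).is_some(), "missing `openapi` field");
    println!("meilisearch.json parses as an OpenAPI document");
}
```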
Cargo.lock (generated): 1729 changed lines (diff suppressed because it is too large)
```diff
@@ -19,10 +19,11 @@ members = [
     "crates/tracing-trace",
     "crates/xtask",
     "crates/build-info",
+    "crates/openapi-generator",
 ]
 
 [workspace.package]
-version = "1.17.1"
+version = "1.21.0"
 authors = [
     "Quentin de Quelen <quentin@dequelen.me>",
     "Clément Renault <clement@meilisearch.com>",
```
```diff
@@ -1,5 +1,5 @@
 # Compile
-FROM rust:1.85-alpine3.20 AS compiler
+FROM rust:1.89-alpine3.20 AS compiler
 
 RUN apk add -q --no-cache build-base openssl-dev
 
```
LICENSE: 8 changed lines

```diff
@@ -19,3 +19,11 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
+
+---
+
+🔒 Meilisearch Enterprise Edition (EE)
+
+Certain parts of this codebase are not licensed under the MIT license and governed by the Business Source License 1.1.
+
+See the LICENSE-EE file for details.
```
LICENSE-EE (new file): 67 lines

```diff
@@ -0,0 +1,67 @@
+Business Source License 1.1 – Adapted for Meili SAS
+This license is based on the Business Source License version 1.1, as published by MariaDB Corporation Ab.
+
+Parameters
+
+Licensor: Meili SAS
+
+Licensed Work: Any file explicitly marked as “Enterprise Edition (EE)” or “governed by the Business Source License” residing in enterprise_editions modules/folders.
+
+Additional Use Grant:
+You may use, modify, and distribute the Licensed Work for non-production purposes only, such as testing, development, or evaluation.
+
+Production use of the Licensed Work requires a commercial license agreement with Meilisearch. Contact bonjour@meilisearch.com for licensing.
+
+Change License: MIT
+
+Change Date: Four years from the date the Licensed Work is published.
+
+This License does not apply to any code outside of the Licensed Work, which remains under the MIT license.
+
+For information about alternative licensing arrangements for the Licensed Work,
+please contact bonjour@meilisearch.com or sales@meilisearch.com.
+
+Notice
+
+Business Source License 1.1
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative
+works, redistribute, and make non-production use of the Licensed Work. The
+Licensor may make an Additional Use Grant, above, permitting limited production use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly
+available distribution of a specific version of the Licensed Work under this
+License, whichever comes first, the Licensor hereby grants you rights under
+the terms of the Change License, and the rights granted in the paragraph
+above terminate.
+
+If your use of the Licensed Work does not comply with the requirements
+currently in effect as described in this License, you must purchase a
+commercial license from the Licensor, its affiliated entities, or authorized
+resellers, or you must refrain from using the Licensed Work.
+
+All copies of the original and modified Licensed Work, and derivative works
+of the Licensed Work, are subject to this License. This License applies
+separately for each version of the Licensed Work and the Change Date may vary
+for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy
+of the Licensed Work. If you receive the Licensed Work in original or
+modified form from a third party, the terms and conditions set forth in this
+License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
```
README.md: 20 changed lines

```diff
@@ -89,6 +89,26 @@ We also offer a wide range of dedicated guides to all Meilisearch features, such
 
 Finally, for more in-depth information, refer to our articles explaining fundamental Meilisearch concepts such as [documents](https://www.meilisearch.com/docs/learn/core_concepts/documents?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced) and [indexes](https://www.meilisearch.com/docs/learn/core_concepts/indexes?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced).
 
+## 🧾 Editions & Licensing
+
+Meilisearch is available in two editions:
+
+### 🧪 Community Edition (CE)
+
+- Fully open source under the [MIT license](./LICENSE)
+- Core search engine with fast and relevant full-text, semantic or hybrid search
+- Free to use for anyone, including commercial usage
+
+### 🏢 Enterprise Edition (EE)
+
+- Includes advanced features such as:
+  - Sharding
+- Governed by a [commercial license](./LICENSE-EE) or the [Business Source License 1.1](https://mariadb.com/bsl11)
+- Not allowed in production without a commercial agreement with Meilisearch.
+- You may use, modify, and distribute the Licensed Work for non-production purposes only, such as testing, development, or evaluation.
+
+Want access to Enterprise features? → Contact us at [sales@meilisearch.com](maito:sales@meilisearch.com).
+
 ## 📊 Telemetry
 
 Meilisearch collects **anonymized** user data to help us improve our product. You can [deactivate this](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) whenever you want.
```
```diff
@@ -154,6 +154,7 @@ fn indexing_songs_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -221,6 +222,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -266,6 +268,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -335,6 +338,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -412,6 +416,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -457,6 +462,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -498,6 +504,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -566,6 +573,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -633,6 +641,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -700,6 +709,7 @@ fn indexing_wiki(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -766,6 +776,7 @@ fn reindexing_wiki(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -811,6 +822,7 @@ fn reindexing_wiki(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -879,6 +891,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -956,6 +969,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1002,6 +1016,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1044,6 +1059,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1111,6 +1127,7 @@ fn indexing_movies_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1177,6 +1194,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1222,6 +1240,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1290,6 +1309,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1404,6 +1424,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1449,6 +1470,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1490,6 +1512,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1580,6 +1603,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1671,6 +1695,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1754,6 +1779,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1821,6 +1847,7 @@ fn indexing_geo(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1887,6 +1914,7 @@ fn reindexing_geo(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -1932,6 +1960,7 @@ fn reindexing_geo(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
@@ -2000,6 +2029,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
                 &mut new_fields_ids_map,
                 &|| false,
                 Progress::default(),
+                None,
             )
             .unwrap();
 
```
```diff
@@ -123,6 +123,7 @@ pub fn base_setup(conf: &Conf) -> Index {
         &mut new_fields_ids_map,
         &|| false,
         Progress::default(),
+        None,
     )
     .unwrap();
 
```
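All of the hunks above make the same mechanical change: the indexing call gained one trailing optional argument, and every benchmark passes `None` to keep its previous behavior. A stripped-down sketch of the pattern; the function and parameter names here are illustrative, not milli's real signature:

```rust
#[derive(Default)]
struct Progress;

// Hypothetical stand-in for the indexing entry point. The shape of the
// change is what matters: one new trailing `Option` parameter.
fn index_documents(must_stop: &dyn Fn() -> bool, progress: Progress, extra: Option<()>) {
    // When the new argument is `None`, behavior matches the old call sites.
    let _ = (must_stop(), progress, extra);
}

fn main() {
    // Mirrors the benches: `&|| false, Progress::default(), None`.
    index_documents(&|| false, Progress::default(), None);
}
```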
@@ -10,7 +10,7 @@ use meilisearch_types::keys::Key;
|
|||||||
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
||||||
use meilisearch_types::settings::Unchecked;
|
use meilisearch_types::settings::Unchecked;
|
||||||
use meilisearch_types::tasks::{
|
use meilisearch_types::tasks::{
|
||||||
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId,
|
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId, TaskNetwork,
|
||||||
};
|
};
|
||||||
use meilisearch_types::InstanceUid;
|
use meilisearch_types::InstanceUid;
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
@@ -94,6 +94,8 @@ pub struct TaskDump {
|
|||||||
default
|
default
|
||||||
)]
|
)]
|
||||||
pub finished_at: Option<OffsetDateTime>,
|
pub finished_at: Option<OffsetDateTime>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub network: Option<TaskNetwork>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// A `Kind` specific version made for the dump. If modified you may break the dump.
|
// A `Kind` specific version made for the dump. If modified you may break the dump.
|
||||||
@@ -129,6 +131,7 @@ pub enum KindDump {
|
|||||||
},
|
},
|
||||||
IndexUpdate {
|
IndexUpdate {
|
||||||
primary_key: Option<String>,
|
primary_key: Option<String>,
|
||||||
|
uid: Option<String>,
|
||||||
},
|
},
|
||||||
IndexSwap {
|
IndexSwap {
|
||||||
swaps: Vec<IndexSwap>,
|
swaps: Vec<IndexSwap>,
|
||||||
@@ -155,6 +158,10 @@ pub enum KindDump {
|
|||||||
UpgradeDatabase {
|
UpgradeDatabase {
|
||||||
from: (u32, u32, u32),
|
from: (u32, u32, u32),
|
||||||
},
|
},
|
||||||
|
NetworkTopologyChange {
|
||||||
|
network: Option<meilisearch_types::enterprise_edition::network::Network>,
|
||||||
|
origin: Option<meilisearch_types::tasks::Origin>,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<Task> for TaskDump {
|
impl From<Task> for TaskDump {
|
||||||
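A side note on the serde attributes above: `default` plus `skip_serializing_if = "Option::is_none"` keeps dumps written by older versions readable while keeping new dumps free of `"network": null` noise. A minimal sketch of that behaviour, with a stand-in type rather than the real `TaskDump`/`TaskNetwork`:

```rust
use serde::{Deserialize, Serialize};

// Stand-in for the dump struct above; `String` replaces `TaskNetwork` for brevity.
#[derive(Serialize, Deserialize)]
struct Sketch {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    network: Option<String>,
}

fn main() {
    // `None` is omitted entirely on serialization.
    let json = serde_json::to_string(&Sketch { network: None }).unwrap();
    assert_eq!(json, "{}");
    // A missing key deserializes back to `None` thanks to `default`.
    let back: Sketch = serde_json::from_str("{}").unwrap();
    assert!(back.network.is_none());
}
```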
```diff
@@ -171,6 +178,7 @@ impl From<Task> for TaskDump {
             enqueued_at: task.enqueued_at,
             started_at: task.started_at,
             finished_at: task.finished_at,
+            network: task.network,
         }
     }
 }

@@ -210,8 +218,8 @@ impl From<KindWithContent> for KindDump {
             KindWithContent::IndexCreation { primary_key, .. } => {
                 KindDump::IndexCreation { primary_key }
             }
-            KindWithContent::IndexUpdate { primary_key, .. } => {
-                KindDump::IndexUpdate { primary_key }
+            KindWithContent::IndexUpdate { primary_key, new_index_uid: uid, .. } => {
+                KindDump::IndexUpdate { primary_key, uid }
             }
             KindWithContent::IndexSwap { swaps } => KindDump::IndexSwap { swaps },
             KindWithContent::TaskCancelation { query, tasks } => {

@@ -236,6 +244,9 @@ impl From<KindWithContent> for KindDump {
             KindWithContent::UpgradeDatabase { from: version } => {
                 KindDump::UpgradeDatabase { from: version }
             }
+            KindWithContent::NetworkTopologyChange { network, origin } => {
+                KindDump::NetworkTopologyChange { network, origin }
+            }
         }
     }
 }
```
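To make the new mapping concrete, a hedged sketch of the conversion for an index update (fields on the `KindWithContent` side other than those named in the hunk are assumptions):

```rust
// Hypothetical values; `index_uid` is assumed to exist on the task side and
// is deliberately dropped by the `..` pattern in the conversion above.
let kind = KindWithContent::IndexUpdate {
    index_uid: "movies".to_string(),
    primary_key: Some("id".to_string()),
    new_index_uid: Some("films".to_string()),
};
// After `KindDump::from(kind)`, the dump carries the rename target as `uid`:
// KindDump::IndexUpdate { primary_key: Some("id"), uid: Some("films") }
```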
@@ -249,8 +260,9 @@ pub(crate) mod test {
|
|||||||
use big_s::S;
|
use big_s::S;
|
||||||
use maplit::{btreemap, btreeset};
|
use maplit::{btreemap, btreeset};
|
||||||
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
|
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
|
||||||
|
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
|
||||||
use meilisearch_types::facet_values_sort::FacetValuesSort;
|
use meilisearch_types::facet_values_sort::FacetValuesSort;
|
||||||
use meilisearch_types::features::{Network, Remote, RuntimeTogglableFeatures};
|
use meilisearch_types::features::RuntimeTogglableFeatures;
|
||||||
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
||||||
use meilisearch_types::keys::{Action, Key};
|
use meilisearch_types::keys::{Action, Key};
|
||||||
use meilisearch_types::milli::update::Setting;
|
use meilisearch_types::milli::update::Setting;
|
||||||
@@ -326,6 +338,7 @@ pub(crate) mod test {
|
|||||||
facet_search: Setting::NotSet,
|
facet_search: Setting::NotSet,
|
||||||
prefix_search: Setting::NotSet,
|
prefix_search: Setting::NotSet,
|
||||||
chat: Setting::NotSet,
|
chat: Setting::NotSet,
|
||||||
|
vector_store: Setting::NotSet,
|
||||||
_kind: std::marker::PhantomData,
|
_kind: std::marker::PhantomData,
|
||||||
};
|
};
|
||||||
settings.check()
|
settings.check()
|
||||||
@@ -383,6 +396,7 @@ pub(crate) mod test {
|
|||||||
enqueued_at: datetime!(2022-11-11 0:00 UTC),
|
enqueued_at: datetime!(2022-11-11 0:00 UTC),
|
||||||
started_at: Some(datetime!(2022-11-20 0:00 UTC)),
|
started_at: Some(datetime!(2022-11-20 0:00 UTC)),
|
||||||
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
|
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
|
||||||
|
network: None,
|
||||||
},
|
},
|
||||||
None,
|
None,
|
||||||
),
|
),
|
||||||
@@ -407,6 +421,7 @@ pub(crate) mod test {
|
|||||||
enqueued_at: datetime!(2022-11-11 0:00 UTC),
|
enqueued_at: datetime!(2022-11-11 0:00 UTC),
|
||||||
started_at: None,
|
started_at: None,
|
||||||
finished_at: None,
|
finished_at: None,
|
||||||
|
network: None,
|
||||||
},
|
},
|
||||||
Some(vec![
|
Some(vec![
|
||||||
json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
|
json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
|
||||||
@@ -426,6 +441,7 @@ pub(crate) mod test {
|
|||||||
enqueued_at: datetime!(2022-11-15 0:00 UTC),
|
enqueued_at: datetime!(2022-11-15 0:00 UTC),
|
||||||
started_at: None,
|
started_at: None,
|
||||||
finished_at: None,
|
finished_at: None,
|
||||||
|
network: None,
|
||||||
},
|
},
|
||||||
None,
|
None,
|
||||||
),
|
),
|
||||||
@@ -535,10 +551,11 @@ pub(crate) mod test {
|
|||||||
RuntimeTogglableFeatures::default()
|
RuntimeTogglableFeatures::default()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_test_network() -> Network {
|
fn create_test_network() -> DbNetwork {
|
||||||
Network {
|
DbNetwork {
|
||||||
local: Some("myself".to_string()),
|
local: Some("myself".to_string()),
|
||||||
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()) }},
|
remotes: maplit::btreemap! {"other".to_string() => DbRemote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
|
||||||
|
sharding: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
```diff
@@ -97,6 +97,7 @@ impl CompatV2ToV3 {
     }
 }

+#[allow(clippy::large_enum_variant)]
 pub enum CompatIndexV2ToV3 {
     V2(v2::V2IndexReader),
     Compat(Box<CompatIndexV1ToV2>),
```
```diff
@@ -85,7 +85,7 @@ impl CompatV5ToV6 {
                             v6::Kind::IndexCreation { primary_key }
                         }
                         v5::tasks::TaskContent::IndexUpdate { primary_key, .. } => {
-                            v6::Kind::IndexUpdate { primary_key }
+                            v6::Kind::IndexUpdate { primary_key, uid: None }
                         }
                         v5::tasks::TaskContent::IndexDeletion { .. } => v6::Kind::IndexDeletion,
                         v5::tasks::TaskContent::DocumentAddition {

@@ -140,9 +140,11 @@ impl CompatV5ToV6 {
                         v5::Details::Settings { settings } => {
                             v6::Details::SettingsUpdate { settings: Box::new(settings.into()) }
                         }
-                        v5::Details::IndexInfo { primary_key } => {
-                            v6::Details::IndexInfo { primary_key }
-                        }
+                        v5::Details::IndexInfo { primary_key } => v6::Details::IndexInfo {
+                            primary_key,
+                            new_index_uid: None,
+                            old_index_uid: None,
+                        },
                         v5::Details::DocumentDeletion {
                             received_document_ids,
                             deleted_documents,

@@ -161,6 +163,7 @@ impl CompatV5ToV6 {
             enqueued_at: task_view.enqueued_at,
             started_at: task_view.started_at,
             finished_at: task_view.finished_at,
+            network: None,
         };

         (task, content_file)

@@ -418,6 +421,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
             facet_search: v6::Setting::NotSet,
             prefix_search: v6::Setting::NotSet,
             chat: v6::Setting::NotSet,
+            vector_store: v6::Setting::NotSet,
             _kind: std::marker::PhantomData,
         }
     }
```
```diff
@@ -4,7 +4,7 @@ use std::io::{BufRead, BufReader, ErrorKind};
 use std::path::Path;

 pub use meilisearch_types::milli;
-use meilisearch_types::milli::vector::hf::OverridePooling;
+use meilisearch_types::milli::vector::embedder::hf::OverridePooling;
 use tempfile::TempDir;
 use time::OffsetDateTime;
 use tracing::debug;

@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
 pub type Key = meilisearch_types::keys::Key;
 pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
 pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
-pub type Network = meilisearch_types::features::Network;
+pub type Network = meilisearch_types::enterprise_edition::network::DbNetwork;
 pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;

 // ===== Other types to clarify the code of the compat module

@@ -5,7 +5,8 @@ use std::path::PathBuf;
 use flate2::write::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
-use meilisearch_types::features::{ChatCompletionSettings, Network, RuntimeTogglableFeatures};
+use meilisearch_types::enterprise_edition::network::DbNetwork;
+use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
 use meilisearch_types::keys::Key;
 use meilisearch_types::settings::{Checked, Settings};
 use meilisearch_types::webhooks::WebhooksDumpView;

@@ -71,7 +72,7 @@ impl DumpWriter {
         )?)
     }

-    pub fn create_network(&self, network: Network) -> Result<()> {
+    pub fn create_network(&self, network: DbNetwork) -> Result<()> {
         Ok(std::fs::write(self.dir.path().join("network.json"), serde_json::to_string(&network)?)?)
     }

@@ -148,11 +148,10 @@ impl File {
         Ok(Self { path: PathBuf::new(), file: None })
     }

-    pub fn persist(self) -> Result<()> {
-        if let Some(file) = self.file {
-            file.persist(&self.path)?;
-        }
-        Ok(())
+    pub fn persist(self) -> Result<Option<StdFile>> {
+        let Some(file) = self.file else { return Ok(None) };
+
+        Ok(Some(file.persist(&self.path)?))
     }
 }
```
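The `persist` change is the one behavioural API change in this group: callers now get the persisted `std::fs::File` back instead of `()`. A hedged caller sketch (the caller itself is hypothetical; `File` and `StdFile` as in the hunk above, and the crate's `Result` alias is assumed to wrap `std::io::Error`):

```rust
// Hypothetical caller of the new signature.
fn persist_and_sync(file: File) -> Result<()> {
    if let Some(std_file) = file.persist()? {
        // The handle is now available for post-persist work, e.g. flushing
        // to disk; callers that don't need it can simply drop it.
        std_file.sync_all()?;
    }
    Ok(())
}
```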
```diff
@@ -15,6 +15,7 @@ license.workspace = true
 nom = "7.1.3"
 nom_locate = "4.2.0"
 unescaper = "0.1.6"
+levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }

 [dev-dependencies]
 # fixed version due to format breakages in v1.40
```
```diff
@@ -7,12 +7,14 @@

 use nom::branch::alt;
 use nom::bytes::complete::tag;
-use nom::character::complete::multispace1;
-use nom::combinator::cut;
-use nom::sequence::{terminated, tuple};
+use nom::character::complete::{char, multispace0, multispace1};
+use nom::combinator::{cut, map, value};
+use nom::sequence::{preceded, terminated, tuple};
 use Condition::*;

-use crate::{parse_value, FilterCondition, IResult, Span, Token};
+use crate::error::IResultExt;
+use crate::value::{parse_vector_value, parse_vector_value_cut};
+use crate::{parse_value, Error, ErrorKind, FilterCondition, IResult, Span, Token, VectorFilter};

 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Condition<'a> {
```
```diff
@@ -113,6 +115,83 @@ pub fn parse_not_exists(input: Span) -> IResult<FilterCondition> {
     Ok((input, FilterCondition::Not(Box::new(FilterCondition::Condition { fid: key, op: Exists }))))
 }

+fn parse_vectors(input: Span) -> IResult<(Token, Option<Token>, VectorFilter)> {
+    let (input, _) = multispace0(input)?;
+    let (input, fid) = tag("_vectors")(input)?;
+
+    if let Ok((input, _)) = multispace1::<_, crate::Error>(input) {
+        return Ok((input, (Token::from(fid), None, VectorFilter::None)));
+    }
+
+    let (input, _) = char('.')(input)?;
+
+    // From this point, we are certain this is a vector filter, so our errors must be final.
+    // We could use nom's `cut` but it's better to be explicit about the errors
+
+    if let Ok((_, space)) = tag::<_, _, ()>(" ")(input) {
+        return Err(crate::Error::failure_from_kind(space, ErrorKind::VectorFilterMissingEmbedder));
+    }
+
+    let (input, embedder_name) =
+        parse_vector_value_cut(input, ErrorKind::VectorFilterInvalidEmbedder)?;
+
+    let (input, filter) = alt((
+        map(
+            preceded(tag(".fragments"), |input| {
+                let (input, _) = tag(".")(input).map_cut(ErrorKind::VectorFilterMissingFragment)?;
+                parse_vector_value_cut(input, ErrorKind::VectorFilterInvalidFragment)
+            }),
+            VectorFilter::Fragment,
+        ),
+        value(VectorFilter::UserProvided, tag(".userProvided")),
+        value(VectorFilter::DocumentTemplate, tag(".documentTemplate")),
+        value(VectorFilter::Regenerate, tag(".regenerate")),
+        value(VectorFilter::None, nom::combinator::success("")),
+    ))(input)?;
+
+    if let Ok((input, point)) = tag::<_, _, ()>(".")(input) {
+        let opt_value = parse_vector_value(input).ok().map(|(_, v)| v);
+        let value =
+            opt_value.as_ref().map(|v| v.value().to_owned()).unwrap_or_else(|| point.to_string());
+        let context = opt_value.map(|v| v.original_span()).unwrap_or(point);
+        let previous_kind = match filter {
+            VectorFilter::Fragment(_) => Some("fragments"),
+            VectorFilter::DocumentTemplate => Some("documentTemplate"),
+            VectorFilter::UserProvided => Some("userProvided"),
+            VectorFilter::Regenerate => Some("regenerate"),
+            VectorFilter::None => None,
+        };
+        return Err(Error::failure_from_kind(
+            context,
+            ErrorKind::VectorFilterUnknownSuffix(previous_kind, value),
+        ));
+    }
+
+    let (input, _) = multispace1(input).map_cut(ErrorKind::VectorFilterLeftover)?;
+
+    Ok((input, (Token::from(fid), Some(embedder_name), filter)))
+}
+
+/// vectors_exists = vectors ("EXISTS" | ("NOT" WS+ "EXISTS"))
+pub fn parse_vectors_exists(input: Span) -> IResult<FilterCondition> {
+    let (input, (fid, embedder, filter)) = parse_vectors(input)?;
+
+    // Try parsing "EXISTS" first
+    if let Ok((input, _)) = tag::<_, _, ()>("EXISTS")(input) {
+        return Ok((input, FilterCondition::VectorExists { fid, embedder, filter }));
+    }
+
+    // Try parsing "NOT EXISTS"
+    if let Ok((input, _)) = tuple::<_, _, (), _>((tag("NOT"), multispace1, tag("EXISTS")))(input) {
+        return Ok((
+            input,
+            FilterCondition::Not(Box::new(FilterCondition::VectorExists { fid, embedder, filter })),
+        ));
+    }
+
+    Err(crate::Error::failure_from_kind(input, ErrorKind::VectorFilterOperation))
+}
+
 /// contains = value "CONTAINS" value
 pub fn parse_contains(input: Span) -> IResult<FilterCondition> {
     let (input, (fid, contains, value)) =
```
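Summarising the grammar this parser adds (the strings below come from the snapshot tests further down; `FilterCondition::parse` is the crate's existing entry point):

```rust
// All of these are now accepted:
//   _vectors EXISTS
//   _vectors NOT EXISTS
//   _vectors.embedderName EXISTS
//   _vectors.embedderName.userProvided EXISTS
//   _vectors.embedderName.documentTemplate EXISTS
//   _vectors.embedderName.regenerate EXISTS
//   _vectors.embedderName.fragments.fragmentName EXISTS
let filter = FilterCondition::parse("_vectors.embedderName.fragments.fragmentName EXISTS");
assert!(filter.is_ok());
```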
```diff
@@ -42,6 +42,23 @@ pub fn cut_with_err<'a, O>(
     }
 }

+pub trait IResultExt<'a> {
+    fn map_cut(self, kind: ErrorKind<'a>) -> Self;
+}
+
+impl<'a, T> IResultExt<'a> for IResult<'a, T> {
+    fn map_cut(self, kind: ErrorKind<'a>) -> Self {
+        self.map_err(move |e: nom::Err<Error<'a>>| {
+            let input = match e {
+                nom::Err::Incomplete(_) => return e,
+                nom::Err::Error(e) => *e.context(),
+                nom::Err::Failure(e) => *e.context(),
+            };
+            Error::failure_from_kind(input, kind)
+        })
+    }
+}
+
 #[derive(Debug)]
 pub struct Error<'a> {
     context: Span<'a>,
```
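The point of `map_cut` is to turn any recoverable error from an inner parser into a final `nom::Err::Failure` carrying a chosen `ErrorKind`. The hunks below use it to collapse the old `cut_with_err` boilerplate into a single call; compare the before/after taken from `parse_in_body`:

```rust
// Before (removed in `parse_in_body` below):
let (input, _) =
    cut_with_err(tag("["), |_| Error::new_from_kind(input, ErrorKind::InOpeningBracket))(input)?;

// After: one combinator call, same final-failure semantics.
let (input, _) = tag("[")(input).map_cut(ErrorKind::InOpeningBracket)?;
```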
```diff
@@ -58,9 +75,21 @@ pub enum ExpectedValueKind {
 pub enum ErrorKind<'a> {
     ReservedGeo(&'a str),
     GeoRadius,
+    GeoRadiusArgumentCount(usize),
     GeoBoundingBox,
+    GeoPolygon,
+    GeoPolygonNotEnoughPoints(usize),
+    GeoCoordinatesNotPair(usize),
     MisusedGeoRadius,
     MisusedGeoBoundingBox,
+    VectorFilterLeftover,
+    VectorFilterInvalidQuotes,
+    VectorFilterMissingEmbedder,
+    VectorFilterInvalidEmbedder,
+    VectorFilterMissingFragment,
+    VectorFilterInvalidFragment,
+    VectorFilterUnknownSuffix(Option<&'static str>, String),
+    VectorFilterOperation,
     InvalidPrimary,
     InvalidEscapedNumber,
     ExpectedEof,
```
```diff
@@ -91,6 +120,10 @@ impl<'a> Error<'a> {
         Self { context, kind }
     }

+    pub fn failure_from_kind(context: Span<'a>, kind: ErrorKind<'a>) -> nom::Err<Self> {
+        nom::Err::Failure(Self::new_from_kind(context, kind))
+    }
+
     pub fn new_from_external(context: Span<'a>, error: impl std::error::Error) -> Self {
         Self::new_from_kind(context, ErrorKind::External(error.to_string()))
     }
```
```diff
@@ -128,6 +161,20 @@ impl Display for Error<'_> {
         // first line being the diagnostic and the second line being the incriminated filter.
         let escaped_input = input.escape_debug();

+        fn key_suggestion<'a>(key: &str, keys: &[&'a str]) -> Option<&'a str> {
+            let typos =
+                levenshtein_automata::LevenshteinAutomatonBuilder::new(2, true).build_dfa(key);
+            for key in keys.iter() {
+                match typos.eval(key) {
+                    levenshtein_automata::Distance::Exact(_) => {
+                        return Some(key);
+                    }
+                    levenshtein_automata::Distance::AtLeast(_) => continue,
+                }
+            }
+            None
+        }
+
         match &self.kind {
             ErrorKind::ExpectedValue(_) if input.trim().is_empty() => {
                 writeln!(f, "Was expecting a value but instead got nothing.")?

@@ -146,7 +193,7 @@ impl Display for Error<'_> {
             }
             ErrorKind::InvalidPrimary => {
                 let text = if input.trim().is_empty() { "but instead got nothing.".to_string() } else { format!("at `{}`.", escaped_input) };
-                writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` {}", text)?
+                writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` {text}")?
             }
             ErrorKind::InvalidEscapedNumber => {
                 writeln!(f, "Found an invalid escaped sequence number: `{}`.", escaped_input)?

@@ -155,11 +202,23 @@ impl Display for Error<'_> {
                 writeln!(f, "Found unexpected characters at the end of the filter: `{}`. You probably forgot an `OR` or an `AND` rule.", escaped_input)?
             }
             ErrorKind::GeoRadius => {
-                writeln!(f, "The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.")?
+                writeln!(f, "The `_geoRadius` filter must be in the form: `_geoRadius(latitude, longitude, radius, optionalResolution)`.")?
+            }
+            ErrorKind::GeoRadiusArgumentCount(count) => {
+                writeln!(f, "Was expecting 3 or 4 arguments for `_geoRadius`, but instead found {count}.")?
             }
             ErrorKind::GeoBoundingBox => {
                 writeln!(f, "The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.")?
             }
+            ErrorKind::GeoPolygon => {
+                writeln!(f, "The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.")?
+            }
+            ErrorKind::GeoPolygonNotEnoughPoints(n) => {
+                writeln!(f, "The `_geoPolygon` filter expects at least 3 points but only {n} were specified")?;
+            }
+            ErrorKind::GeoCoordinatesNotPair(number) => {
+                writeln!(f, "Was expecting 2 coordinates but instead found {number}.")?
+            }
             ErrorKind::ReservedGeo(name) => {
                 writeln!(f, "`{}` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.", name.escape_debug())?
             }

@@ -169,6 +228,44 @@ impl Display for Error<'_> {
             ErrorKind::MisusedGeoBoundingBox => {
                 writeln!(f, "The `_geoBoundingBox` filter is an operation and can't be used as a value.")?
             }
+            ErrorKind::VectorFilterLeftover => {
+                writeln!(f, "The vector filter has leftover tokens.")?
+            }
+            ErrorKind::VectorFilterUnknownSuffix(_, value) if value.as_str() == "." => {
+                writeln!(f, "Was expecting one of `.fragments`, `.userProvided`, `.documentTemplate`, `.regenerate` or nothing, but instead found a point without a valid value.")?;
+            }
+            ErrorKind::VectorFilterUnknownSuffix(None, value) if ["fragments", "userProvided", "documentTemplate", "regenerate"].contains(&value.as_str()) => {
+                // This will happen with "_vectors.rest.\"userProvided\"" for instance
+                writeln!(f, "Was expecting this part to be unquoted.")?
+            }
+            ErrorKind::VectorFilterUnknownSuffix(None, value) => {
+                if let Some(suggestion) = key_suggestion(value, &["fragments", "userProvided", "documentTemplate", "regenerate"]) {
+                    writeln!(f, "Was expecting one of `fragments`, `userProvided`, `documentTemplate`, `regenerate` or nothing, but instead found `{value}`. Did you mean `{suggestion}`?")?;
+                } else {
+                    writeln!(f, "Was expecting one of `fragments`, `userProvided`, `documentTemplate`, `regenerate` or nothing, but instead found `{value}`.")?;
+                }
+            }
+            ErrorKind::VectorFilterUnknownSuffix(Some(previous_filter_kind), value) => {
+                writeln!(f, "Vector filter can only accept one of `fragments`, `userProvided`, `documentTemplate` or `regenerate`, but found both `{previous_filter_kind}` and `{value}`.")?
+            },
+            ErrorKind::VectorFilterInvalidFragment => {
+                writeln!(f, "The vector filter's fragment name is invalid.")?
+            }
+            ErrorKind::VectorFilterMissingFragment => {
+                writeln!(f, "The vector filter is missing a fragment name.")?
+            }
+            ErrorKind::VectorFilterMissingEmbedder => {
+                writeln!(f, "Was expecting embedder name but found nothing.")?
+            }
+            ErrorKind::VectorFilterInvalidEmbedder => {
+                writeln!(f, "The vector filter's embedder name is invalid.")?
+            }
+            ErrorKind::VectorFilterOperation => {
+                writeln!(f, "Was expecting an operation like `EXISTS` or `NOT EXISTS` after the vector filter.")?
+            }
+            ErrorKind::VectorFilterInvalidQuotes => {
+                writeln!(f, "The quotes in one of the values are inconsistent.")?
+            }
             ErrorKind::ReservedKeyword(word) => {
                 writeln!(f, "`{word}` is a reserved keyword and thus cannot be used as a field name unless it is put inside quotes. Use \"{word}\" or \'{word}\' instead.")?
             }
```
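The `Did you mean ...?` hint is driven by `key_suggestion`, a Levenshtein automaton capped at distance 2 with transpositions enabled. Roughly, per the helper above (hypothetical asserts, not tests from the PR):

```rust
let keys = ["fragments", "userProvided", "documentTemplate", "regenerate"];
assert_eq!(key_suggestion("fragment", &keys), Some("fragments")); // one insertion away
assert_eq!(key_suggestion("regenrate", &keys), Some("regenerate")); // one deletion away
assert_eq!(key_suggestion("banana", &keys), None); // beyond distance 2 from every key
```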
```diff
@@ -19,6 +19,7 @@
 //! word = (alphanumeric | _ | - | .)+
 //! geoRadius = "_geoRadius(" WS* float WS* "," WS* float WS* "," float WS* ")"
 //! geoBoundingBox = "_geoBoundingBox([" WS * float WS* "," WS* float WS* "], [" WS* float WS* "," WS* float WS* "]")
+//! geoPolygon = "_geoPolygon([[" WS* float WS* "," WS* float WS* "],+])"
 //! ```
 //!
 //! Other BNF grammar used to handle some specific errors:

@@ -65,6 +66,9 @@ use nom_locate::LocatedSpan;
 pub(crate) use value::parse_value;
 use value::word_exact;

+use crate::condition::parse_vectors_exists;
+use crate::error::IResultExt;
+
 pub type Span<'a> = LocatedSpan<&'a str, &'a str>;

 type IResult<'a, Ret> = nom::IResult<Span<'a>, Ret, Error<'a>>;
```
```diff
@@ -113,7 +117,7 @@ impl<'a> Token<'a> {
         self.span
     }

-    pub fn parse_finite_float(&self) -> Result<f64, Error> {
+    pub fn parse_finite_float(&self) -> Result<f64, Error<'a>> {
         let value: f64 = self.value().parse().map_err(|e| self.as_external_error(e))?;
         if value.is_finite() {
             Ok(value)
```
```diff
@@ -136,6 +140,15 @@ impl<'a> From<&'a str> for Token<'a> {
     }
 }

+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum VectorFilter<'a> {
+    Fragment(Token<'a>),
+    DocumentTemplate,
+    UserProvided,
+    Regenerate,
+    None,
+}
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum FilterCondition<'a> {
     Not(Box<Self>),

@@ -143,8 +156,10 @@ pub enum FilterCondition<'a> {
     In { fid: Token<'a>, els: Vec<Token<'a>> },
     Or(Vec<Self>),
     And(Vec<Self>),
-    GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a> },
+    VectorExists { fid: Token<'a>, embedder: Option<Token<'a>>, filter: VectorFilter<'a> },
+    GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a>, resolution: Option<Token<'a>> },
     GeoBoundingBox { top_right_point: [Token<'a>; 2], bottom_left_point: [Token<'a>; 2] },
+    GeoPolygon { points: Vec<[Token<'a>; 2]> },
 }

 pub enum TraversedElement<'a> {
```
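For orientation, here is roughly what the new AST variants hold once parsing succeeds (illustrative values only; in real use each `Token` carries its span in the input):

```rust
// `_vectors.default.fragments.title EXISTS` parses to something like:
let vectors = FilterCondition::VectorExists {
    fid: Token::from("_vectors"),
    embedder: Some(Token::from("default")),
    filter: VectorFilter::Fragment(Token::from("title")),
};

// `_geoRadius(12, 13, 14, 1000)` now records the optional resolution:
let radius = FilterCondition::GeoLowerThan {
    point: [Token::from("12"), Token::from("13")],
    radius: Token::from("14"),
    resolution: Some(Token::from("1000")),
};
```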
```diff
@@ -153,7 +168,7 @@ pub enum TraversedElement<'a> {
 }

 impl<'a> FilterCondition<'a> {
-    pub fn use_contains_operator(&self) -> Option<&Token> {
+    pub fn use_contains_operator(&self) -> Option<&Token<'a>> {
         match self {
             FilterCondition::Condition { fid: _, op } => match op {
                 Condition::GreaterThan(_)

@@ -173,13 +188,30 @@ impl<'a> FilterCondition<'a> {
             FilterCondition::Or(seq) | FilterCondition::And(seq) => {
                 seq.iter().find_map(|filter| filter.use_contains_operator())
             }
-            FilterCondition::GeoLowerThan { .. }
+            FilterCondition::VectorExists { .. }
+            | FilterCondition::GeoLowerThan { .. }
             | FilterCondition::GeoBoundingBox { .. }
+            | FilterCondition::GeoPolygon { .. }
             | FilterCondition::In { .. } => None,
         }
     }

-    pub fn fids(&self, depth: usize) -> Box<dyn Iterator<Item = &Token> + '_> {
+    pub fn use_vector_filter(&self) -> Option<&Token<'a>> {
+        match self {
+            FilterCondition::Condition { .. } => None,
+            FilterCondition::Not(this) => this.use_vector_filter(),
+            FilterCondition::Or(seq) | FilterCondition::And(seq) => {
+                seq.iter().find_map(|filter| filter.use_vector_filter())
+            }
+            FilterCondition::GeoLowerThan { .. }
+            | FilterCondition::GeoBoundingBox { .. }
+            | FilterCondition::GeoPolygon { .. }
+            | FilterCondition::In { .. } => None,
+            FilterCondition::VectorExists { fid, .. } => Some(fid),
+        }
+    }
+
+    pub fn fids(&self, depth: usize) -> Box<dyn Iterator<Item = &Token<'a>> + '_> {
         if depth == 0 {
             return Box::new(std::iter::empty());
         }
```
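`use_vector_filter` mirrors `use_contains_operator`: it walks the tree and surfaces the first `_vectors` token so callers can gate or reject the feature. A hedged usage sketch (the caller is hypothetical; `FilterCondition::parse` is the crate's entry point):

```rust
if let Ok(Some(condition)) = FilterCondition::parse("x = 1 AND _vectors.default EXISTS") {
    if let Some(token) = condition.use_vector_filter() {
        // A `_vectors` clause is present somewhere in the tree.
        eprintln!("vector filter used at `{}`", token.value());
    }
}
```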
```diff
@@ -200,7 +232,7 @@ impl<'a> FilterCondition<'a> {
     }

     /// Returns the first token found at the specified depth, `None` if no token at this depth.
-    pub fn token_at_depth(&self, depth: usize) -> Option<&Token> {
+    pub fn token_at_depth(&self, depth: usize) -> Option<&Token<'a>> {
         match self {
             FilterCondition::Condition { fid, .. } if depth == 0 => Some(fid),
             FilterCondition::Or(subfilters) => {
```
```diff
@@ -263,10 +295,7 @@ fn parse_in_body(input: Span) -> IResult<Vec<Token>> {
     let (input, _) = ws(word_exact("IN"))(input)?;

     // everything after `IN` can be a failure
-    let (input, _) =
-        cut_with_err(tag("["), |_| Error::new_from_kind(input, ErrorKind::InOpeningBracket))(
-            input,
-        )?;
+    let (input, _) = tag("[")(input).map_cut(ErrorKind::InOpeningBracket)?;

     let (input, content) = cut(parse_value_list)(input)?;
```
```diff
@@ -371,23 +400,27 @@ fn parse_not(input: Span, depth: usize) -> IResult<FilterCondition> {
 /// If we parse `_geoRadius` we MUST parse the rest of the expression.
 fn parse_geo_radius(input: Span) -> IResult<FilterCondition> {
     // we want to allow space BEFORE the _geoRadius but not after
-    let parsed = preceded(
-        tuple((multispace0, word_exact("_geoRadius"))),
-        // if we were able to parse `_geoRadius` and can't parse the rest of the input we return a failure
-        cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
-    )(input)
-    .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::GeoRadius)));
+    let (input, _) = tuple((multispace0, word_exact("_geoRadius")))(input)?;
+
+    // if we were able to parse `_geoRadius` and can't parse the rest of the input we return a failure
+    let parsed =
+        delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))(input)
+            .map_cut(ErrorKind::GeoRadius);

     let (input, args) = parsed?;

-    if args.len() != 3 {
-        return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::GeoRadius)));
+    if !(3..=4).contains(&args.len()) {
+        return Err(Error::failure_from_kind(input, ErrorKind::GeoRadiusArgumentCount(args.len())));
     }

     let res = FilterCondition::GeoLowerThan {
         point: [args[0].into(), args[1].into()],
         radius: args[2].into(),
+        resolution: args.get(3).cloned().map(Token::from),
     };

     Ok((input, res))
 }
```
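Concretely, per the snapshot tests further down, both radius forms now parse, and a wrong argument count reports the new dedicated error:

```rust
// Accepted: 3 or 4 arguments (the 4th is the optional resolution).
assert!(FilterCondition::parse("_geoRadius(12, 13, 14)").is_ok());
assert!(FilterCondition::parse("_geoRadius(12,13,14,1000)").is_ok());
// Rejected: anything else now reports GeoRadiusArgumentCount, e.g.
// "Was expecting 3 or 4 arguments for `_geoRadius`, but instead found 2."
assert!(FilterCondition::parse("_geoRadius(12, 13)").is_err());
```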
```diff
@@ -395,24 +428,31 @@ fn parse_geo_radius(input: Span) -> IResult<FilterCondition> {
 /// If we parse `_geoBoundingBox` we MUST parse the rest of the expression.
 fn parse_geo_bounding_box(input: Span) -> IResult<FilterCondition> {
     // we want to allow space BEFORE the _geoBoundingBox but not after
-    let parsed = preceded(
-        tuple((multispace0, word_exact("_geoBoundingBox"))),
-        // if we were able to parse `_geoBoundingBox` and can't parse the rest of the input we return a failure
-        cut(delimited(
-            char('('),
-            separated_list1(
-                tag(","),
-                ws(delimited(char('['), separated_list1(tag(","), ws(recognize_float)), char(']'))),
-            ),
-            char(')'),
-        )),
-    )(input)
-    .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::GeoBoundingBox)));
+    let (input, _) = tuple((multispace0, word_exact("_geoBoundingBox")))(input)?;
+
+    // if we were able to parse `_geoBoundingBox` and can't parse the rest of the input we return a failure
+    let (input, args) = delimited(
+        char('('),
+        separated_list1(
+            tag(","),
+            ws(delimited(char('['), separated_list1(tag(","), ws(recognize_float)), char(']'))),
+        ),
+        char(')'),
+    )(input)
+    .map_cut(ErrorKind::GeoBoundingBox)?;

-    let (input, args) = parsed?;
+    if args.len() != 2 {
+        return Err(Error::failure_from_kind(input, ErrorKind::GeoBoundingBox));
+    }

-    if args.len() != 2 || args[0].len() != 2 || args[1].len() != 2 {
-        return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::GeoBoundingBox)));
+    if let Some(offending) = args.iter().find(|a| a.len() != 2) {
+        let context = offending.first().unwrap_or(&input);
+        return Err(Error::failure_from_kind(
+            *context,
+            ErrorKind::GeoCoordinatesNotPair(offending.len()),
+        ));
     }

     let res = FilterCondition::GeoBoundingBox {
```
```diff
@@ -422,6 +462,47 @@ fn parse_geo_bounding_box(input: Span) -> IResult<FilterCondition> {
     Ok((input, res))
 }

+/// geoPolygon = "_geoPolygon([[" WS* float WS* "," WS* float WS* "],+])"
+/// If we parse `_geoPolygon` we MUST parse the rest of the expression.
+fn parse_geo_polygon(input: Span) -> IResult<FilterCondition> {
+    // we want to allow space BEFORE the _geoPolygon but not after
+    let (input, _) = tuple((multispace0, word_exact("_geoPolygon")))(input)?;
+
+    // if we were able to parse `_geoPolygon` and can't parse the rest of the input we return a failure
+    let (input, args): (_, Vec<Vec<LocatedSpan<_, _>>>) = delimited(
+        char('('),
+        separated_list1(
+            tag(","),
+            ws(delimited(char('['), separated_list1(tag(","), ws(recognize_float)), char(']'))),
+        ),
+        preceded(opt(ws(char(','))), char(')')), // Tolerate trailing comma
+    )(input)
+    .map_cut(ErrorKind::GeoPolygon)?;
+
+    if args.len() < 3 {
+        let context = args.last().and_then(|a| a.last()).unwrap_or(&input);
+        return Err(Error::failure_from_kind(
+            *context,
+            ErrorKind::GeoPolygonNotEnoughPoints(args.len()),
+        ));
+    }
+
+    if let Some(offending) = args.iter().find(|a| a.len() != 2) {
+        let context = offending.first().unwrap_or(&input);
+        return Err(Error::failure_from_kind(
+            *context,
+            ErrorKind::GeoCoordinatesNotPair(offending.len()),
+        ));
+    }
+
+    let res = FilterCondition::GeoPolygon {
+        points: args.into_iter().map(|a| [a[0].into(), a[1].into()]).collect(),
+    };
+    Ok((input, res))
+}
+
 /// geoPoint = WS* "_geoPoint(float WS* "," WS* float WS* "," WS* float)
 fn parse_geo_point(input: Span) -> IResult<FilterCondition> {
     // we want to forbid space BEFORE the _geoPoint but not after
```
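And the polygon counterpart, per the code above and the tests below: at least three `[lat, lng]` pairs, each with exactly two coordinates, and a trailing comma tolerated before the closing parenthesis:

```rust
assert!(FilterCondition::parse("_geoPolygon([12, 13], [14, 15], [16, 17])").is_ok());
assert!(FilterCondition::parse("_geoPolygon([12, 13], [14, 15], [16, 17],)").is_ok()); // trailing comma
// Fewer than 3 points -> GeoPolygonNotEnoughPoints
assert!(FilterCondition::parse("_geoPolygon([1,2],[3,4])").is_err());
// A point that isn't a pair -> GeoCoordinatesNotPair
assert!(FilterCondition::parse("_geoPolygon([1,2],[3,4],[5,6,7])").is_err());
```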
```diff
@@ -433,7 +514,7 @@ fn parse_geo_point(input: Span) -> IResult<FilterCondition> {
     ))(input)
     .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoPoint"))))?;
     // if we succeeded we still return a `Failure` because geoPoints are not allowed
-    Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoPoint"))))
+    Err(Error::failure_from_kind(input, ErrorKind::ReservedGeo("_geoPoint")))
 }

 /// geoPoint = WS* "_geoDistance(float WS* "," WS* float WS* "," WS* float)

@@ -447,7 +528,7 @@ fn parse_geo_distance(input: Span) -> IResult<FilterCondition> {
     ))(input)
     .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoDistance"))))?;
     // if we succeeded we still return a `Failure` because `geoDistance` filters are not allowed
-    Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoDistance"))))
+    Err(Error::failure_from_kind(input, ErrorKind::ReservedGeo("_geoDistance")))
 }

 /// geo = WS* "_geo(float WS* "," WS* float WS* "," WS* float)

@@ -461,7 +542,7 @@ fn parse_geo(input: Span) -> IResult<FilterCondition> {
     ))(input)
     .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::ReservedGeo("_geo"))))?;
     // if we succeeded we still return a `Failure` because `_geo` filter is not allowed
-    Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geo"))))
+    Err(Error::failure_from_kind(input, ErrorKind::ReservedGeo("_geo")))
 }

 fn parse_error_reserved_keyword(input: Span) -> IResult<FilterCondition> {
```
```diff
@@ -491,8 +572,8 @@ fn parse_primary(input: Span, depth: usize) -> IResult<FilterCondition> {
                     Error::new_from_kind(input, ErrorKind::MissingClosingDelimiter(c.char()))
                 }),
             ),
-            parse_geo_radius,
-            parse_geo_bounding_box,
+            // Made a random block of functions because we reached the maximum number of elements per alt
+            alt((parse_geo_radius, parse_geo_bounding_box, parse_geo_polygon)),
             parse_in,
             parse_not_in,
             parse_condition,
```
```diff
@@ -500,8 +581,7 @@ fn parse_primary(input: Span, depth: usize) -> IResult<FilterCondition> {
             parse_is_not_null,
             parse_is_empty,
             parse_is_not_empty,
-            parse_exists,
-            parse_not_exists,
+            alt((parse_vectors_exists, parse_exists, parse_not_exists)),
             parse_to,
             parse_contains,
             parse_not_contains,
```
```diff
@@ -557,9 +637,28 @@ impl std::fmt::Display for FilterCondition<'_> {
                 }
                 write!(f, "]")
             }
-            FilterCondition::GeoLowerThan { point, radius } => {
+            FilterCondition::VectorExists { fid: _, embedder, filter: inner } => {
+                write!(f, "_vectors")?;
+                if let Some(embedder) = embedder {
+                    write!(f, ".{:?}", embedder.value())?;
+                }
+                match inner {
+                    VectorFilter::Fragment(fragment) => {
+                        write!(f, ".fragments.{:?}", fragment.value())?
+                    }
+                    VectorFilter::DocumentTemplate => write!(f, ".documentTemplate")?,
+                    VectorFilter::UserProvided => write!(f, ".userProvided")?,
+                    VectorFilter::Regenerate => write!(f, ".regenerate")?,
+                    VectorFilter::None => (),
+                }
+                write!(f, " EXISTS")
+            }
+            FilterCondition::GeoLowerThan { point, radius, resolution: None } => {
                 write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
             }
+            FilterCondition::GeoLowerThan { point, radius, resolution: Some(resolution) } => {
+                write!(f, "_geoRadius({}, {}, {}, {})", point[0], point[1], radius, resolution)
+            }
             FilterCondition::GeoBoundingBox {
                 top_right_point: top_left_point,
                 bottom_left_point: bottom_right_point,

@@ -573,6 +672,13 @@ impl std::fmt::Display for FilterCondition<'_> {
                     bottom_right_point[1]
                 )
             }
+            FilterCondition::GeoPolygon { points } => {
+                write!(f, "_geoPolygon([")?;
+                for point in points {
+                    write!(f, "[{}, {}], ", point[0], point[1])?;
+                }
+                write!(f, "])")
+            }
         }
     }
 }
```
```diff
@@ -611,7 +717,7 @@ pub mod tests {
     /// Create a raw [Token]. You must specify the string that appear BEFORE your element followed by your element
     pub fn rtok<'a>(before: &'a str, value: &'a str) -> Token<'a> {
         // if the string is empty we still need to return 1 for the line number
-        let lines = before.is_empty().then_some(1).unwrap_or_else(|| before.lines().count());
+        let lines = if before.is_empty() { 1 } else { before.lines().count() };
         let offset = before.chars().count();
         // the extra field is not checked in the tests so we can set it to nothing
         unsafe { Span::new_from_raw_offset(offset, lines as u32, value, "") }.into()
```
```diff
@@ -630,6 +736,9 @@ pub mod tests {
         insta::assert_snapshot!(p(r"title = 'foo\\\\\\\\'"), @r#"{title} = {foo\\\\}"#);
         // but it also works with other sequences
         insta::assert_snapshot!(p(r#"title = 'foo\x20\n\t\"\'"'"#), @"{title} = {foo \n\t\"\'\"}");
+
+        insta::assert_snapshot!(p(r#"_vectors." valid.name ".fragments."also.. valid! " EXISTS"#), @r#"_vectors." valid.name ".fragments."also.. valid! " EXISTS"#);
+        insta::assert_snapshot!(p("_vectors.\"\n\t\r\\\"\" EXISTS"), @r#"_vectors."\n\t\r\"" EXISTS"#);
     }

     #[test]

@@ -692,6 +801,18 @@ pub mod tests {
         insta::assert_snapshot!(p("NOT subscribers IS NOT EMPTY"), @"{subscribers} IS EMPTY");
         insta::assert_snapshot!(p("subscribers IS NOT EMPTY"), @"NOT ({subscribers} IS EMPTY)");

+        // Test _vectors EXISTS + _vectors NOT EXITS
+        insta::assert_snapshot!(p("_vectors EXISTS"), @"_vectors EXISTS");
+        insta::assert_snapshot!(p("_vectors.embedderName EXISTS"), @r#"_vectors."embedderName" EXISTS"#);
+        insta::assert_snapshot!(p("_vectors.embedderName.documentTemplate EXISTS"), @r#"_vectors."embedderName".documentTemplate EXISTS"#);
+        insta::assert_snapshot!(p("_vectors.embedderName.regenerate EXISTS"), @r#"_vectors."embedderName".regenerate EXISTS"#);
+        insta::assert_snapshot!(p("_vectors.embedderName.regenerate EXISTS"), @r#"_vectors."embedderName".regenerate EXISTS"#);
+        insta::assert_snapshot!(p("_vectors.embedderName.fragments.fragmentName EXISTS"), @r#"_vectors."embedderName".fragments."fragmentName" EXISTS"#);
+        insta::assert_snapshot!(p(" _vectors.embedderName.fragments.fragmentName EXISTS"), @r#"_vectors."embedderName".fragments."fragmentName" EXISTS"#);
+        insta::assert_snapshot!(p("NOT _vectors EXISTS"), @"NOT (_vectors EXISTS)");
+        insta::assert_snapshot!(p(" NOT _vectors EXISTS"), @"NOT (_vectors EXISTS)");
+        insta::assert_snapshot!(p(" _vectors NOT EXISTS"), @"NOT (_vectors EXISTS)");
+
         // Test EXISTS + NOT EXITS
         insta::assert_snapshot!(p("subscribers EXISTS"), @"{subscribers} EXISTS");
         insta::assert_snapshot!(p("NOT subscribers EXISTS"), @"NOT ({subscribers} EXISTS)");
```
```diff
@@ -721,12 +842,17 @@ pub mod tests {
         insta::assert_snapshot!(p("_geoRadius(12, 13, 14)"), @"_geoRadius({12}, {13}, {14})");
         insta::assert_snapshot!(p("NOT _geoRadius(12, 13, 14)"), @"NOT (_geoRadius({12}, {13}, {14}))");
         insta::assert_snapshot!(p("_geoRadius(12,13,14)"), @"_geoRadius({12}, {13}, {14})");
+        insta::assert_snapshot!(p("_geoRadius(12,13,14,1000)"), @"_geoRadius({12}, {13}, {14}, {1000})");

         // Test geo bounding box
         insta::assert_snapshot!(p("_geoBoundingBox([12, 13], [14, 15])"), @"_geoBoundingBox([{12}, {13}], [{14}, {15}])");
         insta::assert_snapshot!(p("NOT _geoBoundingBox([12, 13], [14, 15])"), @"NOT (_geoBoundingBox([{12}, {13}], [{14}, {15}]))");
         insta::assert_snapshot!(p("_geoBoundingBox([12,13],[14,15])"), @"_geoBoundingBox([{12}, {13}], [{14}, {15}])");

+        // Test geo polygon
+        insta::assert_snapshot!(p("_geoPolygon([12, 13], [14, 15], [16, 17])"), @"_geoPolygon([[{12}, {13}], [{14}, {15}], [{16}, {17}], ])");
+        insta::assert_snapshot!(p("_geoPolygon([12, 13], [14, 15], [-1.2,2939.2], [1,1])"), @"_geoPolygon([[{12}, {13}], [{14}, {15}], [{-1.2}, {2939.2}], [{1}, {1}], ])");
+
         // Test OR + AND
         insta::assert_snapshot!(p("channel = ponce AND 'dog race' != 'bernese mountain'"), @"AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
         insta::assert_snapshot!(p("channel = ponce OR 'dog race' != 'bernese mountain'"), @"OR[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
```
@@ -783,50 +909,80 @@ pub mod tests {
|
|||||||
11:12 channel = 🐻 AND followers < 100
|
11:12 channel = 🐻 AND followers < 100
|
||||||
"###);
|
"###);
|
||||||
|
|
||||||
insta::assert_snapshot!(p("'OR'"), @r###"
|
insta::assert_snapshot!(p("'OR'"), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `\'OR\'`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `\'OR\'`.
|
||||||
1:5 'OR'
|
1:5 'OR'
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("OR"), @r###"
|
insta::assert_snapshot!(p("OR"), @r###"
|
||||||
Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
|
Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
|
||||||
1:3 OR
|
1:3 OR
|
||||||
"###);
|
"###);
|
||||||
|
|
||||||
insta::assert_snapshot!(p("channel Ponce"), @r###"
|
insta::assert_snapshot!(p("channel Ponce"), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `channel Ponce`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `channel Ponce`.
|
||||||
1:14 channel Ponce
|
1:14 channel Ponce
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("channel = Ponce OR"), @r###"
|
insta::assert_snapshot!(p("channel = Ponce OR"), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` but instead got nothing.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` but instead got nothing.
|
||||||
19:19 channel = Ponce OR
|
19:19 channel = Ponce OR
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("_geoRadius"), @r###"
|
insta::assert_snapshot!(p("_geoRadius"), @r"
|
||||||
The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
|
The `_geoRadius` filter must be in the form: `_geoRadius(latitude, longitude, radius, optionalResolution)`.
|
||||||
1:11 _geoRadius
|
11:11 _geoRadius
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("_geoRadius = 12"), @r###"
|
insta::assert_snapshot!(p("_geoRadius = 12"), @r"
|
||||||
The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
|
The `_geoRadius` filter must be in the form: `_geoRadius(latitude, longitude, radius, optionalResolution)`.
|
||||||
1:16 _geoRadius = 12
|
11:16 _geoRadius = 12
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("_geoBoundingBox"), @r###"
|
insta::assert_snapshot!(p("_geoBoundingBox"), @r"
|
||||||
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
|
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
|
||||||
1:16 _geoBoundingBox
|
16:16 _geoBoundingBox
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("_geoBoundingBox = 12"), @r###"
|
insta::assert_snapshot!(p("_geoBoundingBox = 12"), @r"
|
||||||
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
|
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
|
||||||
1:21 _geoBoundingBox = 12
|
16:21 _geoBoundingBox = 12
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("_geoBoundingBox(1.0, 1.0)"), @r###"
|
insta::assert_snapshot!(p("_geoBoundingBox(1.0, 1.0)"), @r"
|
||||||
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
|
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
|
||||||
1:26 _geoBoundingBox(1.0, 1.0)
|
17:26 _geoBoundingBox(1.0, 1.0)
|
||||||
"###);
|
");
|
||||||
|
|
||||||
|
insta::assert_snapshot!(p("_geoPolygon([1,2,3])"), @r"
|
||||||
|
The `_geoPolygon` filter expects at least 3 points but only 1 were specified
|
||||||
|
18:19 _geoPolygon([1,2,3])
|
||||||
|
");
|
||||||
|
|
||||||
|
insta::assert_snapshot!(p("_geoPolygon(1,2,3)"), @r"
|
||||||
|
The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.
|
||||||
|
13:19 _geoPolygon(1,2,3)
|
||||||
|
");
|
||||||
|
|
||||||
|
insta::assert_snapshot!(p("_geoPolygon([1,2],[1,2],[1,2,3])"), @r"
|
||||||
|
Was expecting 2 coordinates but instead found 3.
|
||||||
|
26:27 _geoPolygon([1,2],[1,2],[1,2,3])
|
||||||
|
");
|
||||||
|
|
||||||
|
insta::assert_snapshot!(p("_geoPolygon([1,2],[1,2,3])"), @r"
|
||||||
|
The `_geoPolygon` filter expects at least 3 points but only 2 were specified
|
||||||
|
24:25 _geoPolygon([1,2],[1,2,3])
|
||||||
|
");
|
||||||
|
|
||||||
|
insta::assert_snapshot!(p("_geoPolygon(1)"), @r"
|
||||||
|
The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.
|
||||||
|
13:15 _geoPolygon(1)
|
||||||
|
");
|
||||||
|
|
||||||
|
insta::assert_snapshot!(p("_geoPolygon([1,2)"), @r"
|
||||||
|
The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.
|
||||||
|
17:18 _geoPolygon([1,2)
|
||||||
|
");
|
||||||
|
|
||||||
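The six snapshots above pin down the new `_geoPolygon` grammar: three or more points, each a bracketed `[latitude, longitude]` pair. A minimal happy-path sketch, assuming the crate's public `FilterCondition::parse` entry point (the filter string is illustrative, not taken from the diff):

    // Hypothetical well-formed polygon: three [lat, lng] pairs, so parsing
    // should succeed where the snapshots above report errors.
    use filter_parser::FilterCondition;

    fn main() {
        let filter = "_geoPolygon([45.0, 5.0], [45.5, 5.0], [45.5, 5.5])";
        assert!(FilterCondition::parse(filter).is_ok());
    }
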
insta::assert_snapshot!(p("_geoPoint(12, 13, 14)"), @r###"
|
insta::assert_snapshot!(p("_geoPoint(12, 13, 14)"), @r###"
|
||||||
`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
|
`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
|
||||||
@@ -883,15 +1039,15 @@ pub mod tests {
|
|||||||
34:35 channel = mv OR followers >= 1000)
|
34:35 channel = mv OR followers >= 1000)
|
||||||
"###);
|
"###);
|
||||||
|
|
||||||
insta::assert_snapshot!(p("colour NOT EXIST"), @r###"
|
insta::assert_snapshot!(p("colour NOT EXIST"), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `colour NOT EXIST`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `colour NOT EXIST`.
|
||||||
1:17 colour NOT EXIST
|
1:17 colour NOT EXIST
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("subscribers 100 TO1000"), @r###"
|
insta::assert_snapshot!(p("subscribers 100 TO1000"), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `subscribers 100 TO1000`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `subscribers 100 TO1000`.
|
||||||
1:23 subscribers 100 TO1000
|
1:23 subscribers 100 TO1000
|
||||||
"###);
|
");
|
||||||
|
|
||||||
insta::assert_snapshot!(p("channel = ponce ORdog != 'bernese mountain'"), @r###"
|
insta::assert_snapshot!(p("channel = ponce ORdog != 'bernese mountain'"), @r###"
|
||||||
Found unexpected characters at the end of the filter: `ORdog != \'bernese mountain\'`. You probably forgot an `OR` or an `AND` rule.
|
Found unexpected characters at the end of the filter: `ORdog != \'bernese mountain\'`. You probably forgot an `OR` or an `AND` rule.
|
||||||
@@ -946,43 +1102,108 @@ pub mod tests {
|
|||||||
"###
|
"###
|
||||||
);
|
);
|
||||||
|
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors _vectors EXISTS"#), @r"
|
||||||
|
Was expecting an operation like `EXISTS` or `NOT EXISTS` after the vector filter.
|
||||||
|
10:25 _vectors _vectors EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors. embedderName EXISTS"#), @r"
|
||||||
|
Was expecting embedder name but found nothing.
|
||||||
|
10:11 _vectors. embedderName EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors .embedderName EXISTS"#), @r"
|
||||||
|
Was expecting an operation like `EXISTS` or `NOT EXISTS` after the vector filter.
|
||||||
|
10:30 _vectors .embedderName EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName. EXISTS"#), @r"
|
||||||
|
Was expecting one of `.fragments`, `.userProvided`, `.documentTemplate`, `.regenerate` or nothing, but instead found a point without a valid value.
|
||||||
|
22:23 _vectors.embedderName. EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors."embedderName EXISTS"#), @r#"
|
||||||
|
The quotes in one of the values are inconsistent.
|
||||||
|
10:30 _vectors."embedderName EXISTS
|
||||||
|
"#);
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors."embedderNam"e EXISTS"#), @r#"
|
||||||
|
The vector filter has leftover tokens.
|
||||||
|
23:31 _vectors."embedderNam"e EXISTS
|
||||||
|
"#);
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName.documentTemplate. EXISTS"#), @r"
|
||||||
|
Was expecting one of `.fragments`, `.userProvided`, `.documentTemplate`, `.regenerate` or nothing, but instead found a point without a valid value.
|
||||||
|
39:40 _vectors.embedderName.documentTemplate. EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName.fragments EXISTS"#), @r"
|
||||||
|
The vector filter is missing a fragment name.
|
||||||
|
32:39 _vectors.embedderName.fragments EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName.fragments. EXISTS"#), @r"
|
||||||
|
The vector filter's fragment name is invalid.
|
||||||
|
33:40 _vectors.embedderName.fragments. EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName.fragments.test test EXISTS"#), @r"
|
||||||
|
Was expecting an operation like `EXISTS` or `NOT EXISTS` after the vector filter.
|
||||||
|
38:49 _vectors.embedderName.fragments.test test EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName.fragments. test EXISTS"#), @r"
|
||||||
|
The vector filter's fragment name is invalid.
|
||||||
|
33:45 _vectors.embedderName.fragments. test EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName .fragments. test EXISTS"#), @r"
|
||||||
|
Was expecting an operation like `EXISTS` or `NOT EXISTS` after the vector filter.
|
||||||
|
23:46 _vectors.embedderName .fragments. test EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName .fragments.test EXISTS"#), @r"
|
||||||
|
Was expecting an operation like `EXISTS` or `NOT EXISTS` after the vector filter.
|
||||||
|
23:45 _vectors.embedderName .fragments.test EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName.fargments.test EXISTS"#), @r"
|
||||||
|
Was expecting one of `fragments`, `userProvided`, `documentTemplate`, `regenerate` or nothing, but instead found `fargments`. Did you mean `fragments`?
|
||||||
|
23:32 _vectors.embedderName.fargments.test EXISTS
|
||||||
|
");
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName."userProvided" EXISTS"#), @r#"
|
||||||
|
Was expecting this part to be unquoted.
|
||||||
|
24:36 _vectors.embedderName."userProvided" EXISTS
|
||||||
|
"#);
|
||||||
|
insta::assert_snapshot!(p(r#"_vectors.embedderName.userProvided.fragments.test EXISTS"#), @r"
|
||||||
|
Vector filter can only accept one of `fragments`, `userProvided`, `documentTemplate` or `regenerate`, but found both `userProvided` and `fragments`.
|
||||||
|
36:45 _vectors.embedderName.userProvided.fragments.test EXISTS
|
||||||
|
");
|
||||||
|
|
||||||
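Read together, the error cases above imply the accepted `_vectors` shapes. A hedged sketch of the happy paths they leave open, assuming `FilterCondition::parse` is the crate's public entry point:

    // Editor's sketch, not part of the diff: inputs the grammar should accept.
    for valid in [
        r#"_vectors EXISTS"#,
        r#"_vectors.embedderName EXISTS"#,
        r#"_vectors.embedderName.userProvided EXISTS"#,
        r#"_vectors.embedderName.fragments.test NOT EXISTS"#,
    ] {
        assert!(filter_parser::FilterCondition::parse(valid).is_ok(), "{valid}");
    }
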
insta::assert_snapshot!(p(r#"NOT OR EXISTS AND EXISTS NOT EXISTS"#), @r###"
|
insta::assert_snapshot!(p(r#"NOT OR EXISTS AND EXISTS NOT EXISTS"#), @r###"
|
||||||
Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
|
Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
|
||||||
5:7 NOT OR EXISTS AND EXISTS NOT EXISTS
|
5:7 NOT OR EXISTS AND EXISTS NOT EXISTS
|
||||||
"###);
|
"###);
|
||||||
|
|
||||||
insta::assert_snapshot!(p(r#"value NULL"#), @r###"
|
insta::assert_snapshot!(p(r#"value NULL"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value NULL`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value NULL`.
|
||||||
1:11 value NULL
|
1:11 value NULL
|
||||||
"###);
|
");
|
||||||
insta::assert_snapshot!(p(r#"value NOT NULL"#), @r###"
|
insta::assert_snapshot!(p(r#"value NOT NULL"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value NOT NULL`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value NOT NULL`.
|
||||||
1:15 value NOT NULL
|
1:15 value NOT NULL
|
||||||
"###);
|
");
|
||||||
insta::assert_snapshot!(p(r#"value EMPTY"#), @r###"
|
insta::assert_snapshot!(p(r#"value EMPTY"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value EMPTY`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value EMPTY`.
|
||||||
1:12 value EMPTY
|
1:12 value EMPTY
|
||||||
"###);
|
");
|
||||||
insta::assert_snapshot!(p(r#"value NOT EMPTY"#), @r###"
|
insta::assert_snapshot!(p(r#"value NOT EMPTY"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value NOT EMPTY`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value NOT EMPTY`.
|
||||||
1:16 value NOT EMPTY
|
1:16 value NOT EMPTY
|
||||||
"###);
|
");
|
||||||
insta::assert_snapshot!(p(r#"value IS"#), @r###"
|
insta::assert_snapshot!(p(r#"value IS"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS`.
|
||||||
1:9 value IS
|
1:9 value IS
|
||||||
"###);
|
");
|
||||||
insta::assert_snapshot!(p(r#"value IS NOT"#), @r###"
|
insta::assert_snapshot!(p(r#"value IS NOT"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS NOT`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS NOT`.
|
||||||
1:13 value IS NOT
|
1:13 value IS NOT
|
||||||
"###);
|
");
|
||||||
insta::assert_snapshot!(p(r#"value IS EXISTS"#), @r###"
|
insta::assert_snapshot!(p(r#"value IS EXISTS"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS EXISTS`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS EXISTS`.
|
||||||
1:16 value IS EXISTS
|
1:16 value IS EXISTS
|
||||||
"###);
|
");
|
||||||
insta::assert_snapshot!(p(r#"value IS NOT EXISTS"#), @r###"
|
insta::assert_snapshot!(p(r#"value IS NOT EXISTS"#), @r"
|
||||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS NOT EXISTS`.
|
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS NOT EXISTS`.
|
||||||
1:20 value IS NOT EXISTS
|
1:20 value IS NOT EXISTS
|
||||||
"###);
|
");
|
||||||
 }

 #[test]

@@ -80,6 +80,51 @@ pub fn word_exact<'a, 'b: 'a>(tag: &'b str) -> impl Fn(Span<'a>) -> IResult<'a,
 }
 }

+/// vector_value = ( non_dot_word | singleQuoted | doubleQuoted)
+pub fn parse_vector_value(input: Span) -> IResult<Token> {
+    pub fn non_dot_word(input: Span) -> IResult<Token> {
+        let (input, word) = take_while1(|c| is_value_component(c) && c != '.')(input)?;
+        Ok((input, word.into()))
+    }
+
+    let (input, value) = alt((
+        delimited(char('\''), cut(|input| quoted_by('\'', input)), cut(char('\''))),
+        delimited(char('"'), cut(|input| quoted_by('"', input)), cut(char('"'))),
+        non_dot_word,
+    ))(input)?;
+
+    match unescaper::unescape(value.value()) {
+        Ok(content) => {
+            if content.len() != value.value().len() {
+                Ok((input, Token::new(value.original_span(), Some(content))))
+            } else {
+                Ok((input, value))
+            }
+        }
+        Err(unescaper::Error::IncompleteStr(_)) => Err(nom::Err::Incomplete(nom::Needed::Unknown)),
+        Err(unescaper::Error::ParseIntError { .. }) => Err(nom::Err::Error(Error::new_from_kind(
+            value.original_span(),
+            ErrorKind::InvalidEscapedNumber,
+        ))),
+        Err(unescaper::Error::InvalidChar { .. }) => Err(nom::Err::Error(Error::new_from_kind(
+            value.original_span(),
+            ErrorKind::MalformedValue,
+        ))),
+    }
+}
+
+pub fn parse_vector_value_cut<'a>(input: Span<'a>, kind: ErrorKind<'a>) -> IResult<'a, Token<'a>> {
+    parse_vector_value(input).map_err(|e| match e {
+        nom::Err::Failure(e) => match e.kind() {
+            ErrorKind::Char(c) if *c == '"' || *c == '\'' => {
+                crate::Error::failure_from_kind(input, ErrorKind::VectorFilterInvalidQuotes)
+            }
+            _ => crate::Error::failure_from_kind(input, kind),
+        },
+        _ => crate::Error::failure_from_kind(input, kind),
+    })
+}

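The `alt((singleQuoted, doubleQuoted, non_dot_word))` shape above is plain nom. A self-contained sketch of the same technique on bare `&str`, free of this crate's `Span`/`Token` wrappers and the unescaping step (nom 7 assumed):

    use nom::branch::alt;
    use nom::bytes::complete::{is_not, take_while1};
    use nom::character::complete::char;
    use nom::sequence::delimited;
    use nom::IResult;

    // A bare word: anything but whitespace, quotes, or the dot that separates
    // `_vectors.embedder.fragments.name` path segments.
    fn non_dot_word(input: &str) -> IResult<&str, &str> {
        take_while1(|c: char| !c.is_whitespace() && !['.', '\'', '"'].contains(&c))(input)
    }

    fn vector_value(input: &str) -> IResult<&str, &str> {
        alt((
            delimited(char('\''), is_not("'"), char('\'')),
            delimited(char('"'), is_not("\""), char('"')),
            non_dot_word,
        ))(input)
    }

    fn main() {
        // Unquoted values stop at the dot; quoted values may contain dots.
        assert_eq!(vector_value("embedder.rest"), Ok((".rest", "embedder")));
        assert_eq!(vector_value(r#""a.b" tail"#), Ok((" tail", "a.b")));
    }
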
 /// value = WS* ( word | singleQuoted | doubleQuoted) WS+
 pub fn parse_value(input: Span) -> IResult<Token> {
     // to get better diagnostic message we are going to strip the left whitespaces from the input right now

@@ -99,31 +144,21 @@ pub fn parse_value(input: Span) -> IResult<Token> {
 }

 match parse_geo_radius(input) {
-    Ok(_) => {
-        return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::MisusedGeoRadius)))
-    }
+    Ok(_) => return Err(Error::failure_from_kind(input, ErrorKind::MisusedGeoRadius)),
     // if we encountered a failure it means the user badly wrote a _geoRadius filter.
     // But instead of showing them how to fix his syntax we are going to tell them they should not use this filter as a value.
     Err(e) if e.is_failure() => {
-        return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::MisusedGeoRadius)))
+        return Err(Error::failure_from_kind(input, ErrorKind::MisusedGeoRadius))
     }
     _ => (),
 }

 match parse_geo_bounding_box(input) {
-    Ok(_) => {
-        return Err(nom::Err::Failure(Error::new_from_kind(
-            input,
-            ErrorKind::MisusedGeoBoundingBox,
-        )))
-    }
+    Ok(_) => return Err(Error::failure_from_kind(input, ErrorKind::MisusedGeoBoundingBox)),
     // if we encountered a failure it means the user badly wrote a _geoBoundingBox filter.
     // But instead of showing them how to fix his syntax we are going to tell them they should not use this filter as a value.
     Err(e) if e.is_failure() => {
-        return Err(nom::Err::Failure(Error::new_from_kind(
-            input,
-            ErrorKind::MisusedGeoBoundingBox,
-        )))
+        return Err(Error::failure_from_kind(input, ErrorKind::MisusedGeoBoundingBox))
     }
     _ => (),
 }

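Both misuse paths now funnel through `Error::failure_from_kind`. Its definition isn't in this hunk; presumably it just folds the old two-step construction into one call, along the lines of:

    // Editor's guess at the helper this refactor relies on (not shown here):
    impl<'a> Error<'a> {
        pub fn failure_from_kind(span: Span<'a>, kind: ErrorKind<'a>) -> nom::Err<Self> {
            nom::Err::Failure(Self::new_from_kind(span, kind))
        }
    }
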
@@ -129,6 +129,7 @@ fn main() {
     &mut new_fields_ids_map,
     &|| false,
     Progress::default(),
+    None,
 )
 .unwrap();

@@ -23,6 +23,7 @@ dump = { path = "../dump" }
 enum-iterator = "2.1.0"
 file-store = { path = "../file-store" }
 flate2 = "1.1.2"
+hashbrown = "0.15.4"
 indexmap = "2.9.0"
 meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
@@ -45,6 +46,8 @@ tracing = "0.1.41"
 ureq = "2.12.1"
 uuid = { version = "1.17.0", features = ["serde", "v4"] }
 backoff = "0.4.0"
+itertools = "0.14.0"
+tokio = { version = "1.47.1", features = ["full"] }

 [dev-dependencies]
 big_s = "1.0.2"
@@ -1,3 +1,5 @@
+#![allow(clippy::result_large_err)]
+
 use std::collections::HashMap;
 use std::io;

|||||||
canceled_by: task.canceled_by,
|
canceled_by: task.canceled_by,
|
||||||
details: task.details,
|
details: task.details,
|
||||||
status: task.status,
|
status: task.status,
|
||||||
|
network: task.network,
|
||||||
kind: match task.kind {
|
kind: match task.kind {
|
||||||
KindDump::DocumentImport {
|
KindDump::DocumentImport {
|
||||||
primary_key,
|
primary_key,
|
||||||
@@ -197,9 +200,10 @@ impl<'a> Dump<'a> {
|
|||||||
index_uid: task.index_uid.ok_or(Error::CorruptedDump)?,
|
index_uid: task.index_uid.ok_or(Error::CorruptedDump)?,
|
||||||
primary_key,
|
primary_key,
|
||||||
},
|
},
|
||||||
KindDump::IndexUpdate { primary_key } => KindWithContent::IndexUpdate {
|
KindDump::IndexUpdate { primary_key, uid } => KindWithContent::IndexUpdate {
|
||||||
index_uid: task.index_uid.ok_or(Error::CorruptedDump)?,
|
index_uid: task.index_uid.ok_or(Error::CorruptedDump)?,
|
||||||
primary_key,
|
primary_key,
|
||||||
|
new_index_uid: uid,
|
||||||
},
|
},
|
||||||
KindDump::IndexSwap { swaps } => KindWithContent::IndexSwap { swaps },
|
KindDump::IndexSwap { swaps } => KindWithContent::IndexSwap { swaps },
|
||||||
KindDump::TaskCancelation { query, tasks } => {
|
KindDump::TaskCancelation { query, tasks } => {
|
||||||
@@ -230,6 +234,9 @@ impl<'a> Dump<'a> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
|
KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
|
||||||
|
KindDump::NetworkTopologyChange { network: new_network, origin } => {
|
||||||
|
KindWithContent::NetworkTopologyChange { network: new_network, origin }
|
||||||
|
}
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -67,6 +67,8 @@ pub enum Error {
     SwapDuplicateIndexesFound(Vec<String>),
     #[error("Index `{0}` not found.")]
     SwapIndexNotFound(String),
+    #[error("Cannot rename `{0}` to `{1}` as the index already exists. Hint: You can remove `{1}` first and then do your remove.")]
+    SwapIndexFoundDuringRename(String, String),
     #[error("Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.")]
     NoSpaceLeftInTaskQueue,
     #[error(
@@ -74,6 +76,10 @@ pub enum Error {
         .0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")
     )]
     SwapIndexesNotFound(Vec<String>),
+    #[error("The following indexes are being renamed but cannot because their new name conflicts with an already existing index: {}. Renaming doesn't overwrite the other index name.",
+        .0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")
+    )]
+    SwapIndexesFoundDuringRename(Vec<String>),
     #[error("Corrupted dump.")]
     CorruptedDump,
     #[error(
@@ -203,6 +209,8 @@ impl Error {
     | Error::SwapIndexNotFound(_)
     | Error::NoSpaceLeftInTaskQueue
     | Error::SwapIndexesNotFound(_)
+    | Error::SwapIndexFoundDuringRename(_, _)
+    | Error::SwapIndexesFoundDuringRename(_)
     | Error::CorruptedDump
     | Error::InvalidTaskDate { .. }
     | Error::InvalidTaskUid { .. }
@@ -271,6 +279,8 @@ impl ErrorCode for Error {
     Error::SwapDuplicateIndexFound(_) => Code::InvalidSwapDuplicateIndexFound,
     Error::SwapIndexNotFound(_) => Code::IndexNotFound,
     Error::SwapIndexesNotFound(_) => Code::IndexNotFound,
+    Error::SwapIndexFoundDuringRename(_, _) => Code::IndexAlreadyExists,
+    Error::SwapIndexesFoundDuringRename(_) => Code::IndexAlreadyExists,
     Error::InvalidTaskDate { field, .. } => (*field).into(),
     Error::InvalidTaskUid { .. } => Code::InvalidTaskUids,
     Error::InvalidBatchUid { .. } => Code::InvalidBatchUids,

@@ -1,6 +1,7 @@
 use std::sync::{Arc, RwLock};

-use meilisearch_types::features::{InstanceTogglableFeatures, Network, RuntimeTogglableFeatures};
+use meilisearch_types::enterprise_edition::network::DbNetwork;
+use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
 use meilisearch_types::heed::types::{SerdeJson, Str};
 use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};

@@ -23,7 +24,7 @@ mod db_keys {
 pub(crate) struct FeatureData {
     persisted: Database<Str, SerdeJson<RuntimeTogglableFeatures>>,
     runtime: Arc<RwLock<RuntimeTogglableFeatures>>,
-    network: Arc<RwLock<Network>>,
+    network: Arc<RwLock<DbNetwork>>,
 }

 #[derive(Debug, Clone, Copy)]

@@ -157,6 +158,19 @@ impl RoFeatures {
             .into())
         }
     }

+    pub fn check_vector_store_setting(&self, disabled_action: &'static str) -> Result<()> {
+        if self.runtime.vector_store_setting {
+            Ok(())
+        } else {
+            Err(FeatureNotEnabledError {
+                disabled_action,
+                feature: "vector_store_setting",
+                issue_link: "https://github.com/orgs/meilisearch/discussions/860",
+            }
+            .into())
+        }
+    }
 }

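As with the other feature gates in this file, callers are expected to consult the guard before touching the gated setting. A sketch of the assumed call site (the action string is illustrative):

    // Hypothetical usage: reject the request unless the runtime feature is on.
    let features = index_scheduler.features();
    features.check_vector_store_setting("setting `vectorStore` in the index settings")?;
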
 impl FeatureData {
@@ -183,8 +197,8 @@ impl FeatureData {
     }));

     // Once this is stabilized, network should be stored along with webhooks in index-scheduler's persisted database
-    let network_db = runtime_features_db.remap_data_type::<SerdeJson<Network>>();
-    let network: Network = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();
+    let network_db = runtime_features_db.remap_data_type::<SerdeJson<DbNetwork>>();
+    let network: DbNetwork = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();

     Ok(Self {
         persisted: runtime_features_db,
@@ -220,8 +234,8 @@ impl FeatureData {
     RoFeatures::new(self)
 }

-pub fn put_network(&self, mut wtxn: RwTxn, new_network: Network) -> Result<()> {
-    self.persisted.remap_data_type::<SerdeJson<Network>>().put(
+pub fn put_network(&self, mut wtxn: RwTxn, new_network: DbNetwork) -> Result<()> {
+    self.persisted.remap_data_type::<SerdeJson<DbNetwork>>().put(
         &mut wtxn,
         db_keys::NETWORK,
         &new_network,
@@ -233,7 +247,7 @@ impl FeatureData {
     Ok(())
 }

-pub fn network(&self) -> Network {
-    Network::clone(&*self.network.read().unwrap())
+pub fn network(&self) -> DbNetwork {
+    DbNetwork::clone(&*self.network.read().unwrap())
 }
 }

@@ -143,10 +143,10 @@ impl IndexStats {
 ///
 /// - rtxn: a RO transaction for the index, obtained from `Index::read_txn()`.
 pub fn new(index: &Index, rtxn: &RoTxn) -> milli::Result<Self> {
-    let arroy_stats = index.arroy_stats(rtxn)?;
+    let vector_store_stats = index.vector_store_stats(rtxn)?;
     Ok(IndexStats {
-        number_of_embeddings: Some(arroy_stats.number_of_embeddings),
-        number_of_embedded_documents: Some(arroy_stats.documents.len()),
+        number_of_embeddings: Some(vector_store_stats.number_of_embeddings),
+        number_of_embedded_documents: Some(vector_store_stats.documents.len()),
         documents_database_stats: index.documents_stats(rtxn)?.unwrap_or_default(),
         number_of_documents: None,
         database_size: index.on_disk_size()?,
@@ -526,6 +526,20 @@ impl IndexMapper {
     Ok(())
 }

+/// Rename an index.
+pub fn rename(&self, wtxn: &mut RwTxn, current: &str, new: &str) -> Result<()> {
+    let uuid = self
+        .index_mapping
+        .get(wtxn, current)?
+        .ok_or_else(|| Error::IndexNotFound(current.to_string()))?;
+    if self.index_mapping.get(wtxn, new)?.is_some() {
+        return Err(Error::IndexAlreadyExists(new.to_string()));
+    }
+    self.index_mapping.delete(wtxn, current)?;
+    self.index_mapping.put(wtxn, new, &uuid)?;
+    Ok(())
+}
+
 /// The stats of an index.
 ///
 /// If available in the cache, they are directly returned.

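`rename` moves only the `index_mapping` entry: the UUID, and with it the on-disk environment, stays put, and the operation fails if the target name is already taken. A usage sketch under those assumptions (call site hypothetical):

    // Rename inside one write txn so the delete/put pair on index_mapping
    // stays atomic; the index data itself is never touched.
    let mut wtxn = env.write_txn()?;
    index_mapper.rename(&mut wtxn, "movies", "movies_v2")?;
    wtxn.commit()?;
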
@@ -36,6 +36,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
     run_loop_iteration: _,
     embedders: _,
     chat_settings: _,
+    runtime: _,
 } = scheduler;

 let rtxn = env.read_txn().unwrap();
@@ -230,6 +231,7 @@ pub fn snapshot_task(task: &Task) -> String {
     details,
     status,
     kind,
+    network,
 } = task;
 snap.push('{');
 snap.push_str(&format!("uid: {uid}, "));
@@ -247,6 +249,9 @@ pub fn snapshot_task(task: &Task) -> String {
     snap.push_str(&format!("details: {}, ", &snapshot_details(details)));
 }
 snap.push_str(&format!("kind: {kind:?}"));
+if let Some(network) = network {
+    snap.push_str(&format!("network: {network:?}, "))
+}

 snap.push('}');
 snap
@@ -274,8 +279,8 @@ fn snapshot_details(d: &Details) -> String {
     Details::SettingsUpdate { settings } => {
         format!("{{ settings: {settings:?} }}")
     }
-    Details::IndexInfo { primary_key } => {
-        format!("{{ primary_key: {primary_key:?} }}")
+    Details::IndexInfo { primary_key, new_index_uid, old_index_uid } => {
+        format!("{{ primary_key: {primary_key:?}, old_new_uid: {old_index_uid:?}, new_index_uid: {new_index_uid:?} }}")
     }
     Details::DocumentDeletion {
         provided_ids: received_document_ids,
@@ -313,6 +318,9 @@ fn snapshot_details(d: &Details) -> String {
     Details::UpgradeDatabase { from, to } => {
         format!("{{ from: {from:?}, to: {to:?} }}")
     }
+    Details::NetworkTopologyChange { network: new_network } => {
+        format!("{{ new_network: {new_network:?} }}")
+    }
 }
 }

@@ -1,3 +1,6 @@
+// The main Error type is large and boxing the large variant make the pattern matching fails
+#![allow(clippy::result_large_err)]
+
 /*!
 This crate defines the index scheduler, which is responsible for:
 1. Keeping references to meilisearch's indexes and mapping them to their
@@ -51,8 +54,9 @@ pub use features::RoFeatures;
 use flate2::bufread::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
+use meilisearch_types::enterprise_edition::network::DbNetwork;
 use meilisearch_types::features::{
-    ChatCompletionSettings, InstanceTogglableFeatures, Network, RuntimeTogglableFeatures,
+    ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
 };
 use meilisearch_types::heed::byteorder::BE;
 use meilisearch_types::heed::types::{DecodeIgnore, SerdeJson, Str, I128};
@@ -64,7 +68,7 @@ use meilisearch_types::milli::vector::{
 };
 use meilisearch_types::milli::{self, Index};
 use meilisearch_types::task_view::TaskView;
-use meilisearch_types::tasks::{KindWithContent, Task};
+use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
 use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
 use milli::vector::db::IndexEmbeddingConfig;
 use processing::ProcessingTasks;
@@ -212,6 +216,8 @@ pub struct IndexScheduler {
     /// A counter that is incremented before every call to [`tick`](IndexScheduler::tick)
     #[cfg(test)]
     run_loop_iteration: Arc<RwLock<usize>>,
+
+    runtime: Option<tokio::runtime::Handle>,
 }

 impl IndexScheduler {
@@ -238,6 +244,7 @@ impl IndexScheduler {
     run_loop_iteration: self.run_loop_iteration.clone(),
     features: self.features.clone(),
     chat_settings: self.chat_settings,
+    runtime: self.runtime.clone(),
 }
 }

@@ -256,6 +263,7 @@ impl IndexScheduler {
     options: IndexSchedulerOptions,
     auth_env: Env<WithoutTls>,
     from_db_version: (u32, u32, u32),
+    runtime: Option<tokio::runtime::Handle>,
     #[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
     #[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
 ) -> Result<Self> {
@@ -337,13 +345,14 @@ impl IndexScheduler {
     run_loop_iteration: Arc::new(RwLock::new(0)),
     features,
     chat_settings,
+    runtime,
 };

 this.run();
 Ok(this)
 }

-fn read_txn(&self) -> Result<RoTxn<WithoutTls>> {
+fn read_txn(&self) -> Result<RoTxn<'_, WithoutTls>> {
     self.env.read_txn().map_err(|e| e.into())
 }

@@ -666,6 +675,16 @@ impl IndexScheduler {
     self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
 }

+pub fn set_task_network(&self, task_id: TaskId, network: TaskNetwork) -> Result<()> {
+    let mut wtxn = self.env.write_txn()?;
+    let mut task =
+        self.queue.tasks.get_task(&wtxn, task_id)?.ok_or(Error::TaskNotFound(task_id))?;
+    task.network = Some(network);
+    self.queue.tasks.all_tasks.put(&mut wtxn, &task_id, &task)?;
+    wtxn.commit()?;
+    Ok(())
+}
+
 /// Return the batches matching the query from the user's point of view along
 /// with the total number of batches matching the query, ignoring from and limit.
 ///

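Note that `set_task_network` rewrites the stored task wholesale, so any previous `network` value is replaced rather than merged. A sketch of the intended call site (assumed, not shown in this diff):

    // Hypothetical: attach origin metadata to a task that was proxied from
    // another node of the network, right after it was registered.
    let task = index_scheduler.register(kind, None, false)?;
    index_scheduler.set_task_network(task.uid, network)?;
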
@@ -746,7 +765,7 @@ impl IndexScheduler {

 /// Register a new task coming from a dump in the scheduler.
 /// By taking a mutable ref we're pretty sure no one will ever import a dump while actix is running.
-pub fn register_dumped_task(&mut self) -> Result<Dump> {
+pub fn register_dumped_task(&mut self) -> Result<Dump<'_>> {
     Dump::new(self)
 }

@@ -795,10 +814,8 @@ impl IndexScheduler {
     .queue
     .tasks
     .get_task(self.rtxn, task_id)
-    .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?
-    .ok_or_else(|| {
-        io::Error::new(io::ErrorKind::Other, Error::CorruptedTaskQueue)
-    })?;
+    .map_err(io::Error::other)?
+    .ok_or_else(|| io::Error::other(Error::CorruptedTaskQueue))?;

 serde_json::to_writer(&mut self.buffer, &TaskView::from_task(&task))?;
 self.buffer.push(b'\n');

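`io::Error::other`, stable since Rust 1.74, is exactly the shorthand this refactor leans on:

    // The two forms are equivalent; the refactor just picks the shorter one.
    fn main() {
        let verbose = std::io::Error::new(std::io::ErrorKind::Other, "boom");
        let concise = std::io::Error::other("boom");
        assert_eq!(verbose.kind(), concise.kind());
    }
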
@@ -880,13 +897,13 @@ impl IndexScheduler {
     Ok(())
 }

-pub fn put_network(&self, network: Network) -> Result<()> {
+pub fn put_network(&self, network: DbNetwork) -> Result<()> {
     let wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
     self.features.put_network(wtxn, network)?;
     Ok(())
 }

-pub fn network(&self) -> Network {
+pub fn network(&self) -> DbNetwork {
     self.features.network()
 }

@@ -915,9 +932,10 @@ impl IndexScheduler {

 pub fn embedders(
     &self,
-    index_uid: String,
+    index_uid: &str,
     embedding_configs: Vec<IndexEmbeddingConfig>,
 ) -> Result<RuntimeEmbedders> {
+    let err = |err| Error::from_milli(err, Some(index_uid.to_owned()));
     let res: Result<_> = embedding_configs
         .into_iter()
         .map(
@@ -930,7 +948,7 @@ impl IndexScheduler {
     let document_template = prompt
         .try_into()
         .map_err(meilisearch_types::milli::Error::from)
-        .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?;
+        .map_err(err)?;

     let fragments = fragments
         .into_inner()
@@ -960,9 +978,8 @@ impl IndexScheduler {
     let embedder = Arc::new(
         Embedder::new(embedder_options.clone(), self.scheduler.embedding_cache_cap)
             .map_err(meilisearch_types::milli::vector::Error::from)
-            .map_err(|err| {
-                Error::from_milli(err.into(), Some(index_uid.clone()))
-            })?,
+            .map_err(milli::Error::from)
+            .map_err(err)?,
     );
     {
         let mut embedders = self.embedders.write().unwrap();

@@ -275,19 +275,27 @@ impl BatchQueue {
 pub(crate) fn get_existing_batches(
     &self,
     rtxn: &RoTxn,
-    tasks: impl IntoIterator<Item = BatchId>,
+    batches: impl IntoIterator<Item = BatchId>,
     processing: &ProcessingTasks,
 ) -> Result<Vec<Batch>> {
-    tasks
+    batches
         .into_iter()
         .map(|batch_id| {
             if Some(batch_id) == processing.batch.as_ref().map(|batch| batch.uid) {
                 let mut batch = processing.batch.as_ref().unwrap().to_batch();
                 batch.progress = processing.get_progress_view();
+                // Add progress_trace from the current progress state
+                if let Some(progress) = &processing.progress {
+                    batch.stats.progress_trace = progress
+                        .accumulated_durations()
+                        .into_iter()
+                        .map(|(k, v)| (k, v.into()))
+                        .collect();
+                }
                 Ok(batch)
             } else {
                 self.get_batch(rtxn, batch_id)
-                    .and_then(|task| task.ok_or(Error::CorruptedTaskQueue))
+                    .and_then(|batch| batch.ok_or(Error::CorruptedTaskQueue))
             }
         })
         .collect::<Result<_>>()

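Only the in-flight batch needs its trace lifted out of the live `Progress`; batches read back from the store already carry a persisted `progress_trace`. The added block is the whole trick, assuming `accumulated_durations()` yields `(step, duration)` pairs convertible into the stats' value type:

    // Same conversion in isolation (editor's sketch of the assumed types):
    batch.stats.progress_trace = progress
        .accumulated_durations()
        .into_iter()
        .map(|(step, duration)| (step, duration.into()))
        .collect();
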
@@ -104,6 +104,15 @@ fn query_batches_simple() {
 batches[0].started_at = OffsetDateTime::UNIX_EPOCH;
 assert!(batches[0].enqueued_at.is_some());
 batches[0].enqueued_at = None;
+
+if !batches[0].stats.progress_trace.is_empty() {
+    batches[0].stats.progress_trace.clear();
+    batches[0]
+        .stats
+        .progress_trace
+        .insert("processing tasks".to_string(), "deterministic_duration".into());
+}

 // Insta cannot snapshot our batches because the batch stats contains an enum as key: https://github.com/mitsuhiko/insta/issues/689
 let batch = serde_json::to_string_pretty(&batches[0]).unwrap();
 snapshot!(batch, @r###"
@@ -122,6 +131,9 @@ fn query_batches_simple() {
 },
 "indexUids": {
     "catto": 1
+},
+"progressTrace": {
+    "processing tasks": "deterministic_duration"
 }
 },
 "startedAt": "1970-01-01T00:00:00Z",
@@ -334,11 +346,11 @@ fn query_batches_special_rules() {
 let kind = index_creation_task("doggo", "sheep");
 let _task = index_scheduler.register(kind, None, false).unwrap();
 let kind = KindWithContent::IndexSwap {
-    swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+    swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
 };
 let _task = index_scheduler.register(kind, None, false).unwrap();
 let kind = KindWithContent::IndexSwap {
-    swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()) }],
+    swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()), rename: false }],
 };
 let _task = index_scheduler.register(kind, None, false).unwrap();

@@ -442,7 +454,7 @@ fn query_batches_canceled_by() {
 let kind = index_creation_task("doggo", "sheep");
 let _ = index_scheduler.register(kind, None, false).unwrap();
 let kind = KindWithContent::IndexSwap {
-    swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+    swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
 };
 let _task = index_scheduler.register(kind, None, false).unwrap();

@@ -279,6 +279,7 @@ impl Queue {
     details: kind.default_details(),
     status: Status::Enqueued,
     kind: kind.clone(),
+    network: None,
 };
 // For deletion and cancelation tasks, we want to make extra sure that they
 // don't attempt to delete/cancel tasks that are newer than themselves.
@@ -325,7 +326,7 @@ impl Queue {
 );

 // it's safe to unwrap here because we checked the len above
-let newest_task_id = to_delete.iter().last().unwrap();
+let newest_task_id = to_delete.iter().next_back().unwrap();
 let last_task_to_delete =
     self.tasks.get_task(wtxn, newest_task_id)?.ok_or(Error::CorruptedTaskQueue)?;

@@ -6,9 +6,9 @@ source: crates/index-scheduler/src/queue/batches_test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
-1 {uid: 1, batch_uid: 1, status: canceled, canceled_by: 3, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
-2 {uid: 2, batch_uid: 1, status: canceled, canceled_by: 3, details: { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
+1 {uid: 1, batch_uid: 1, status: canceled, canceled_by: 3, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
+2 {uid: 2, batch_uid: 1, status: canceled, canceled_by: 3, details: { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }}
 3 {uid: 3, batch_uid: 1, status: succeeded, details: { matched_tasks: 3, canceled_tasks: Some(2), original_filter: "test_query" }, kind: TaskCancelation { query: "test_query", tasks: RoaringBitmap<[0, 1, 2]> }}
 ----------------------------------------------------------------------
 ### Status:
@@ -49,7 +49,7 @@ catto: { number_of_documents: 0, field_distribution: {} }
 ----------------------------------------------------------------------
 ### All Batches:
 0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `taskCancelation` that cannot be batched with any other task.", }
+1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"],"rename":false}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `taskCancelation` that cannot be batched with any other task.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -6,9 +6,9 @@ source: crates/index-scheduler/src/queue/batches_test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
-1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("plankton") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
-2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("his_own_vomit") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
+1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("plankton"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
+2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("his_own_vomit"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued []

@@ -1,13 +1,12 @@
|
|||||||
---
|
---
|
||||||
source: crates/index-scheduler/src/queue/batches_test.rs
|
source: crates/index-scheduler/src/queue/batches_test.rs
|
||||||
snapshot_kind: text
|
|
||||||
---
|
---
|
||||||
### Autobatching Enabled = true
|
### Autobatching Enabled = true
|
||||||
### Processing batch None:
|
### Processing batch None:
|
||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
0 {uid: 0, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued [0,]
|
enqueued [0,]
|
||||||
|
|||||||
@@ -1,14 +1,13 @@
|
|||||||
---
|
---
|
||||||
source: crates/index-scheduler/src/queue/batches_test.rs
|
source: crates/index-scheduler/src/queue/batches_test.rs
|
||||||
snapshot_kind: text
|
|
||||||
---
|
---
|
||||||
### Autobatching Enabled = true
|
### Autobatching Enabled = true
|
||||||
### Processing batch None:
|
### Processing batch None:
|
||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
0 {uid: 0, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
|
1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued [0,1,]
|
enqueued [0,1,]
|
||||||
|
|||||||
@@ -1,15 +1,14 @@
|
|||||||
---
|
---
|
||||||
source: crates/index-scheduler/src/queue/batches_test.rs
|
source: crates/index-scheduler/src/queue/batches_test.rs
|
||||||
snapshot_kind: text
|
|
||||||
---
|
---
|
||||||
### Autobatching Enabled = true
|
### Autobatching Enabled = true
|
||||||
### Processing batch None:
|
### Processing batch None:
|
||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
0 {uid: 0, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
|
1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
|
||||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("his_own_vomit") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
|
2 {uid: 2, status: enqueued, details: { primary_key: Some("his_own_vomit"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued [0,1,2,]
|
enqueued [0,1,2,]
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ source: crates/index-scheduler/src/queue/batches_test.rs
|
|||||||
{uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
{uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
||||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("fish") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
|
2 {uid: 2, status: enqueued, details: { primary_key: Some("fish"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued [1,2,]
|
enqueued [1,2,]
|
||||||
|
|||||||
@@ -6,9 +6,9 @@ source: crates/index-scheduler/src/queue/batches_test.rs
|
|||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||||
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
||||||
2 {uid: 2, batch_uid: 2, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { primary_key: Some("fish") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
|
2 {uid: 2, batch_uid: 2, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { primary_key: Some("fish"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued []
|
enqueued []
|
||||||
|
|||||||
@@ -1,15 +1,14 @@
|
|||||||
---
|
---
|
||||||
source: crates/index-scheduler/src/queue/batches_test.rs
|
source: crates/index-scheduler/src/queue/batches_test.rs
|
||||||
snapshot_kind: text
|
|
||||||
---
|
---
|
||||||
### Autobatching Enabled = true
|
### Autobatching Enabled = true
|
||||||
### Processing batch None:
|
### Processing batch None:
|
||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
||||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("fish") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
|
2 {uid: 2, status: enqueued, details: { primary_key: Some("fish"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued [0,1,2,]
|
enqueued [0,1,2,]
|
||||||
|
|||||||
@@ -6,10 +6,10 @@ source: crates/index-scheduler/src/queue/batches_test.rs
|
|||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||||
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
||||||
2 {uid: 2, batch_uid: 2, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }}
|
2 {uid: 2, batch_uid: 2, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }}
|
||||||
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `whalo` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { swaps: [IndexSwap { indexes: ("catto", "whalo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "whalo") }] }}
|
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `whalo` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { swaps: [IndexSwap { indexes: ("catto", "whalo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "whalo"), rename: false }] }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued []
|
enqueued []
|
||||||
@@ -54,8 +54,8 @@ doggo: { number_of_documents: 0, field_distribution: {} }
|
|||||||
### All Batches:
|
### All Batches:
|
||||||
0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
|
0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
|
||||||
1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
||||||
2 {uid: 2, details: {"swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "created batch containing only task with id 2 of type `indexSwap` that cannot be batched with any other task.", }
|
2 {uid: 2, details: {"swaps":[{"indexes":["catto","doggo"],"rename":false}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "created batch containing only task with id 2 of type `indexSwap` that cannot be batched with any other task.", }
|
||||||
3 {uid: 3, details: {"swaps":[{"indexes":["catto","whalo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "created batch containing only task with id 3 of type `indexSwap` that cannot be batched with any other task.", }
|
3 {uid: 3, details: {"swaps":[{"indexes":["catto","whalo"],"rename":false}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "created batch containing only task with id 3 of type `indexSwap` that cannot be batched with any other task.", }
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Batch to tasks mapping:
|
### Batch to tasks mapping:
|
||||||
0 [0,]
|
0 [0,]
|
||||||
|
|||||||
@@ -1,16 +1,15 @@
|
|||||||
---
|
---
|
||||||
source: crates/index-scheduler/src/queue/batches_test.rs
|
source: crates/index-scheduler/src/queue/batches_test.rs
|
||||||
snapshot_kind: text
|
|
||||||
---
|
---
|
||||||
### Autobatching Enabled = true
|
### Autobatching Enabled = true
|
||||||
### Processing batch None:
|
### Processing batch None:
|
||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
|
||||||
2 {uid: 2, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }}
|
2 {uid: 2, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }}
|
||||||
3 {uid: 3, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "whalo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "whalo") }] }}
|
3 {uid: 3, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "whalo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "whalo"), rename: false }] }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued [0,1,2,3,]
|
enqueued [0,1,2,3,]
|
||||||
|
|||||||
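Note: the `"rename":false` field now appearing in these swap payloads implies an extended `IndexSwap` shape. A minimal sketch of that shape as implied by the snapshots above; the semantics of `rename` are an assumption based on the field name, not confirmed by this excerpt:

    // Shape implied by the snapshots: `indexes` is a pair of index uids.
    // Assumption: when `rename` is true, the swap presumably renames the
    // first index to the second instead of exchanging the two.
    pub struct IndexSwap {
        pub indexes: (String, String),
        pub rename: bool,
    }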
@@ -6,9 +6,9 @@ source: crates/index-scheduler/src/queue/tasks_test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
-1 {uid: 1, batch_uid: 1, status: canceled, canceled_by: 3, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
-2 {uid: 2, batch_uid: 1, status: canceled, canceled_by: 3, details: { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
+1 {uid: 1, batch_uid: 1, status: canceled, canceled_by: 3, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
+2 {uid: 2, batch_uid: 1, status: canceled, canceled_by: 3, details: { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }}
 3 {uid: 3, batch_uid: 1, status: succeeded, details: { matched_tasks: 3, canceled_tasks: Some(2), original_filter: "test_query" }, kind: TaskCancelation { query: "test_query", tasks: RoaringBitmap<[0, 1, 2]> }}
 ----------------------------------------------------------------------
 ### Status:
@@ -49,7 +49,7 @@ catto: { number_of_documents: 0, field_distribution: {} }
 ----------------------------------------------------------------------
 ### All Batches:
 0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `taskCancelation` that cannot be batched with any other task.", }
+1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"],"rename":false}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `taskCancelation` that cannot be batched with any other task.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -6,9 +6,9 @@ source: crates/index-scheduler/src/queue/tasks_test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
-1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("plankton") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
-2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("his_own_vomit") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
+1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("plankton"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
+2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("his_own_vomit"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued []

@@ -1,13 +1,12 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,]

@@ -1,14 +1,13 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
-1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
+1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,1,]

@@ -1,15 +1,14 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
-1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
-2 {uid: 2, status: enqueued, details: { primary_key: Some("his_own_vomit") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
+1 {uid: 1, status: enqueued, details: { primary_key: Some("plankton"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("plankton") }}
+2 {uid: 2, status: enqueued, details: { primary_key: Some("his_own_vomit"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("his_own_vomit") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,1,2,]

@@ -6,9 +6,9 @@ source: crates/index-scheduler/src/queue/tasks_test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
-1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
-2 {uid: 2, batch_uid: 2, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { primary_key: Some("fish") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
+1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
+2 {uid: 2, batch_uid: 2, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { primary_key: Some("fish"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued []

@@ -1,15 +1,14 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
-1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
-2 {uid: 2, status: enqueued, details: { primary_key: Some("fish") }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
+1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
+2 {uid: 2, status: enqueued, details: { primary_key: Some("fish"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "whalo", primary_key: Some("fish") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,1,2,]

@@ -1,16 +1,15 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
-1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
-2 {uid: 2, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo") }] }}
-3 {uid: 3, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "whalo") }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "whalo") }] }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
+1 {uid: 1, status: enqueued, details: { primary_key: Some("sheep"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("sheep") }}
+2 {uid: 2, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "doggo"), rename: false }] }}
+3 {uid: 3, status: enqueued, details: { swaps: [IndexSwap { indexes: ("catto", "whalo"), rename: false }] }, kind: IndexSwap { swaps: [IndexSwap { indexes: ("catto", "whalo"), rename: false }] }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,1,2,3,]

@@ -1,13 +1,12 @@
 ---
 source: crates/index-scheduler/src/queue/test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 1 {uid: 1, status: enqueued, details: { received_documents: 12, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 12, allow_index_creation: true }}
 2 {uid: 2, status: enqueued, details: { received_documents: 50, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000001, documents_count: 50, allow_index_creation: true }}
 3 {uid: 3, status: enqueued, details: { received_documents: 5000, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggo", primary_key: Some("bone"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000002, documents_count: 5000, allow_index_creation: true }}

@@ -1,6 +1,5 @@
 ---
 source: crates/index-scheduler/src/queue/test.rs
-snapshot_kind: text
 ---
 [
   {
@@ -13,7 +12,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",

@@ -1,6 +1,5 @@
 ---
 source: crates/index-scheduler/src/queue/test.rs
-snapshot_kind: text
 ---
 [
   {
@@ -13,7 +12,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "succeeded",
@@ -39,7 +40,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "failed",
@@ -60,7 +63,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",
@@ -81,7 +86,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
      }
     },
     "status": "enqueued",

@@ -1,6 +1,5 @@
 ---
 source: crates/index-scheduler/src/queue/test.rs
-snapshot_kind: text
 ---
 [
   {
@@ -13,7 +12,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",
@@ -34,7 +35,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",

@@ -1,6 +1,5 @@
 ---
 source: crates/index-scheduler/src/queue/test.rs
-snapshot_kind: text
 ---
 [
   {
@@ -13,7 +12,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "succeeded",
@@ -39,7 +40,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "failed",
@@ -60,7 +63,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",
@@ -81,7 +86,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",

@@ -1,6 +1,5 @@
 ---
 source: crates/index-scheduler/src/queue/test.rs
-snapshot_kind: text
 ---
 [
   {
@@ -13,7 +12,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "succeeded",
@@ -39,7 +40,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "failed",
@@ -60,7 +63,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",
@@ -81,7 +86,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",

@@ -1,6 +1,5 @@
 ---
 source: crates/index-scheduler/src/queue/test.rs
-snapshot_kind: text
 ---
 [
   {
@@ -13,7 +12,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "succeeded",
@@ -39,7 +40,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "failed",
@@ -60,7 +63,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
       }
     },
     "status": "enqueued",
@@ -81,7 +86,9 @@ snapshot_kind: text
     "canceledBy": null,
     "details": {
       "IndexInfo": {
-        "primary_key": null
+        "primary_key": null,
+        "new_index_uid": null,
+        "old_index_uid": null
      }
     },
     "status": "enqueued",
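Every JSON hunk above adds the same two fields to the serialized `IndexInfo` details. A minimal serde sketch consistent with that output, assuming externally tagged serialization; the variant and field names come straight from the JSON, while the enum name and everything else is an assumption about the surrounding type:

    use serde::Serialize;

    // Hypothetical enum; the real `Details` type in meilisearch_types has
    // many more variants than shown here.
    #[derive(Serialize)]
    enum Details {
        IndexInfo {
            primary_key: Option<String>,
            new_index_uid: Option<String>,
            old_index_uid: Option<String>,
        },
    }

With serde_json, `Details::IndexInfo { primary_key: None, new_index_uid: None, old_index_uid: None }` serializes to the `{"IndexInfo":{"primary_key":null,"new_index_uid":null,"old_index_uid":null}}` object these snapshots now expect.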
@@ -97,7 +97,22 @@ impl TaskQueue {
         Ok(self.all_tasks.get(rtxn, &task_id)?)
     }

-    pub(crate) fn update_task(&self, wtxn: &mut RwTxn, task: &Task) -> Result<()> {
+    /// Update the inverted task indexes and write the new value of the task.
+    ///
+    /// The passed `task` object typically comes from a previous transaction, so two kinds of modification might have occurred:
+    /// 1. Modification to the `task` object after loading it from the DB (the purpose of this method is to persist these changes)
+    /// 2. Modification to the task committed by another transaction in the DB (an annoying consequence of having lost the original
+    ///    transaction from which the `task` instance was deserialized)
+    ///
+    /// When calling this function, this `task` is modified to take into account any existing `network`
+    /// that can have been added since the task was loaded into memory.
+    ///
+    /// Any other modification to the task that was committed from the DB since the parameter was pulled from the DB will be overwritten.
+    ///
+    /// # Errors
+    ///
+    /// - CorruptedTaskQueue: The task doesn't exist in the database
+    pub(crate) fn update_task(&self, wtxn: &mut RwTxn, task: &mut Task) -> Result<()> {
         let old_task = self.get_task(wtxn, task.uid)?.ok_or(Error::CorruptedTaskQueue)?;
         let reprocessing = old_task.status != Status::Enqueued;

@@ -157,6 +172,12 @@ impl TaskQueue {
             }
         }

+        task.network = match (old_task.network, task.network.take()) {
+            (None, None) => None,
+            (None, Some(network)) | (Some(network), None) => Some(network),
+            (Some(_), Some(network)) => Some(network),
+        };
+
         self.all_tasks.put(wtxn, &task.uid, task)?;
         Ok(())
     }
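The merge introduced above always keeps the in-memory `network` when both the DB copy and the in-memory task carry one, and otherwise keeps whichever side is `Some`. A standalone sketch of the same rule, with illustrative names:

    // Mirrors the match in `update_task` above: when both sides are Some,
    // the in-memory value wins; otherwise whichever side is Some survives.
    fn merge_network<T>(from_db: Option<T>, in_memory: Option<T>) -> Option<T> {
        match (from_db, in_memory) {
            (None, None) => None,
            (None, Some(n)) | (Some(n), None) => Some(n),
            (Some(_), Some(n)) => Some(n),
        }
    }

The last two arms could be collapsed, since the in-memory value wins whenever it is `Some`; the expanded form is kept here to match the diff line for line.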
@@ -304,11 +304,11 @@ fn query_tasks_special_rules() {
     let kind = index_creation_task("doggo", "sheep");
     let _task = index_scheduler.register(kind, None, false).unwrap();
     let kind = KindWithContent::IndexSwap {
-        swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+        swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
     };
     let _task = index_scheduler.register(kind, None, false).unwrap();
     let kind = KindWithContent::IndexSwap {
-        swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()) }],
+        swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()), rename: false }],
     };
     let _task = index_scheduler.register(kind, None, false).unwrap();

@@ -399,7 +399,7 @@ fn query_tasks_canceled_by() {
     let kind = index_creation_task("doggo", "sheep");
     let _ = index_scheduler.register(kind, None, false).unwrap();
     let kind = KindWithContent::IndexSwap {
-        swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+        swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
     };
     let _task = index_scheduler.register(kind, None, false).unwrap();

@@ -73,6 +73,7 @@ impl From<KindWithContent> for AutobatchKind {
             | KindWithContent::DumpCreation { .. }
             | KindWithContent::Export { .. }
             | KindWithContent::UpgradeDatabase { .. }
+            | KindWithContent::NetworkTopologyChange { .. }
             | KindWithContent::SnapshotCreation => {
                 panic!("The autobatcher should never be called with tasks that don't apply to an index.")
             }

@@ -75,7 +75,11 @@ fn idx_create() -> KindWithContent {
 }

 fn idx_update() -> KindWithContent {
-    KindWithContent::IndexUpdate { index_uid: String::from("doggo"), primary_key: None }
+    KindWithContent::IndexUpdate {
+        index_uid: String::from("doggo"),
+        primary_key: None,
+        new_index_uid: None,
+    }
 }

 fn idx_del() -> KindWithContent {
@@ -84,7 +88,10 @@ fn idx_del() -> KindWithContent {

 fn idx_swap() -> KindWithContent {
     KindWithContent::IndexSwap {
-        swaps: vec![IndexSwap { indexes: (String::from("doggo"), String::from("catto")) }],
+        swaps: vec![IndexSwap {
+            indexes: (String::from("doggo"), String::from("catto")),
+            rename: false,
+        }],
     }
 }
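The updated test helpers spell out the new `new_index_uid` field on `KindWithContent::IndexUpdate`. A hedged usage sketch in the same style as the helpers above; whether a `Some` value actually renames the index is an assumption based on the field name and is not confirmed by this excerpt:

    use meilisearch_types::tasks::KindWithContent;

    fn idx_rename() -> KindWithContent {
        KindWithContent::IndexUpdate {
            index_uid: String::from("doggo"),
            primary_key: None,
            // Hypothetical: presumably requests renaming `doggo` to `doggo2`;
            // `None`, as used throughout the tests, keeps the current name.
            new_index_uid: Some(String::from("doggo2")),
        }
    }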
@@ -38,6 +38,7 @@ pub(crate) enum Batch {
|
|||||||
IndexUpdate {
|
IndexUpdate {
|
||||||
index_uid: String,
|
index_uid: String,
|
||||||
primary_key: Option<String>,
|
primary_key: Option<String>,
|
||||||
|
new_index_uid: Option<String>,
|
||||||
task: Task,
|
task: Task,
|
||||||
},
|
},
|
||||||
IndexDeletion {
|
IndexDeletion {
|
||||||
@@ -54,6 +55,9 @@ pub(crate) enum Batch {
|
|||||||
UpgradeDatabase {
|
UpgradeDatabase {
|
||||||
tasks: Vec<Task>,
|
tasks: Vec<Task>,
|
||||||
},
|
},
|
||||||
|
NetworkTopologyChanges {
|
||||||
|
tasks: Vec<Task>,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
@@ -65,6 +69,7 @@ pub(crate) enum DocumentOperation {
|
|||||||
|
|
||||||
/// A [batch](Batch) that combines multiple tasks operating on an index.
|
/// A [batch](Batch) that combines multiple tasks operating on an index.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
#[allow(clippy::large_enum_variant)]
|
||||||
pub(crate) enum IndexOperation {
|
pub(crate) enum IndexOperation {
|
||||||
DocumentOperation {
|
DocumentOperation {
|
||||||
index_uid: String,
|
index_uid: String,
|
||||||
@@ -114,7 +119,8 @@ impl Batch {
|
|||||||
Batch::SnapshotCreation(tasks)
|
Batch::SnapshotCreation(tasks)
|
||||||
| Batch::TaskDeletions(tasks)
|
| Batch::TaskDeletions(tasks)
|
||||||
| Batch::UpgradeDatabase { tasks }
|
| Batch::UpgradeDatabase { tasks }
|
||||||
| Batch::IndexDeletion { tasks, .. } => {
|
| Batch::IndexDeletion { tasks, .. }
|
||||||
|
| Batch::NetworkTopologyChanges { tasks } => {
|
||||||
RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
|
RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
|
||||||
}
|
}
|
||||||
Batch::IndexOperation { op, .. } => match op {
|
Batch::IndexOperation { op, .. } => match op {
|
||||||
@@ -149,6 +155,7 @@ impl Batch {
|
|||||||
| Dump(_)
|
| Dump(_)
|
||||||
| Export { .. }
|
| Export { .. }
|
||||||
| UpgradeDatabase { .. }
|
| UpgradeDatabase { .. }
|
||||||
|
| NetworkTopologyChanges { .. }
|
||||||
| IndexSwap { .. } => None,
|
| IndexSwap { .. } => None,
|
||||||
IndexOperation { op, .. } => Some(op.index_uid()),
|
IndexOperation { op, .. } => Some(op.index_uid()),
|
||||||
IndexCreation { index_uid, .. }
|
IndexCreation { index_uid, .. }
|
||||||
@@ -174,6 +181,7 @@ impl fmt::Display for Batch {
|
|||||||
Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
|
Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
|
||||||
Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
|
Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
|
||||||
Batch::Export { .. } => f.write_str("Export")?,
|
Batch::Export { .. } => f.write_str("Export")?,
|
||||||
|
Batch::NetworkTopologyChanges { .. } => f.write_str("NetworkTopologyChange")?,
|
||||||
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
|
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
|
||||||
};
|
};
|
||||||
match index_uid {
|
match index_uid {
|
||||||
@@ -405,11 +413,13 @@ impl IndexScheduler {
                 let mut task =
                     self.queue.tasks.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
                 current_batch.processing(Some(&mut task));
-                let primary_key = match &task.kind {
-                    KindWithContent::IndexUpdate { primary_key, .. } => primary_key.clone(),
+                let (primary_key, new_index_uid) = match &task.kind {
+                    KindWithContent::IndexUpdate { primary_key, new_index_uid, .. } => {
+                        (primary_key.clone(), new_index_uid.clone())
+                    }
                     _ => unreachable!(),
                 };
-                Ok(Some(Batch::IndexUpdate { index_uid, primary_key, task }))
+                Ok(Some(Batch::IndexUpdate { index_uid, primary_key, new_index_uid, task }))
             }
             BatchKind::IndexDeletion { ids } => Ok(Some(Batch::IndexDeletion {
                 index_uid,

@@ -541,7 +551,18 @@ impl IndexScheduler {
             return Ok(Some((Batch::Dump(task), current_batch)));
         }

-        // 6. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
+        // 6. We batch the network changes.
+        let to_network = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)? & enqueued;
+        if !to_network.is_empty() {
+            let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_network)?;
+            current_batch.processing(&mut tasks);
+            current_batch.reason(BatchStopReason::TaskKindCannotBeBatched {
+                kind: Kind::NetworkTopologyChange,
+            });
+            return Ok(Some((Batch::NetworkTopologyChanges { tasks }, current_batch)));
+        }
+
+        // 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
         let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
         let mut task =
             self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
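Note: the new step 6 above forces NetworkTopologyChange tasks into a batch of their own by intersecting the scheduler's per-kind task set with the enqueued set. Below is a minimal standalone sketch of that selection, using the same `roaring` crate the scheduler uses; the per-kind index is mocked as a plain bitmap here rather than read from the real queue.

use roaring::RoaringBitmap;

// Returns the tasks of one kind that are currently enqueued, or None when
// there is nothing of that kind to batch.
fn network_tasks(kind_index: &RoaringBitmap, enqueued: &RoaringBitmap) -> Option<RoaringBitmap> {
    let to_network = kind_index & enqueued;
    if to_network.is_empty() {
        None
    } else {
        Some(to_network)
    }
}

fn main() {
    let kind_index = RoaringBitmap::from_iter([3u32, 7, 9]);
    let enqueued = RoaringBitmap::from_iter([7u32, 8, 9, 10]);
    assert_eq!(network_tasks(&kind_index, &enqueued), Some(RoaringBitmap::from_iter([7u32, 9])));
}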
@@ -0,0 +1,6 @@
+// Copyright © 2025 Meilisearch Some Rights Reserved
+// This file is part of Meilisearch Enterprise Edition (EE).
+// Use of this source code is governed by the Business Source License 1.1,
+// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
+
+mod process_network;
@@ -0,0 +1,362 @@
+// Copyright © 2025 Meilisearch Some Rights Reserved
+// This file is part of Meilisearch Enterprise Edition (EE).
+// Use of this source code is governed by the Business Source License 1.1,
+// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
+
+use std::collections::BTreeMap;
+use std::time::Duration;
+
+use bumpalo::Bump;
+use itertools::{EitherOrBoth, Itertools};
+use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote, Network, Remote};
+use meilisearch_types::milli::documents::PrimaryKey;
+use meilisearch_types::milli::progress::{EmbedderStats, Progress};
+use meilisearch_types::milli::update::new::indexer;
+use meilisearch_types::milli::update::Setting;
+use meilisearch_types::milli::{self};
+use meilisearch_types::tasks::{KindWithContent, Status, Task};
+use roaring::RoaringBitmap;
+
+use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
+use crate::{Error, IndexScheduler};
+
+impl IndexScheduler {
+    pub(crate) fn process_network_changes(
+        &self,
+        progress: Progress,
+        mut tasks: Vec<Task>,
+    ) -> crate::Result<Vec<Task>> {
+        let old_network = self.network();
+        let mut current_network = Some(old_network.clone());
+        for task in &tasks {
+            let KindWithContent::NetworkTopologyChange { network, origin } = &task.kind else {
+                continue;
+            };
+            current_network = match (current_network, network) {
+                (None, None) => None,
+                (None, Some(network)) => Some(accumulate(DbNetwork::default(), network.clone())?),
+                (Some(current_network), None) => Some(current_network),
+                (Some(current_network), Some(new_network)) => {
+                    Some(accumulate(current_network, new_network.clone())?)
+                }
+            };
+        }
+
+        'network: {
+            let mut new_network = current_network.unwrap_or_default();
+            if old_network == new_network {
+                // no change, exit
+                break 'network;
+            }
+
+            /// TODO: only do this if the task originates with an end-user
+            let must_replicate = old_network.sharding || new_network.sharding;
+
+            if !must_replicate {
+                self.put_network(new_network)?;
+                break 'network;
+            }
+
+            let must_stop_processing = &self.scheduler.must_stop_processing;
+
+            /// FIXME: make it mandatory for `self` to be part of the network
+            let old_this = old_network.local.as_deref();
+            /// FIXME: error here
+            let new_this = new_network.local.unwrap();
+
+            // in network replication, we need to tell old nodes that they are no longer part of the network.
+            // This is made difficult by "node aliasing": Meilisearch has no way of knowing if two nodes with different names
+            // or even different URLs actually refer to the same machine in two different versions of the network.
+            //
+            // This implementation ignores aliasing: a node is the same when it has the same name.
+            //
+            // To defeat aliasing, we iterate a first time to collect all deletions and additions, then we make sure to process the deletions
+            // first, rather than processing the tasks in the alphalexical order of remotes.
+            let mut node_deletions = Vec::new();
+            let mut node_additions = Vec::new();
+            for eob in old_network
+                .remotes
+                .iter()
+                .merge_join_by(new_network.remotes.iter(), |(left, _), (right, _)| left.cmp(right))
+            {
+                match eob {
+                    EitherOrBoth::Both((to_update_name, _), (_, new_node)) => {
+                        if to_update_name.as_str() == new_this {
+                            continue; // skip `self`
+                        }
+                        node_additions.push((to_update_name, new_node));
+                    }
+                    EitherOrBoth::Left((to_delete_name, to_delete_node)) => {
+                        if Some(to_delete_name.as_str()) == old_this {
+                            continue; // skip `self`
+                        }
+                        node_deletions.push((to_delete_name, to_delete_node));
+                    }
+                    EitherOrBoth::Right((to_add_name, to_add_node)) => {
+                        if to_add_name.as_str() == new_this {
+                            continue; // skip `self`
+                        }
+                        node_additions.push((to_add_name, to_add_node));
+                    }
+                }
+            }
+
+            let runtime = self.runtime.clone().unwrap();
+            let mut in_flight = Vec::new();
+            // process deletions
+            for (to_delete_name, to_delete) in node_deletions {
+                // set `self` to None so that this node is forgotten about
+                new_network.local = None;
+                in_flight.push(proxy_network(&runtime, to_delete.url.as_str(), &new_network)?);
+            }
+
+            runtime.block_on(async {
+                for task in in_flight.drain(..) {
+                    // TODO: log and ignore errors during deletion
+                    let res = task.await;
+                }
+            });
+
+            // process additions
+            for (to_add_name, to_add) in node_additions {
+                new_network.local = Some(to_add_name.clone());
+                in_flight.push(proxy_network(&runtime, to_add.url.as_str(), &new_network)?);
+            }
+
+            runtime.block_on(async {
+                for task in in_flight.drain(..) {
+                    // TODO: handle errors during addition
+                    let res = task.await;
+                }
+            });
+
+            // balance documents
+            new_network.local = Some(new_this);
+
+            self.balance_documents(&new_network, &progress, &must_stop_processing)?;
+
+            self.put_network(new_network)?;
+        }
+
+        for task in &mut tasks {
+            task.status = Status::Succeeded;
+        }
+        Ok(tasks)
+    }
+
+    fn balance_documents(
+        &self,
+        new_network: &DbNetwork,
+        progress: &Progress,
+        must_stop_processing: &crate::scheduler::MustStopProcessing,
+    ) -> crate::Result<()> {
+        /// FIXME unwrap
+        let new_shards = new_network.shards().unwrap();
+
+        // TECHDEBT: this spawns a `ureq` agent additionally to `reqwest`. We probably want to harmonize all of this.
+        let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
+
+        let mut indexer_alloc = Bump::new();
+
+        // process by batches of 20MiB. Allow for compression? Don't forget about embeddings
+        let _: Vec<()> = self.try_for_each_index(|index_uid, index| -> crate::Result<()> {
+            indexer_alloc.reset();
+            let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
+            let index_rtxn = index.read_txn()?;
+            let all_docids = index.external_documents_ids();
+            let mut documents_to_move_to: hashbrown::HashMap<String, RoaringBitmap> =
+                hashbrown::HashMap::new();
+            let mut documents_to_delete = RoaringBitmap::new();
+
+            for res in all_docids.iter(&index_rtxn)? {
+                let (external_docid, docid) = res?;
+                match new_shards.processing_shard(external_docid) {
+                    Some(shard) if shard.is_own => continue,
+                    Some(shard) => {
+                        documents_to_move_to
+                            .entry_ref(shard.name.as_str())
+                            .or_default()
+                            .insert(docid);
+                    }
+                    None => {
+                        documents_to_delete.insert(docid);
+                    }
+                }
+            }
+
+            let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
+
+            for (remote, documents_to_move) in documents_to_move_to {
+                /// TODO: justify the unwrap
+                let remote = new_network.remotes.get(&remote).unwrap();
+
+                let target = TargetInstance {
+                    base_url: &remote.url,
+                    api_key: remote.write_api_key.as_deref(),
+                };
+                let options = ExportOptions {
+                    index_uid,
+                    payload_size: None,
+                    override_settings: false,
+                    extra_headers: &Default::default(),
+                };
+                let ctx = ExportContext {
+                    index,
+                    index_rtxn: &index_rtxn,
+                    universe: &documents_to_move,
+                    progress,
+                    agent: &agent,
+                    must_stop_processing,
+                };
+
+                self.export_one_index(target, options, ctx)?;
+
+                documents_to_delete |= documents_to_move;
+            }
+
+            if documents_to_delete.is_empty() {
+                return Ok(());
+            }
+
+            let mut new_fields_ids_map = fields_ids_map.clone();
+
+            // candidates not empty => index not empty => a primary key is set
+            let primary_key = index.primary_key(&index_rtxn)?.unwrap();
+
+            let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
+                .map_err(milli::Error::from)
+                .map_err(err)?;
+
+            let mut index_wtxn = index.write_txn()?;
+
+            let mut indexer = indexer::DocumentDeletion::new();
+            indexer.delete_documents_by_docids(documents_to_delete);
+            let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
+            let embedders = index
+                .embedding_configs()
+                .embedding_configs(&index_wtxn)
+                .map_err(milli::Error::from)
+                .map_err(err)?;
+            let embedders = self.embedders(index_uid, embedders)?;
+            let indexer_config = self.index_mapper.indexer_config();
+            let pool = &indexer_config.thread_pool;
+
+            indexer::index(
+                &mut index_wtxn,
+                index,
+                pool,
+                indexer_config.grenad_parameters(),
+                &fields_ids_map,
+                new_fields_ids_map,
+                None, // document deletion never changes primary key
+                &document_changes,
+                embedders,
+                &|| must_stop_processing.get(),
+                &progress,
+                &EmbedderStats::default(),
+            )
+            .map_err(err)?;
+
+            index_wtxn.commit()?;
+
+            Ok(())
+        })?;
+        Ok(())
+    }
+}
+
+fn proxy_network(
+    runtime: &tokio::runtime::Handle,
+    url: &str,
+    network: &DbNetwork,
+) -> crate::Result<tokio::task::JoinHandle<()>> {
+    todo!()
+}
+
+fn accumulate(old_network: DbNetwork, new_network: Network) -> crate::Result<DbNetwork> {
+    let err = |err| Err(Error::from_milli(milli::Error::UserError(err), None));
+
+    let merged_local = match new_network.local {
+        Setting::Set(new_self) => Some(new_self),
+        Setting::Reset => None,
+        Setting::NotSet => old_network.local,
+    };
+
+    let merged_sharding = match new_network.sharding {
+        Setting::Set(new_sharding) => new_sharding,
+        Setting::Reset => false,
+        Setting::NotSet => old_network.sharding,
+    };
+
+    if merged_sharding && merged_local.is_none() {
+        return err(milli::UserError::NetworkShardingWithoutSelf);
+    }
+
+    let merged_remotes = match new_network.remotes {
+        Setting::Set(new_remotes) => {
+            let mut merged_remotes = BTreeMap::new();
+            for either_or_both in old_network
+                .remotes
+                .into_iter()
+                .merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
+            {
+                match either_or_both {
+                    EitherOrBoth::Both((name, old), (_, Some(new))) => {
+                        let DbRemote {
+                            url: old_url,
+                            search_api_key: old_search_api_key,
+                            write_api_key: old_write_api_key,
+                        } = old;
+
+                        let Remote {
+                            url: new_url,
+                            search_api_key: new_search_api_key,
+                            write_api_key: new_write_api_key,
+                        } = new;
+
+                        let merged = DbRemote {
+                            url: match new_url {
+                                Setting::Set(new_url) => new_url,
+                                Setting::Reset => {
+                                    return err(milli::UserError::NetworkMissingUrl(name))
+                                }
+                                Setting::NotSet => old_url,
+                            },
+                            search_api_key: match new_search_api_key {
+                                Setting::Set(new_search_api_key) => Some(new_search_api_key),
+                                Setting::Reset => None,
+                                Setting::NotSet => old_search_api_key,
+                            },
+                            write_api_key: match new_write_api_key {
+                                Setting::Set(new_write_api_key) => Some(new_write_api_key),
+                                Setting::Reset => None,
+                                Setting::NotSet => old_write_api_key,
+                            },
+                        };
+                        merged_remotes.insert(name, merged);
+                    }
+                    EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
+                    EitherOrBoth::Left((name, node)) => {
+                        merged_remotes.insert(name, node);
+                    }
+                    EitherOrBoth::Right((name, Some(node))) => {
+                        let Some(url) = node.url.set() else {
+                            return err(milli::UserError::NetworkMissingUrl(name));
+                        };
+                        let node = DbRemote {
+                            url,
+                            search_api_key: node.search_api_key.set(),
+                            write_api_key: node.write_api_key.set(),
+                        };
+                        merged_remotes.insert(name, node);
+                    }
+                }
+            }
+            merged_remotes
+        }
+        Setting::Reset => BTreeMap::new(),
+        Setting::NotSet => old_network.remotes,
+    };
+
+    Ok(DbNetwork { local: merged_local, remotes: merged_remotes, sharding: merged_sharding })
+}
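Note: every field of the network document goes through the same three-state merge that `accumulate` applies above: Set overrides the old value, Reset clears it, NotSet keeps it. A standalone sketch of just that rule, with a local `Setting` enum mirroring milli's for illustration (not the real type):

// Three-state update value: the absence of a field (NotSet) is distinct
// from an explicit null (Reset).
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

fn merge<T>(old: Option<T>, new: Setting<T>) -> Option<T> {
    match new {
        Setting::Set(value) => Some(value),
        Setting::Reset => None,
        Setting::NotSet => old,
    }
}

fn main() {
    assert_eq!(merge(Some(1), Setting::Set(2)), Some(2));
    assert_eq!(merge(Some(1), Setting::Reset), None);
    assert_eq!(merge(Some(1), Setting::NotSet), Some(1));
}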
@@ -2,6 +2,7 @@ mod autobatcher
 #[cfg(test)]
 mod autobatcher_test;
 mod create_batch;
+mod enterprise_edition;
 mod process_batch;
 mod process_dump_creation;
 mod process_export;

@@ -268,7 +269,7 @@ impl IndexScheduler {

                 self.queue
                     .tasks
-                    .update_task(&mut wtxn, &task)
+                    .update_task(&mut wtxn, &mut task)
                     .map_err(|e| Error::UnrecoverableError(Box::new(e)))?;
             }
             if let Some(canceled_by) = canceled_by {

@@ -349,7 +350,7 @@ impl IndexScheduler {

                 self.queue
                     .tasks
-                    .update_task(&mut wtxn, &task)
+                    .update_task(&mut wtxn, &mut task)
                     .map_err(|e| Error::UnrecoverableError(Box::new(e)))?;
             }
         }
@@ -10,6 +10,7 @@ use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status
 use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
 use milli::update::Settings as MilliSettings;
 use roaring::RoaringBitmap;
+use time::OffsetDateTime;

 use super::create_batch::Batch;
 use crate::processing::{

@@ -134,6 +135,9 @@ impl IndexScheduler {
             Batch::Dump(task) => self
                 .process_dump_creation(progress, task)
                 .map(|tasks| (tasks, ProcessBatchInfo::default())),
+            Batch::NetworkTopologyChanges { tasks } => self
+                .process_network_changes(progress, tasks)
+                .map(|tasks| (tasks, ProcessBatchInfo::default())),
             Batch::IndexOperation { op, must_create_index } => {
                 let index_uid = op.index_uid().to_string();
                 let index = if must_create_index {

@@ -146,7 +150,6 @@ impl IndexScheduler {
                 };
-
                 let mut index_wtxn = index.write_txn()?;

                 let index_version = index.get_version(&index_wtxn)?.unwrap_or((1, 12, 0));
                 let package_version = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH);
                 if index_version != package_version {

@@ -224,24 +227,46 @@ impl IndexScheduler {
                 self.index_mapper.create_index(wtxn, &index_uid, None)?;

                 self.process_batch(
-                    Batch::IndexUpdate { index_uid, primary_key, task },
+                    Batch::IndexUpdate { index_uid, primary_key, new_index_uid: None, task },
                     current_batch,
                     progress,
                 )
             }
-            Batch::IndexUpdate { index_uid, primary_key, mut task } => {
+            Batch::IndexUpdate { index_uid, primary_key, new_index_uid, mut task } => {
                 progress.update_progress(UpdateIndexProgress::UpdatingTheIndex);
+
+                // Get the index (renamed or not)
                 let rtxn = self.env.read_txn()?;
                 let index = self.index_mapper.index(&rtxn, &index_uid)?;
+                let mut index_wtxn = index.write_txn()?;

-                if let Some(primary_key) = primary_key.clone() {
-                    let mut index_wtxn = index.write_txn()?;
+                // Handle rename if new_index_uid is provided
+                let final_index_uid = if let Some(new_uid) = &new_index_uid {
+                    if new_uid != &index_uid {
+                        index.set_updated_at(&mut index_wtxn, &OffsetDateTime::now_utc())?;
+
+                        let mut wtxn = self.env.write_txn()?;
+                        self.apply_index_swap(
+                            &mut wtxn, &progress, task.uid, &index_uid, new_uid, true,
+                        )?;
+                        wtxn.commit()?;
+
+                        new_uid.clone()
+                    } else {
+                        new_uid.clone()
+                    }
+                } else {
+                    index_uid.clone()
+                };
+
+                // Handle primary key update if provided
+                if let Some(ref primary_key) = primary_key {
                     let mut builder = MilliSettings::new(
                         &mut index_wtxn,
                         &index,
                         self.index_mapper.indexer_config(),
                     );
-                    builder.set_primary_key(primary_key);
+                    builder.set_primary_key(primary_key.clone());
                     let must_stop_processing = self.scheduler.must_stop_processing.clone();

                     builder

@@ -250,15 +275,20 @@ impl IndexScheduler {
                             &progress,
                             current_batch.embedder_stats.clone(),
                         )
-                        .map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
-                    index_wtxn.commit()?;
+                        .map_err(|e| Error::from_milli(e, Some(final_index_uid.to_string())))?;
                 }
+
+                index_wtxn.commit()?;
                 // drop rtxn before starting a new wtxn on the same db
                 rtxn.commit()?;

                 task.status = Status::Succeeded;
-                task.details = Some(Details::IndexInfo { primary_key });
+                task.details = Some(Details::IndexInfo {
+                    primary_key: primary_key.clone(),
+                    new_index_uid: new_index_uid.clone(),
+                    // we only display the old index uid if a rename happened => there is a new_index_uid
+                    old_index_uid: new_index_uid.map(|_| index_uid.clone()),
+                });

                 // if the update processed successfully, we're going to store the new
                 // stats of the index. Since the tasks have already been processed and

@@ -268,8 +298,8 @@ impl IndexScheduler {
                     let mut wtxn = self.env.write_txn()?;
                     let index_rtxn = index.read_txn()?;
                     let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
-                        .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?;
-                    self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
+                        .map_err(|e| Error::from_milli(e, Some(final_index_uid.clone())))?;
+                    self.index_mapper.store_stats_of(&mut wtxn, &final_index_uid, &stats)?;
                     wtxn.commit()?;
                     Ok(())
                 }();
@@ -330,13 +360,18 @@ impl IndexScheduler {
                     unreachable!()
                 };
                 let mut not_found_indexes = BTreeSet::new();
-                for IndexSwap { indexes: (lhs, rhs) } in swaps {
-                    for index in [lhs, rhs] {
-                        let index_exists = self.index_mapper.index_exists(&wtxn, index)?;
-                        if !index_exists {
-                            not_found_indexes.insert(index);
-                        }
-                    }
+                let mut found_indexes_but_should_not = BTreeSet::new();
+                for IndexSwap { indexes: (lhs, rhs), rename } in swaps {
+                    let index_exists = self.index_mapper.index_exists(&wtxn, lhs)?;
+                    if !index_exists {
+                        not_found_indexes.insert(lhs);
+                    }
+                    let index_exists = self.index_mapper.index_exists(&wtxn, rhs)?;
+                    match (index_exists, rename) {
+                        (true, true) => found_indexes_but_should_not.insert((lhs, rhs)),
+                        (false, false) => not_found_indexes.insert(rhs),
+                        (true, false) | (false, true) => true, // random value we don't read it anyway
+                    };
                 }
                 if !not_found_indexes.is_empty() {
                     if not_found_indexes.len() == 1 {

@@ -349,6 +384,23 @@ impl IndexScheduler {
                         ));
                     }
                 }
+                if !found_indexes_but_should_not.is_empty() {
+                    if found_indexes_but_should_not.len() == 1 {
+                        let (lhs, rhs) = found_indexes_but_should_not
+                            .into_iter()
+                            .next()
+                            .map(|(lhs, rhs)| (lhs.clone(), rhs.clone()))
+                            .unwrap();
+                        return Err(Error::SwapIndexFoundDuringRename(lhs, rhs));
+                    } else {
+                        return Err(Error::SwapIndexesFoundDuringRename(
+                            found_indexes_but_should_not
+                                .into_iter()
+                                .map(|(_, rhs)| rhs.to_string())
+                                .collect(),
+                        ));
+                    }
+                }
                 progress.update_progress(SwappingTheIndexes::SwappingTheIndexes);
                 for (step, swap) in swaps.iter().enumerate() {
                     progress.update_progress(VariableNameStep::<SwappingTheIndexes>::new(

@@ -362,6 +414,7 @@ impl IndexScheduler {
                         task.uid,
                         &swap.indexes.0,
                         &swap.indexes.1,
+                        swap.rename,
                     )?;
                 }
                 wtxn.commit()?;

@@ -451,6 +504,7 @@ impl IndexScheduler {
         task_id: u32,
         lhs: &str,
         rhs: &str,
+        rename: bool,
     ) -> Result<()> {
         progress.update_progress(InnerSwappingTwoIndexes::RetrieveTheTasks);
         // 1. Verify that both lhs and rhs are existing indexes

@@ -458,16 +512,23 @@ impl IndexScheduler {
         if !index_lhs_exists {
             return Err(Error::IndexNotFound(lhs.to_owned()));
         }
-        let index_rhs_exists = self.index_mapper.index_exists(wtxn, rhs)?;
-        if !index_rhs_exists {
-            return Err(Error::IndexNotFound(rhs.to_owned()));
+        if !rename {
+            let index_rhs_exists = self.index_mapper.index_exists(wtxn, rhs)?;
+            if !index_rhs_exists {
+                return Err(Error::IndexNotFound(rhs.to_owned()));
+            }
         }

         // 2. Get the task set for index = name that appeared before the index swap task
         let mut index_lhs_task_ids = self.queue.tasks.index_tasks(wtxn, lhs)?;
         index_lhs_task_ids.remove_range(task_id..);
-        let mut index_rhs_task_ids = self.queue.tasks.index_tasks(wtxn, rhs)?;
-        index_rhs_task_ids.remove_range(task_id..);
+        let index_rhs_task_ids = if rename {
+            let mut index_rhs_task_ids = self.queue.tasks.index_tasks(wtxn, rhs)?;
+            index_rhs_task_ids.remove_range(task_id..);
+            index_rhs_task_ids
+        } else {
+            RoaringBitmap::new()
+        };

         // 3. before_name -> new_name in the task's KindWithContent
         progress.update_progress(InnerSwappingTwoIndexes::UpdateTheTasks);

@@ -496,7 +557,11 @@ impl IndexScheduler {
         })?;

         // 6. Swap in the index mapper
-        self.index_mapper.swap(wtxn, lhs, rhs)?;
+        if rename {
+            self.index_mapper.rename(wtxn, lhs, rhs)?;
+        } else {
+            self.index_mapper.swap(wtxn, lhs, rhs)?;
+        }

         Ok(())
     }
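Note: the two branches above differ only at the mapping level. Below is a sketch of that difference with a plain `HashMap` standing in for the LMDB-backed index mapper; the real `rename`/`swap` signatures may differ, only the semantics are shown.

use std::collections::HashMap;

// Swap exchanges the two entries; both may exist beforehand.
fn swap(map: &mut HashMap<String, u32>, lhs: &str, rhs: &str) {
    let left = map.remove(lhs);
    let right = map.remove(rhs);
    if let Some(right) = right {
        map.insert(lhs.to_owned(), right);
    }
    if let Some(left) = left {
        map.insert(rhs.to_owned(), left);
    }
}

// Rename moves lhs to rhs; rhs must not already exist (this mirrors the
// SwapIndexFoundDuringRename error above) and lhs must exist.
fn rename(map: &mut HashMap<String, u32>, lhs: &str, rhs: &str) -> bool {
    if map.contains_key(rhs) {
        return false;
    }
    match map.remove(lhs) {
        Some(value) => {
            map.insert(rhs.to_owned(), value);
            true
        }
        None => false,
    }
}

fn main() {
    let mut map = HashMap::from([("old_name".to_owned(), 1)]);
    assert!(rename(&mut map, "old_name", "new_name"));
    assert_eq!(map.get("new_name"), Some(&1));
    map.insert("other".to_owned(), 2);
    swap(&mut map, "new_name", "other");
    assert_eq!(map.get("new_name"), Some(&2));
}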
@@ -16,6 +16,7 @@ use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOr
 use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
 use meilisearch_types::settings::{self, SecretPolicy};
 use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
+use roaring::RoaringBitmap;
 use serde::Deserialize;
 use ureq::{json, Response};

@@ -50,6 +51,7 @@ impl IndexScheduler {
         let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
         let must_stop_processing = self.scheduler.must_stop_processing.clone();
         for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
+            let err = |err| Error::from_milli(err, Some(uid.to_string()));
             if must_stop_processing.get() {
                 return Err(Error::AbortedTask);
             }
@@ -61,104 +63,31 @@ impl IndexScheduler {
             ));

             let ExportIndexSettings { filter, override_settings } = export_settings;

             let index = self.index(uid)?;
             let index_rtxn = index.read_txn()?;
-            let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
-
-            // First, check if the index already exists
-            let url = format!("{base_url}/indexes/{uid}");
-            let response = retry(&must_stop_processing, || {
-                let mut request = agent.get(&url);
-                if let Some(bearer) = &bearer {
-                    request = request.set("Authorization", bearer);
-                }
-
-                request.send_bytes(Default::default()).map_err(into_backoff_error)
-            });
-            let index_exists = match response {
-                Ok(response) => response.status() == 200,
-                Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
-                    false
-                }
-                Err(e) => return Err(e),
-            };
-
-            let primary_key = index
-                .primary_key(&index_rtxn)
-                .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
-
-            // Create the index
-            if !index_exists {
-                let url = format!("{base_url}/indexes");
-                retry(&must_stop_processing, || {
-                    let mut request = agent.post(&url);
-                    if let Some(bearer) = &bearer {
-                        request = request.set("Authorization", bearer);
-                    }
-                    let index_param = json!({ "uid": uid, "primaryKey": primary_key });
-                    request.send_json(&index_param).map_err(into_backoff_error)
-                })?;
-            }
-
-            // Patch the index primary key
-            if index_exists && *override_settings {
-                let url = format!("{base_url}/indexes/{uid}");
-                retry(&must_stop_processing, || {
-                    let mut request = agent.patch(&url);
-                    if let Some(bearer) = &bearer {
-                        request = request.set("Authorization", bearer);
-                    }
-                    let index_param = json!({ "primaryKey": primary_key });
-                    request.send_json(&index_param).map_err(into_backoff_error)
-                })?;
-            }
-
-            // Send the index settings
-            if !index_exists || *override_settings {
-                let mut settings =
-                    settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
-                        .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
-                // Remove the experimental chat setting if not enabled
-                if self.features().check_chat_completions("exporting chat settings").is_err() {
-                    settings.chat = Setting::NotSet;
-                }
-                // Retry logic for sending settings
-                let url = format!("{base_url}/indexes/{uid}/settings");
-                retry(&must_stop_processing, || {
-                    let mut request = agent.patch(&url);
-                    if let Some(bearer) = bearer.as_ref() {
-                        request = request.set("Authorization", bearer);
-                    }
-                    request.send_json(settings.clone()).map_err(into_backoff_error)
-                })?;
-            }
-
-            let filter = filter
-                .as_ref()
-                .map(Filter::from_json)
-                .transpose()
-                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
-                .flatten();
-
-            let filter_universe = filter
-                .map(|f| f.evaluate(&index_rtxn, &index))
-                .transpose()
-                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
-            let whole_universe = index
-                .documents_ids(&index_rtxn)
-                .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
+            let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
+            let filter_universe =
+                filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
+            let whole_universe =
+                index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
             let universe = filter_universe.unwrap_or(whole_universe);
-
-            let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
-            let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
-
-            // We don't need to keep this one alive as we will
-            // spawn many threads to process the documents
-            drop(index_rtxn);
-
-            let total_documents = universe.len() as u32;
-            let (step, progress_step) = AtomicDocumentStep::new(total_documents);
-            progress.update_progress(progress_step);
+            let target = TargetInstance { base_url, api_key };
+            let ctx = ExportContext {
+                index: &index,
+                index_rtxn: &index_rtxn,
+                universe: &universe,
+                progress: &progress,
+                agent: &agent,
+                must_stop_processing: &must_stop_processing,
+            };
+            let options = ExportOptions {
+                index_uid: uid,
+                payload_size,
+                override_settings: *override_settings,
+                extra_headers: &Default::default(),
+            };
+            let total_documents = self.export_one_index(target, options, ctx)?;

             output.insert(
                 IndexUidPattern::new_unchecked(uid.clone()),
@@ -167,155 +96,217 @@ impl IndexScheduler {
                     matched_documents: Some(total_documents as u64),
                 },
             );
-
-            let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
-            let documents_url = format!("{base_url}/indexes/{uid}/documents");
-
-            let results = request_threads()
-                .broadcast(|ctx| {
-                    let index_rtxn = index
-                        .read_txn()
-                        .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
-
-                    let mut buffer = Vec::new();
-                    let mut tmp_buffer = Vec::new();
-                    let mut compressed_buffer = Vec::new();
-                    for (i, docid) in universe.iter().enumerate() {
-                        if i % ctx.num_threads() != ctx.index() {
-                            continue;
-                        }
-
-                        let document = index
-                            .document(&index_rtxn, docid)
-                            .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
-
-                        let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
-                            .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
-
-                        // TODO definitely factorize this code
-                        'inject_vectors: {
-                            let embeddings = index
-                                .embeddings(&index_rtxn, docid)
-                                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
-
-                            if embeddings.is_empty() {
-                                break 'inject_vectors;
-                            }
-
-                            let vectors = document
-                                .entry(RESERVED_VECTORS_FIELD_NAME)
-                                .or_insert(serde_json::Value::Object(Default::default()));
-
-                            let serde_json::Value::Object(vectors) = vectors else {
-                                return Err(Error::from_milli(
-                                    milli::Error::UserError(
-                                        milli::UserError::InvalidVectorsMapType {
-                                            document_id: {
-                                                if let Ok(Some(Ok(index))) = index
-                                                    .external_id_of(
-                                                        &index_rtxn,
-                                                        std::iter::once(docid),
-                                                    )
-                                                    .map(|it| it.into_iter().next())
-                                                {
-                                                    index
-                                                } else {
-                                                    format!("internal docid={docid}")
-                                                }
-                                            },
-                                            value: vectors.clone(),
-                                        },
-                                    ),
-                                    Some(uid.to_string()),
-                                ));
-                            };
-
-                            for (
-                                embedder_name,
-                                EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
-                            ) in embeddings
-                            {
-                                let embeddings = ExplicitVectors {
-                                    embeddings: Some(
-                                        VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
-                                    ),
-                                    regenerate: regenerate &&
-                                        // Meilisearch does not handle well dumps with fragments, because as the fragments
-                                        // are marked as user-provided,
-                                        // all embeddings would be regenerated on any settings change or document update.
-                                        // To prevent this, we mark embeddings has non regenerate in this case.
-                                        !has_fragments,
-                                };
-                                vectors.insert(
-                                    embedder_name,
-                                    serde_json::to_value(embeddings).unwrap(),
-                                );
-                            }
-                        }
-
-                        tmp_buffer.clear();
-                        serde_json::to_writer(&mut tmp_buffer, &document)
-                            .map_err(milli::InternalError::from)
-                            .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
-
-                        // Make sure we put at least one document in the buffer even
-                        // though we might go above the buffer limit before sending
-                        if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
-                            // We compress the documents before sending them
-                            let mut encoder =
-                                GzEncoder::new(&mut compressed_buffer, Compression::default());
-                            encoder
-                                .write_all(&buffer)
-                                .map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
-                            encoder
-                                .finish()
-                                .map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
-
-                            retry(&must_stop_processing, || {
-                                let mut request = agent.post(&documents_url);
-                                request = request.set("Content-Type", "application/x-ndjson");
-                                request = request.set("Content-Encoding", "gzip");
-                                if let Some(bearer) = &bearer {
-                                    request = request.set("Authorization", bearer);
-                                }
-                                request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
-                            })?;
-                            buffer.clear();
-                            compressed_buffer.clear();
-                        }
-                        buffer.extend_from_slice(&tmp_buffer);
-
-                        if i > 0 && i % 100 == 0 {
-                            step.fetch_add(100, atomic::Ordering::Relaxed);
-                        }
-                    }
-
-                    retry(&must_stop_processing, || {
-                        let mut request = agent.post(&documents_url);
-                        request = request.set("Content-Type", "application/x-ndjson");
-                        if let Some(bearer) = &bearer {
-                            request = request.set("Authorization", bearer);
-                        }
-                        request.send_bytes(&buffer).map_err(into_backoff_error)
-                    })?;
-
-                    Ok(())
-                })
-                .map_err(|e| {
-                    Error::from_milli(
-                        milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
-                        Some(uid.to_string()),
-                    )
-                })?;
-            for result in results {
-                result?;
-            }
-
-            step.store(total_documents, atomic::Ordering::Relaxed);
         }

         Ok(output)
     }
+
+    pub(super) fn export_one_index(
+        &self,
+        target: TargetInstance<'_>,
+        options: ExportOptions<'_>,
+        ctx: ExportContext<'_>,
+    ) -> Result<u64, Error> {
+        let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
+
+        let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
+        let url = format!(
+            "{base_url}/indexes/{index_uid}",
+            base_url = target.base_url,
+            index_uid = options.index_uid
+        );
+        let response = retry(ctx.must_stop_processing, || {
+            let mut request = ctx.agent.get(&url);
+            if let Some(bearer) = &bearer {
+                request = request.set("Authorization", bearer);
+            }
+
+            request.send_bytes(Default::default()).map_err(into_backoff_error)
+        });
+        let index_exists = match response {
+            Ok(response) => response.status() == 200,
+            Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => false,
+            Err(e) => return Err(e),
+        };
+        let primary_key =
+            ctx.index.primary_key(&ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
+        if !index_exists {
+            let url = format!("{base_url}/indexes", base_url = target.base_url);
+            retry(ctx.must_stop_processing, || {
+                let mut request = ctx.agent.post(&url);
+                if let Some(bearer) = &bearer {
+                    request = request.set("Authorization", bearer);
+                }
+                let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
+                request.send_json(&index_param).map_err(into_backoff_error)
+            })?;
+        }
+        if index_exists && options.override_settings {
+            retry(ctx.must_stop_processing, || {
+                let mut request = ctx.agent.patch(&url);
+                if let Some(bearer) = &bearer {
+                    request = request.set("Authorization", bearer);
+                }
+                let index_param = json!({ "primaryKey": primary_key });
+                request.send_json(&index_param).map_err(into_backoff_error)
+            })?;
+        }
+        if !index_exists || options.override_settings {
+            let mut settings =
+                settings::settings(&ctx.index, &ctx.index_rtxn, SecretPolicy::RevealSecrets)
+                    .map_err(err)?;
+            // Remove the experimental chat setting if not enabled
+            if self.features().check_chat_completions("exporting chat settings").is_err() {
+                settings.chat = Setting::NotSet;
+            }
+            // Retry logic for sending settings
+            let url = format!(
+                "{base_url}/indexes/{index_uid}/settings",
+                base_url = target.base_url,
+                index_uid = options.index_uid
+            );
+            retry(ctx.must_stop_processing, || {
+                let mut request = ctx.agent.patch(&url);
+                if let Some(bearer) = bearer.as_ref() {
+                    request = request.set("Authorization", bearer);
+                }
+                request.send_json(settings.clone()).map_err(into_backoff_error)
+            })?;
+        }
+
+        let fields_ids_map = ctx.index.fields_ids_map(&ctx.index_rtxn)?;
+        let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
+        let total_documents = ctx.universe.len() as u32;
+        let (step, progress_step) = AtomicDocumentStep::new(total_documents);
+        ctx.progress.update_progress(progress_step);
+
+        let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
+        let documents_url = format!(
+            "{base_url}/indexes/{index_uid}/documents",
+            base_url = target.base_url,
+            index_uid = options.index_uid
+        );
+        let results = request_threads()
+            .broadcast(|broadcast| {
+                let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;
+
+                let mut buffer = Vec::new();
+                let mut tmp_buffer = Vec::new();
+                let mut compressed_buffer = Vec::new();
+                for (i, docid) in ctx.universe.iter().enumerate() {
+                    if i % broadcast.num_threads() != broadcast.index() {
+                        continue;
+                    }
+
+                    let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;
+
+                    let mut document =
+                        obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;
+
+                    // TODO definitely factorize this code
+                    'inject_vectors: {
+                        let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;
+
+                        if embeddings.is_empty() {
+                            break 'inject_vectors;
+                        }
+
+                        let vectors = document
+                            .entry(RESERVED_VECTORS_FIELD_NAME)
+                            .or_insert(serde_json::Value::Object(Default::default()));
+
+                        let serde_json::Value::Object(vectors) = vectors else {
+                            return Err(err(milli::Error::UserError(
+                                milli::UserError::InvalidVectorsMapType {
+                                    document_id: {
+                                        if let Ok(Some(Ok(index))) = ctx
+                                            .index
+                                            .external_id_of(&index_rtxn, std::iter::once(docid))
+                                            .map(|it| it.into_iter().next())
+                                        {
+                                            index
+                                        } else {
+                                            format!("internal docid={docid}")
+                                        }
+                                    },
+                                    value: vectors.clone(),
+                                },
+                            )));
+                        };
+
+                        for (
+                            embedder_name,
+                            EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
+                        ) in embeddings
+                        {
+                            let embeddings = ExplicitVectors {
+                                embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
+                                    embeddings,
+                                )),
+                                regenerate: regenerate &&
+                                    // Meilisearch does not handle well dumps with fragments, because as the fragments
+                                    // are marked as user-provided,
+                                    // all embeddings would be regenerated on any settings change or document update.
+                                    // To prevent this, we mark embeddings has non regenerate in this case.
+                                    !has_fragments,
+                            };
+                            vectors
+                                .insert(embedder_name, serde_json::to_value(embeddings).unwrap());
+                        }
+                    }
+
+                    tmp_buffer.clear();
+                    serde_json::to_writer(&mut tmp_buffer, &document)
+                        .map_err(milli::InternalError::from)
+                        .map_err(milli::Error::from)
+                        .map_err(err)?;
+
+                    // Make sure we put at least one document in the buffer even
+                    // though we might go above the buffer limit before sending
+                    if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
+                        // We compress the documents before sending them
+                        let mut encoder =
+                            GzEncoder::new(&mut compressed_buffer, Compression::default());
+                        encoder.write_all(&buffer).map_err(milli::Error::from).map_err(err)?;
+                        encoder.finish().map_err(milli::Error::from).map_err(err)?;
+
+                        retry(ctx.must_stop_processing, || {
+                            let mut request = ctx.agent.post(&documents_url);
+                            request = request.set("Content-Type", "application/x-ndjson");
+                            request = request.set("Content-Encoding", "gzip");
+                            if let Some(bearer) = &bearer {
+                                request = request.set("Authorization", bearer);
+                            }
+                            request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
+                        })?;
+                        buffer.clear();
+                        compressed_buffer.clear();
+                    }
+                    buffer.extend_from_slice(&tmp_buffer);
+
+                    if i > 0 && i % 100 == 0 {
+                        step.fetch_add(100, atomic::Ordering::Relaxed);
+                    }
+                }
+
+                retry(ctx.must_stop_processing, || {
+                    let mut request = ctx.agent.post(&documents_url);
+                    request = request.set("Content-Type", "application/x-ndjson");
+                    if let Some(bearer) = &bearer {
+                        request = request.set("Authorization", bearer);
+                    }
+                    request.send_bytes(&buffer).map_err(into_backoff_error)
+                })?;
+
+                Ok(())
+            })
+            .map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
+        for result in results {
+            result?;
+        }
+        step.store(total_documents, atomic::Ordering::Relaxed);
+        Ok(total_documents as u64)
+    }
 }

 fn retry<F>(must_stop_processing: &MustStopProcessing, send_request: F) -> Result<ureq::Response>
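Note: the payload loop in `export_one_index` above follows one simple rule: a payload always admits at least one document, and is flushed before a document that would push the buffer past the limit (20 MiB by default). A tiny sketch of just that predicate:

// True when the buffer should be flushed before appending the next document.
// An oversized document still goes into an empty buffer alone.
fn must_flush(buffered: usize, next_doc: usize, limit: usize) -> bool {
    buffered != 0 && buffered + next_doc > limit
}

fn main() {
    let limit = 20 * 1024 * 1024;
    assert!(!must_flush(0, 30 * 1024 * 1024, limit));
    assert!(must_flush(15 * 1024 * 1024, 8 * 1024 * 1024, limit));
}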
@@ -370,8 +361,31 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
             }
             Err(e) => e.into(),
         },
-        ureq::Error::Transport(transport) => io::Error::new(io::ErrorKind::Other, transport).into(),
+        ureq::Error::Transport(transport) => io::Error::other(transport).into(),
     }
 }
+
+// export_one_index arguments
+pub(super) struct TargetInstance<'a> {
+    pub(super) base_url: &'a str,
+    pub(super) api_key: Option<&'a str>,
+}
+
+pub(super) struct ExportOptions<'a> {
+    pub(super) index_uid: &'a str,
+    pub(super) payload_size: Option<&'a Byte>,
+    pub(super) override_settings: bool,
+    pub(super) extra_headers: &'a hashbrown::HashMap<String, String>,
+}
+
+pub(super) struct ExportContext<'a> {
+    pub(super) index: &'a meilisearch_types::milli::Index,
+    pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
+    pub(super) universe: &'a RoaringBitmap,
+    pub(super) progress: &'a Progress,
+    pub(super) agent: &'a ureq::Agent,
+    pub(super) must_stop_processing: &'a MustStopProcessing,
+}
+
+// progress related
 enum ExportIndex {}
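Note: `retry` together with `into_backoff_error` implements a retry-with-backoff policy around these requests. A generic, illustrative sketch of the pattern follows; the attempt count and delays here are made up, and the real helper also aborts as soon as `must_stop_processing` is set.

use std::time::Duration;

// Retries a fallible request up to `max_retries` extra times, doubling the
// delay between attempts (exponential backoff).
fn retry_with_backoff<T, E>(
    max_retries: u32,
    mut send_request: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut delay = Duration::from_millis(100);
    let mut remaining = max_retries;
    loop {
        match send_request() {
            Ok(response) => return Ok(response),
            Err(error) if remaining == 0 => return Err(error),
            Err(_) => {
                std::thread::sleep(delay);
                delay = delay.saturating_mul(2);
                remaining -= 1;
            }
        }
    }
}

fn main() {
    let mut calls = 0;
    let result: Result<&str, &str> = retry_with_backoff(3, || {
        calls += 1;
        if calls < 3 { Err("transient") } else { Ok("sent") }
    });
    assert_eq!(result, Ok("sent"));
}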
@@ -66,6 +66,11 @@ impl IndexScheduler {
             }
             IndexOperation::DocumentOperation { index_uid, primary_key, operations, mut tasks } => {
                 progress.update_progress(DocumentOperationProgress::RetrievingConfig);
+
+                let network = self.network();
+
+                let shards = network.shards();
+
                 // TODO: at some point, for better efficiency we might want to reuse the bumpalo for successive batches.
                 // this is made difficult by the fact we're doing private clones of the index scheduler and sending it
                 // to a fresh thread.

@@ -92,7 +97,7 @@ impl IndexScheduler {
                     .embedding_configs()
                     .embedding_configs(index_wtxn)
                     .map_err(|e| Error::from_milli(e.into(), Some(index_uid.clone())))?;
-                let embedders = self.embedders(index_uid.clone(), embedders)?;
+                let embedders = self.embedders(&index_uid, embedders)?;
                 for operation in operations {
                     match operation {
                         DocumentOperation::Replace(_content_uuid) => {

@@ -130,6 +135,7 @@ impl IndexScheduler {
                         &mut new_fields_ids_map,
                         &|| must_stop_processing.get(),
                         progress.clone(),
+                        shards.as_ref(),
                     )
                     .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?;

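Note: `network.shards()` hands the indexer a shard list so that each incoming document is owned by exactly one node. A hypothetical sketch of deterministic routing by hashing the external document id; the real `processing_shard` may pick shards differently, only the idea that every node routes a given id identically is shown.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Shard {
    name: String,
    is_own: bool,
}

// Maps an external document id to exactly one shard; identical inputs always
// land on the same shard, so all nodes agree on ownership.
fn processing_shard<'a>(shards: &'a [Shard], external_docid: &str) -> Option<&'a Shard> {
    if shards.is_empty() {
        return None;
    }
    let mut hasher = DefaultHasher::new();
    external_docid.hash(&mut hasher);
    let index = (hasher.finish() % shards.len() as u64) as usize;
    Some(&shards[index])
}

fn main() {
    let shards = vec![
        Shard { name: "remote-0".into(), is_own: true },
        Shard { name: "remote-1".into(), is_own: false },
    ];
    if let Some(shard) = processing_shard(&shards, "movie-42") {
        println!("movie-42 belongs to {} (own: {})", shard.name, shard.is_own);
    }
}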
@@ -278,7 +284,7 @@ impl IndexScheduler {
                     .embedding_configs()
                     .embedding_configs(index_wtxn)
                     .map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
-                let embedders = self.embedders(index_uid.clone(), embedders)?;
+                let embedders = self.embedders(&index_uid, embedders)?;

                 progress.update_progress(DocumentEditionProgress::Indexing);
                 congestion = Some(

@@ -428,7 +434,7 @@ impl IndexScheduler {
                     .embedding_configs()
                     .embedding_configs(index_wtxn)
                     .map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
-                let embedders = self.embedders(index_uid.clone(), embedders)?;
+                let embedders = self.embedders(&index_uid, embedders)?;

                 progress.update_progress(DocumentDeletionProgress::Indexing);
                 congestion = Some(
@@ -6,9 +6,9 @@ source: crates/index-scheduler/src/scheduler/test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
-1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
-2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "girafos", primary_key: None }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
+2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "girafos", primary_key: None }}
 3 {uid: 3, batch_uid: 3, status: succeeded, details: { deleted_documents: Some(0) }, kind: DocumentClear { index_uid: "doggos" }}
 4 {uid: 4, batch_uid: 4, status: succeeded, details: { deleted_documents: Some(0) }, kind: DocumentClear { index_uid: "cattos" }}
 5 {uid: 5, batch_uid: 5, status: succeeded, details: { deleted_documents: Some(0) }, kind: DocumentClear { index_uid: "girafos" }}
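This snapshot, like every `IndexCreation` snapshot below, changes only because the task's details now carry two extra fields, `old_new_uid` and `new_index_uid`, both `None` for a plain creation. A hedged sketch of the extended details payload on the Rust side (only the three field names are confirmed by the snapshots; the variant name and `Option` payload types are assumptions):

```rust
// Sketch of the task-details shape implied by the snapshots above. The
// field names `primary_key`, `old_new_uid`, and `new_index_uid` appear in
// the diff; everything else here is an assumption for illustration.
#[derive(Debug, Clone)]
pub enum Details {
    IndexInfo {
        primary_key: Option<String>,
        // Both print as `None` for a straight creation; presumably they are
        // populated when an index is renamed to a new uid.
        old_new_uid: Option<String>,
        new_index_uid: Option<String>,
    },
    // other variants (DocumentAdditionOrUpdate, IndexDeletion, ...) elided
}
```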
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
 1 {uid: 1, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
 2 {uid: 2, status: enqueued, details: { deleted_documents: None }, kind: IndexDeletion { index_uid: "doggos" }}
 ----------------------------------------------------------------------
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
 1 {uid: 1, batch_uid: 1, status: succeeded, details: { received_documents: 1, indexed_documents: Some(0) }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
 2 {uid: 2, batch_uid: 1, status: succeeded, details: { deleted_documents: Some(0) }, kind: IndexDeletion { index_uid: "doggos" }}
 ----------------------------------------------------------------------
@@ -1,13 +1,12 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+0 {uid: 0, status: enqueued, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,]
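The other recurring change in these `.snap` files is the removal of the `snapshot_kind: text` header line. That line is metadata written by older releases of the `insta` snapshot crate; regenerating the snapshots with a newer `insta` (which no longer emits it) drops it from every file while leaving the asserted content untouched. For reference, a minimal illustration of how such a snapshot file is produced (the test body is illustrative, not taken from the suite):

```rust
// Running this test via `cargo insta test` writes a snapshot file whose
// header is the `---` / `source: ...` / `---` block seen in the hunks above.
#[cfg(test)]
mod tests {
    #[test]
    fn scheduler_state_snapshot() {
        let rendered = "### All Tasks:\nenqueued [0,]";
        insta::assert_snapshot!(rendered);
    }
}
```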
@@ -1,13 +1,12 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+0 {uid: 0, status: enqueued, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
 1 {uid: 1, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
 ----------------------------------------------------------------------
 ### Status:
@@ -1,13 +1,12 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+0 {uid: 0, status: enqueued, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
 1 {uid: 1, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
 2 {uid: 2, status: enqueued, details: { deleted_documents: None }, kind: IndexDeletion { index_uid: "doggos" }}
 ----------------------------------------------------------------------
@@ -7,7 +7,7 @@ source: crates/index-scheduler/src/scheduler/test.rs
 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("id"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,]
@@ -1,13 +1,12 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
-snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("id"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,]
@@ -7,8 +7,8 @@ source: crates/index-scheduler/src/scheduler/test.rs
 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
-1 {uid: 1, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_b", primary_key: Some("id") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("id"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
+1 {uid: 1, status: enqueued, details: { primary_key: Some("id"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "index_b", primary_key: Some("id") }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,1,]
@@ -7,8 +7,8 @@ source: crates/index-scheduler/src/scheduler/test.rs
 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
-1 {uid: 1, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_b", primary_key: Some("id") }}
+0 {uid: 0, status: enqueued, details: { primary_key: Some("id"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
+1 {uid: 1, status: enqueued, details: { primary_key: Some("id"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "index_b", primary_key: Some("id") }}
 2 {uid: 2, status: enqueued, details: { deleted_documents: None }, kind: IndexDeletion { index_uid: "index_a" }}
 ----------------------------------------------------------------------
 ### Status:
@@ -6,8 +6,8 @@ source: crates/index-scheduler/src/scheduler/test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
-1 {uid: 1, status: enqueued, details: { primary_key: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+1 {uid: 1, status: enqueued, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
 2 {uid: 2, status: enqueued, details: { deleted_documents: None }, kind: IndexDeletion { index_uid: "doggos" }}
 ----------------------------------------------------------------------
 ### Status:
@@ -6,8 +6,8 @@ source: crates/index-scheduler/src/scheduler/test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
-1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
 2 {uid: 2, status: enqueued, details: { deleted_documents: None }, kind: IndexDeletion { index_uid: "doggos" }}
 ----------------------------------------------------------------------
 ### Status:
@@ -6,8 +6,8 @@ source: crates/index-scheduler/src/scheduler/test.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
-1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggos", primary_key: None }}
+1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: None, old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "cattos", primary_key: None }}
 2 {uid: 2, batch_uid: 2, status: succeeded, details: { deleted_documents: Some(0) }, kind: IndexDeletion { index_uid: "doggos" }}
 ----------------------------------------------------------------------
 ### Status:
Some files were not shown because too many files have changed in this diff.