mirror of https://github.com/meilisearch/meilisearch.git — synced 2025-12-04 19:55:43 +00:00

Compare commits: 150 commits between `change-net…` and `openapi-co…` (both branch names are truncated in the capture).
The commit list contains 150 commits; only abbreviated SHA1 hashes were captured (the author and date columns are empty in this mirror view):

72b4b36516, d795ade246, 82adabc5a0, c9a22247d2, c535b8ddef, 8e89619aed, f617ca8e38, 959175ad2a, 341ffbf5ef, 542f3073f4,
0f134b079f, 9e7ae47355, 1edf07df29, 88aa3cddde, e6846cb55a, 29b715e2f9, f28dc5bd2b, 56d0b8ea54, 514edb1b79, cfb609d41d,
11cb062067, 2ca4926ac5, 834bd9b879, cac7e00983, e9300bac64, b0da7864a4, 2b9d379feb, 8d585a04d4, 0095a72fba, 651339648c,
a489f4c172, 3b875ea00e, 9d269c499c, da35ae0a6e, 61945b235d, e936ac172d, 162a84cdbf, 92c63cf351, fca35b7476, 4056657a55,
685d227597, 49b9f6ff38, 79d0a3fb97, 313ef7e79b, 256407be61, 8b3943bd32, 87b972d29a, 09ab61b360, 2459f381b4, 6442f02de4,
91c4d9ea79, 92a4091da3, 29a337f0f9, 8c3cebadaa, b566458aa2, ae4344e359, b6cb384650, 2c3e3d856c, 93e97f814c, e9350f033d,
54c92fd6c0, 4f4df83a51, a51021cab7, e33f4fdeae, e407bca196, cd24ea11b4, ba578e7ab5, 05a74d1e68, 41d61deb97, bba292b01a,
96923dff33, 8f9c9305da, a9f309e1d1, e456a9acd8, 9b7d29466c, b0ef14b6f0, 36febe2068, 6f14a6ec18, 1a45b19e7e, bd7525b166,
359757d939, 1c6eea596c, 693b6f483e, 818a4aa6d9, ddadeb99e9, b8d8be934a, 7175d70b8f, 8a3e65ab6f, 4737e1a2a5, 36522e951b,
fce046d84d, 3fc507bb44, fdbcd033fb, aaab49baca, 0d0d6e8099, c1e351c92b, 67cab4cc9d, f30a37b0fe, a78a9f80dd, 439fee5434,
9e858590e0, 29eebd5f93, 07da6edbdf, 22b83042e6, 52ab13906a, 29bec8efd4, 6947a8990b, fbb2bb0c73, 15918f53a9, d7f5f3a0a3,
1afbf35f27, d7675233d5, c63c1ac32b, 6171dcde0d, 04bc134324, 8ff39d927d, ffd461c800, 9134d27980, f60242979f, d347417cfd,
55d54afd69, dca7679c47, a34b692396, 63829b62e9, 44c8252ad5, 19ae428890, 7adcb657ae, 9624768976, 5025acfd2a, 4bbfdccc3e,
a5b24b54b8, 461e69c143, 915aeafefe, 408529d8b2, 1724ab6d94, 49a500a342, f26eabcfa1, b468c090f3, c14114840e, 7933d1f9ea,
d5a5372aba, 0d5e176dc2, d6f36a773d, a8d55562e9, 40d649ec9e, c272ac8204, e18c677f0e, 84a288da57, cbfc325b56, ea640b076e

The changed files and their diffs follow.
`.github/ISSUE_TEMPLATE/new_feature_issue.md` (vendored, 5 lines changed)

```diff
@@ -24,6 +24,11 @@ TBD
 - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
 - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
 
+### Reminders when adding features
+
+- [ ] Write unit tests using insta
+- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
+
 ### Reminders when modifying the API
 
 - [ ] Update the openAPI file with utoipa:
```
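For orientation, the commands behind the two new checklist items look roughly like this (a minimal sketch; it assumes `cargo-insta` is installed and that `YOUR_TEST.json` is replaced by an actual workload file):

```bash
# Snapshot/unit tests with insta (assumes: cargo install cargo-insta)
cargo insta test      # run the tests and record new snapshots
cargo insta review    # review and accept snapshot changes

# Declarative integration tests; --update-responses fills in expected responses automatically
cargo xtask test workloads/tests/YOUR_TEST.json --update-responses
```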
`.github/workflows/bench-pr.yml` (vendored, 2 lines changed)

```diff
@@ -67,8 +67,6 @@ jobs:
           ref: ${{ steps.comment-branch.outputs.head_ref }}
 
       - uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
 
       - name: Run benchmarks on PR ${{ github.event.issue.id }}
         run: |
```
`.github/workflows/bench-push-indexing.yml` (vendored, 2 lines changed)

```diff
@@ -13,8 +13,6 @@ jobs:
     steps:
       - uses: actions/checkout@v5
       - uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
 
       # Run benchmarks
       - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
```
`.github/workflows/db-change-comments.yml` (vendored, 6 lines changed)

```diff
@@ -6,7 +6,7 @@ on:
 
 env:
   MESSAGE: |
    ### Hello, I'm a bot 🤖
 
    You are receiving this message because you declared that this PR make changes to the Meilisearch database.
    Depending on the nature of the change, additional actions might be required on your part. The following sections detail the additional actions depending on the nature of the change, please copy the relevant section in the description of your PR, and make sure to perform the required actions.
@@ -19,6 +19,7 @@ env:
 
    - [ ] Detail the change to the DB format and why they are forward compatible
    - [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
+   - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
 
 
    ## This PR makes breaking changes
@@ -35,8 +36,7 @@ env:
    - [ ] Write the code to go from the old database to the new one
      - If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
      - If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
-   - [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
+   - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
 
 
 jobs:
   add-comment:
```
`.github/workflows/flaky-tests.yml` (vendored, 6 lines changed)

```diff
@@ -13,6 +13,12 @@ jobs:
       image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
```
`.github/workflows/fuzzer-indexing.yml` (vendored, 2 lines changed)

```diff
@@ -13,8 +13,6 @@ jobs:
     steps:
       - uses: actions/checkout@v5
       - uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
 
       # Run benchmarks
       - name: Run the fuzzer
```
`.github/workflows/publish-apt-brew-pkg.yml` (vendored, 6 lines changed)

```diff
@@ -25,6 +25,12 @@ jobs:
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-deb
         run: cargo install cargo-deb
```
`.github/workflows/publish-docker-images.yml` (vendored, 175 lines changed)

```diff
@@ -14,10 +14,105 @@ on:
   workflow_dispatch:
 
 jobs:
-  docker:
-    runs-on: docker
+  build:
+    runs-on: ${{ matrix.runner }}
+
+    strategy:
+      matrix:
+        platform: [amd64, arm64]
+        edition: [community, enterprise]
+        include:
+          - platform: amd64
+            runner: ubuntu-24.04
+          - platform: arm64
+            runner: ubuntu-24.04-arm
+          - edition: community
+            registry: getmeili/meilisearch
+            feature-flag: ""
+          - edition: enterprise
+            registry: getmeili/meilisearch-enterprise
+            feature-flag: "--features enterprise"
+
+    permissions: {}
+    steps:
+      - uses: actions/checkout@v5
+
+      - name: Prepare
+        run: |
+          platform=linux/${{ matrix.platform }}
+          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+        with:
+          platforms: linux/${{ matrix.platform }}
+          install: true
+
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ matrix.registry }}
+          # Prevent `latest` to be updated for each new tag pushed.
+          # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
+          flavor: latest=false
+          tags: |
+            type=ref,event=tag
+            type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
+            type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
+            type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
+            type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
+
+      - name: Build and push by digest
+        uses: docker/build-push-action@v6
+        id: build-and-push
+        with:
+          platforms: linux/${{ matrix.platform }}
+          labels: ${{ steps.meta.outputs.labels }}
+          tags: ${{ matrix.registry }}
+          outputs: type=image,push-by-digest=true,name-canonical=true,push=true
+          build-args: |
+            COMMIT_SHA=${{ github.sha }}
+            COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
+            GIT_TAG=${{ github.ref_name }}
+            EXTRA_ARGS=${{ matrix.feature-flag }}
+
+      - name: Export digest
+        run: |
+          mkdir -p ${{ runner.temp }}/digests
+          digest="${{ steps.build-and-push.outputs.digest }}"
+          touch "${{ runner.temp }}/digests/${digest#sha256:}"
+
+      - name: Upload digest
+        uses: actions/upload-artifact@v4
+        with:
+          name: digests-${{ matrix.edition }}-${{ env.PLATFORM_PAIR }}
+          path: ${{ runner.temp }}/digests/*
+          if-no-files-found: error
+          retention-days: 1
+
+  merge:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        edition: [community, enterprise]
+        include:
+          - edition: community
+            registry: getmeili/meilisearch
+          - edition: enterprise
+            registry: getmeili/meilisearch-enterprise
+    needs:
+      - build
+
     permissions:
       id-token: write # This is needed to use Cosign in keyless mode
 
     steps:
       - uses: actions/checkout@v5
@@ -58,26 +153,30 @@ jobs:
           echo "date=$commit_date" >> $GITHUB_OUTPUT
 
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
       - name: Install cosign
         uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
 
+      - name: Download digests
+        uses: actions/download-artifact@v4
+        with:
+          path: ${{ runner.temp }}/digests
+          pattern: digests-${{ matrix.edition }}-*
+          merge-multiple: true
+
       - name: Login to Docker Hub
         uses: docker/login-action@v3
         with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
       - name: Docker meta
         id: meta
         uses: docker/metadata-action@v5
         with:
-          images: getmeili/meilisearch
+          images: ${{ matrix.registry }}
           # Prevent `latest` to be updated for each new tag pushed.
           # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
           flavor: latest=false
@@ -88,33 +187,31 @@ jobs:
            type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
            type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
 
-      - name: Build and push
-        uses: docker/build-push-action@v6
-        id: build-and-push
-        with:
-          push: true
-          platforms: linux/amd64,linux/arm64
-          tags: ${{ steps.meta.outputs.tags }}
-          build-args: |
-            COMMIT_SHA=${{ github.sha }}
-            COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
-            GIT_TAG=${{ github.ref_name }}
+      - name: Create manifest list and push
+        working-directory: ${{ runner.temp }}/digests
+        run: |
+          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
+            $(printf '${{ matrix.registry }}@sha256:%s ' *)
+
+      - name: Inspect image to fetch digest to sign
+        run: |
+          digest=$(docker buildx imagetools inspect --format='{{ json .Manifest }}' ${{ matrix.registry }}:${{ steps.meta.outputs.version }} | jq -r '.digest')
+          echo "DIGEST=${digest}" >> $GITHUB_ENV
 
       - name: Sign the images with GitHub OIDC Token
         env:
-          DIGEST: ${{ steps.build-and-push.outputs.digest }}
           TAGS: ${{ steps.meta.outputs.tags }}
         run: |
           images=""
           for tag in ${TAGS}; do
-            images+="${tag}@${DIGEST} "
+            images+="${tag}@${{ env.DIGEST }} "
           done
           cosign sign --yes ${images}
 
-      # /!\ Don't touch this without checking with Cloud team
-      - name: Send CI information to Cloud team
+      # /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel
+      - name: Notify meilisearch-cloud
         # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
-        if: github.event_name == 'push'
+        if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }}
         uses: peter-evans/repository-dispatch@v3
         with:
           token: ${{ secrets.MEILI_BOT_GH_PAT }}
@@ -122,21 +219,13 @@ jobs:
           event-type: cloud-docker-build
           client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
 
-      # Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
-      # - name: 'Setup jq'
-      #   uses: dcarbone/install-jq-action
-      # - name: Send deployment to Swarmia
-      #   if: github.event_name == 'push' && success()
-      #   run: |
-      #     JSON_STRING=$( jq --null-input --compact-output \
-      #       --arg version "${{ github.ref_name }}" \
-      #       --arg appName "meilisearch" \
-      #       --arg environment "production" \
-      #       --arg commitSha "${{ github.sha }}" \
-      #       --arg repositoryFullName "${{ github.repository }}" \
-      #       '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
-
-      #     curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
-      #       -H "Content-Type: application/json" \
-      #       -d "$JSON_STRING" \
-      #       https://hook.swarmia.com/deployments
+      # /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel
+      - name: Notify meilisearch-kubernetes
+        # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable
+        if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }}
+        uses: peter-evans/repository-dispatch@v3
+        with:
+          token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          repository: meilisearch/meilisearch-kubernetes
+          event-type: meilisearch-release
+          client-payload: '{ "version": "${{ github.ref_name }}" }'
```
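The rewritten workflow replaces a single multi-platform `docker build --push` with per-architecture builds pushed by digest, which the `merge` job then stitches into a manifest list. A minimal sketch of that pattern, assuming placeholder digests and the community repository name:

```bash
# Each per-arch job pushes an untagged image by digest; the merge job then
# assembles a multi-arch manifest list from those digests and tags it.
docker buildx imagetools create \
  -t getmeili/meilisearch:v1.28.2 \
  getmeili/meilisearch@sha256:<amd64-digest> \
  getmeili/meilisearch@sha256:<arm64-digest>

# Fetch the digest of the resulting manifest list (the value the workflow signs with cosign)
docker buildx imagetools inspect --format '{{ json .Manifest }}' \
  getmeili/meilisearch:v1.28.2 | jq -r '.digest'
```

The digest printed by `imagetools inspect` is what the workflow exports as `DIGEST` and passes to `cosign sign`.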
`.github/workflows/publish-release-assets.yml` (vendored, 176 lines changed)

```diff
@@ -32,157 +32,61 @@ jobs:
         if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
         run: bash .github/scripts/check-release.sh
 
-  publish-linux:
-    name: Publish binary for Linux
-    runs-on: ubuntu-latest
-    needs: check-version
-    container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install needed dependencies
-        run: |
-          apt-get update && apt-get install -y curl
-          apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.89
-      - name: Build
-        run: cargo build --release --locked
-      # No need to upload binaries for dry run (cron or workflow_dispatch)
-      - name: Upload binaries to release
-        if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.11.2
-        with:
-          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          file: target/release/meilisearch
-          asset_name: meilisearch-linux-amd64
-          tag: ${{ github.ref }}
-
-  publish-macos-windows:
-    name: Publish binary for ${{ matrix.os }}
+  publish-binaries:
+    name: Publish binary for ${{ matrix.release }} ${{ matrix.edition }} edition
     runs-on: ${{ matrix.os }}
-    needs: check-version
     strategy:
       fail-fast: false
       matrix:
-        os: [macos-14, windows-2022]
+        edition: [community, enterprise]
+        release:
+          [macos-amd64, macos-aarch64, windows, linux-amd64, linux-aarch64]
         include:
-          - os: macos-14
-            artifact_name: meilisearch
-            asset_name: meilisearch-macos-amd64
-          - os: windows-2022
-            artifact_name: meilisearch.exe
-            asset_name: meilisearch-windows-amd64.exe
+          - edition: "community"
+            feature-flag: ""
+            edition-suffix: ""
+          - edition: "enterprise"
+            feature-flag: "--features enterprise"
+            edition-suffix: "enterprise-"
+          - release: macos-amd64
+            os: macos-15-intel
+            binary_path: release/meilisearch
+            asset_name: macos-amd64
+            extra-args: ""
+          - release: macos-aarch64
+            os: macos-14
+            binary_path: aarch64-apple-darwin/release/meilisearch
+            asset_name: macos-apple-silicon
+            extra-args: "--target aarch64-apple-darwin"
+          - release: windows
+            os: windows-2022
+            binary_path: release/meilisearch.exe
+            asset_name: windows-amd64.exe
+            extra-args: ""
+          - release: linux-amd64
+            os: ubuntu-22.04
+            binary_path: x86_64-unknown-linux-gnu/release/meilisearch
+            asset_name: linux-amd64
+            extra-args: "--target x86_64-unknown-linux-gnu"
+          - release: linux-aarch64
+            os: ubuntu-22.04-arm
+            binary_path: aarch64-unknown-linux-gnu/release/meilisearch
+            asset_name: linux-aarch64
+            extra-args: "--target aarch64-unknown-linux-gnu"
+    needs: check-version
     steps:
       - uses: actions/checkout@v5
       - uses: dtolnay/rust-toolchain@1.89
       - name: Build
-        run: cargo build --release --locked
+        run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }}
       # No need to upload binaries for dry run (cron or workflow_dispatch)
       - name: Upload binaries to release
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          file: target/release/${{ matrix.artifact_name }}
-          asset_name: ${{ matrix.asset_name }}
-          tag: ${{ github.ref }}
-
-  publish-macos-apple-silicon:
-    name: Publish binary for macOS silicon
-    runs-on: macos-14
-    needs: check-version
-    strategy:
-      matrix:
-        include:
-          - target: aarch64-apple-darwin
-            asset_name: meilisearch-macos-apple-silicon
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v5
-      - name: Installing Rust toolchain
-        uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
-          target: ${{ matrix.target }}
-      - name: Cargo build
-        uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --release --target ${{ matrix.target }}
-      - name: Upload the binary to release
-        # No need to upload binaries for dry run (cron or workflow_dispatch)
-        if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.11.2
-        with:
-          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          file: target/${{ matrix.target }}/release/meilisearch
-          asset_name: ${{ matrix.asset_name }}
-          tag: ${{ github.ref }}
-
-  publish-aarch64:
-    name: Publish binary for aarch64
-    runs-on: ubuntu-latest
-    needs: check-version
-    env:
-      DEBIAN_FRONTEND: noninteractive
-    container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
-    strategy:
-      matrix:
-        include:
-          - target: aarch64-unknown-linux-gnu
-            asset_name: meilisearch-linux-aarch64
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v5
-      - name: Install needed dependencies
-        run: |
-          apt-get update -y && apt upgrade -y
-          apt-get install -y curl build-essential gcc-aarch64-linux-gnu
-      - name: Set up Docker for cross compilation
-        run: |
-          apt-get install -y curl apt-transport-https ca-certificates software-properties-common
-          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-          add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
-          apt-get update -y && apt-get install -y docker-ce
-      - name: Installing Rust toolchain
-        uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
-          target: ${{ matrix.target }}
-      - name: Configure target aarch64 GNU
-        ## Environment variable is not passed using env:
-        ## LD gold won't work with MUSL
-        # env:
-        #   JEMALLOC_SYS_WITH_LG_PAGE: 16
-        #   RUSTFLAGS: '-Clink-arg=-fuse-ld=gold'
-        run: |
-          echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
-          echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
-          echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
-      - name: Install a default toolchain that will be used to build cargo cross
-        run: |
-          rustup default stable
-      - name: Cargo build
-        uses: actions-rs/cargo@v1
-        with:
-          command: build
-          use-cross: true
-          args: --release --target ${{ matrix.target }}
-        env:
-          CROSS_DOCKER_IN_DOCKER: true
-      - name: List target output files
-        run: ls -lR ./target
-      - name: Upload the binary to release
-        # No need to upload binaries for dry run (cron or workflow_dispatch)
-        if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.11.2
-        with:
-          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          file: target/${{ matrix.target }}/release/meilisearch
-          asset_name: ${{ matrix.asset_name }}
+          file: target/${{ matrix.binary_path }}
+          asset_name: meilisearch-${{ matrix.edition-suffix }}${{ matrix.asset_name }}
           tag: ${{ github.ref }}
 
   publish-openapi-file:
```
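Each matrix cell expands `feature-flag` and `extra-args` into one `cargo build` invocation; for example, the `linux-aarch64` × `enterprise` cell effectively runs the following (an illustrative expansion, not an extra workflow step):

```bash
# linux-aarch64 / enterprise cell of the publish-binaries matrix
cargo build --release --locked --features enterprise --target aarch64-unknown-linux-gnu
# → binary at target/aarch64-unknown-linux-gnu/release/meilisearch,
#   uploaded as the meilisearch-enterprise-linux-aarch64 release asset
```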
`.github/workflows/sdks-tests.yml` (vendored, 24 lines changed)

Every SDK test job swaps its Meilisearch service image from the community registry to the enterprise one. The same one-line change is repeated in twelve hunks (at lines 68, 92, 122, 149, 184, 213, 238, 263, 284, 307, 338, and 370), each with identical context:

```diff
@@ -68,7 +68,7 @@ jobs:
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
+        image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
```
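To run the same enterprise service container locally, an invocation along these lines should work (the tag and master key are placeholders, not values taken from the workflow):

```bash
docker run --rm -p 7700:7700 \
  -e MEILI_MASTER_KEY=masterKey \
  -e MEILI_NO_ANALYTICS=true \
  getmeili/meilisearch-enterprise:nightly   # placeholder tag
```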
`.github/workflows/test-suite.yml` (vendored, 147 lines changed)

```diff
@@ -15,31 +15,40 @@ env:
 
 jobs:
   test-linux:
-    name: Tests on ubuntu-22.04
-    runs-on: ubuntu-latest
-    container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+    name: Tests on Ubuntu
+    runs-on: ${{ matrix.runner }}
+    strategy:
+      matrix:
+        runner: [ubuntu-22.04, ubuntu-22.04-arm]
+        features: ["", "--features enterprise"]
     steps:
       - uses: actions/checkout@v5
-      - name: Install needed dependencies
+      - name: check free space before
+        run: df -h
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
         run: |
-          apt-get update && apt-get install -y curl
-          apt-get install build-essential -y
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
+      - name: check free space after
+        run: df -h
       - name: Setup test with Rust stable
         uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
-      - name: Run cargo check without any default features
+        with:
+          key: ${{ matrix.features }}
+      - name: Run cargo build without any default features
         uses: actions-rs/cargo@v1
         with:
           command: build
-          args: --locked --release --no-default-features --all
+          args: --locked --no-default-features --all
       - name: Run cargo test
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked --release --all
+          args: --locked --all ${{ matrix.features }}
 
   test-others:
     name: Tests on ${{ matrix.os }}
@@ -48,50 +57,57 @@ jobs:
       fail-fast: false
       matrix:
         os: [macos-14, windows-2022]
+        features: ["", "--features enterprise"]
+    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
       - uses: dtolnay/rust-toolchain@1.89
-      - name: Run cargo check without any default features
+      - name: Run cargo build without any default features
        uses: actions-rs/cargo@v1
        with:
          command: build
-          args: --locked --release --no-default-features --all
+          args: --locked --no-default-features --all
       - name: Run cargo test
        uses: actions-rs/cargo@v1
        with:
          command: test
-          args: --locked --release --all
+          args: --locked --all ${{ matrix.features }}
 
   test-all-features:
     name: Tests almost all features
-    runs-on: ubuntu-latest
-    container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+    runs-on: ubuntu-22.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
-      - name: Install needed dependencies
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
         run: |
-          apt-get update
-          apt-get install --assume-yes build-essential curl
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo build with almost all features
         run: |
-          cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
       - name: Run cargo test with almost all features
         run: |
-          cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo test --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
 
   ollama-ubuntu:
     name: Test with Ollama
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     env:
       MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - name: Install Ollama
         run: |
           curl -fsSL https://ollama.com/install.sh | sudo -E sh
@@ -115,20 +131,20 @@ jobs:
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked --release --all --features test-ollama ollama
+          args: --locked -p meilisearch --features test-ollama ollama
 
   test-disabled-tokenization:
     name: Test disabled tokenization
-    runs-on: ubuntu-latest
-    container:
-      image: ubuntu:22.04
+    runs-on: ubuntu-22.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
-      - name: Install needed dependencies
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
         run: |
-          apt-get update
-          apt-get install --assume-yes build-essential curl
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo tree without default features and check lindera is not present
         run: |
@@ -140,36 +156,42 @@ jobs:
         run: |
           cargo tree -f '{p} {f}' -e normal | grep lindera -qz
 
-  # We run tests in debug also, to make sure that the debug_assertions are hit
-  test-debug:
-    name: Run tests in debug
-    runs-on: ubuntu-latest
-    container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+  build:
+    name: Build in release
+    runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v5
-      - name: Install needed dependencies
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
         run: |
-          apt-get update && apt-get install -y curl
-          apt-get install build-essential -y
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
-      - name: Run tests in debug
+      - name: Run cargo build in release
         uses: actions-rs/cargo@v1
         with:
-          command: test
-          args: --locked --all
+          command: build
+          args: --all-targets --release
 
   clippy:
     name: Run Clippy
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
+    strategy:
+      matrix:
+        features: ["", "--features enterprise"]
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
         with:
-          profile: minimal
           components: clippy
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -177,18 +199,21 @@ jobs:
         uses: actions-rs/cargo@v1
         with:
           command: clippy
-          args: --all-targets -- --deny warnings
+          args: --all-targets ${{ matrix.features }} -- --deny warnings
 
   fmt:
     name: Run Rustfmt
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
         with:
-          profile: minimal
-          toolchain: nightly-2024-07-09
-          override: true
           components: rustfmt
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -199,3 +224,23 @@ jobs:
         run: |
           echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
           cargo fmt --all -- --check
+
+  declarative-tests:
+    name: Run declarative tests
+    runs-on: ubuntu-22.04-arm
+    permissions:
+      contents: read
+    steps:
+      - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
+      - uses: dtolnay/rust-toolchain@1.89
+      - name: Cache dependencies
+        uses: Swatinem/rust-cache@v2.8.0
+      - name: Run declarative tests
+        run: |
+          cargo xtask test workloads/tests/*.json
```
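Mirroring the updated test matrix locally comes down to the commands below (assembled from the diff above as an illustration; the repository does not ship this as a script):

```bash
# Default-edition cell of the test matrix
cargo test --locked --all

# Enterprise-edition cell
cargo test --locked --all --features enterprise

# The release build check that replaced the debug test job
cargo build --all-targets --release

# The new declarative-tests job
cargo xtask test workloads/tests/*.json
```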
Another workflow file follows; its name was not captured, but the hunk (installing `sd` and updating `Cargo.toml`) suggests the version-bump workflow:

```diff
@@ -18,9 +18,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
       - name: Install sd
         run: cargo install sd
       - name: Update Cargo.toml file
```
A documentation file follows (name not captured; the hunk context on workload JSON structure matches the benchmarks documentation):

```diff
@@ -124,6 +124,7 @@ They are JSON files with the following structure (comments are not actually supp
 {
     // Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
     "name": "hackernews.ndjson_1M,no-threads",
+    "type": "bench",
     // Number of consecutive runs of the commands that should be performed.
     // Each run uses a fresh instance of Meilisearch and a fresh database.
     // Each run produces its own report file.
```
`Cargo.lock` (generated, 1877 lines changed) — diff suppressed because it is too large.
Workspace `Cargo.toml` (file header not captured, but the `[workspace.package]` hunks identify it):

```diff
@@ -23,7 +23,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.27.0"
+version = "1.28.2"
 authors = [
   "Quentin de Quelen <quentin@dequelen.me>",
   "Clément Renault <clement@meilisearch.com>",
@@ -50,3 +50,5 @@ opt-level = 3
 opt-level = 3
 [profile.dev.package.roaring]
 opt-level = 3
+[profile.dev.package.gemm-f16]
+opt-level = 3
```
A deleted configuration file (name not captured; the `[build.env] passthrough` table matches the `cross` tool's Cross.toml):

```diff
@@ -1,7 +0,0 @@
-[build.env]
-passthrough = [
-    "RUST_BACKTRACE",
-    "CARGO_TERM_COLOR",
-    "RUSTFLAGS",
-    "JEMALLOC_SYS_WITH_LG_PAGE"
-]
```
The `Dockerfile` (file header not captured, but the `apk`/`alpine` build stage identifies it):

```diff
@@ -8,16 +8,14 @@ WORKDIR /
 ARG COMMIT_SHA
 ARG COMMIT_DATE
 ARG GIT_TAG
+ARG EXTRA_ARGS
 ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG}
 ENV RUSTFLAGS="-C target-feature=-crt-static"
 
 COPY . .
 RUN set -eux; \
     apkArch="$(apk --print-arch)"; \
-    if [ "$apkArch" = "aarch64" ]; then \
-        export JEMALLOC_SYS_WITH_LG_PAGE=16; \
-    fi && \
-    cargo build --release -p meilisearch -p meilitool
+    cargo build --release -p meilisearch -p meilitool ${EXTRA_ARGS}
 
 # Run
 FROM alpine:3.22
```
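The new `EXTRA_ARGS` build argument is how the publish workflow injects the edition feature flag into the image build. A hypothetical local build, with image tags chosen only for illustration:

```bash
# Community image: EXTRA_ARGS is left empty
docker build -t meilisearch:local .

# Enterprise image: same value the workflow passes via EXTRA_ARGS=${{ matrix.feature-flag }}
docker build --build-arg EXTRA_ARGS="--features enterprise" -t meilisearch-enterprise:local .
```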
`TESTING.md` (new file, 326 lines). The recovered portion of the file follows:

# Declarative tests

Declarative tests ensure that Meilisearch features remain stable across versions.

While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.

Declarative tests instead **simulate the lifetime of a database**: they chain together commands and requests to change the binary, verifying that database state and API responses remain consistent.

## Basic example

```jsonc
{
    "type": "test",
    "name": "api-keys",
    "binary": { // the first command will run on the binary following this specification.
        "source": "release", // get the binary as a release from GitHub
        "version": "1.19.0", // version to fetch
        "edition": "community" // edition to fetch
    },
    "commands": []
}
```

This example defines a no-op test (it does nothing).

If the file is saved at `workloads/tests/example.json`, you can run it with:

```bash
cargo xtask test workloads/tests/example.json
```

## Commands

Commands represent API requests sent to Meilisearch endpoints during a test.

They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.

```jsonc
{
    "route": "keys",
    "method": "POST",
    "body": {
        "inline": {
            "actions": [
                "search",
                "documents.add"
            ],
            "description": "Test API Key",
            "expiresAt": null,
            "indexes": [ "movies" ]
        }
    }
}
```

This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.
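For reference, the equivalent request issued by hand against a local instance would look like this (the address and master key are placeholders; the body is the `inline` payload above):

```bash
curl -X POST 'http://localhost:7700/keys' \
  -H 'Authorization: Bearer <MASTER_KEY>' \
  -H 'Content-Type: application/json' \
  --data '{
    "actions": ["search", "documents.add"],
    "description": "Test API Key",
    "expiresAt": null,
    "indexes": ["movies"]
  }'
```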
### Using assets in commands

To keep tests concise and reusable, you can define **assets** at the root of the workload file.

Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.

```jsonc
{
    "type": "test",
    "name": "movies",
    "binary": {
        "source": "release",
        "version": "1.19.0",
        "edition": "community"
    },
    "assets": {
        "movies.json": {
            "local_location": null,
            "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
            "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
        }
    },
    "commands": [
        {
            "route": "indexes/movies/documents",
            "method": "POST",
            "body": {
                "asset": "movies.json"
            }
        }
    ]
}
```

In this example:

- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.

This makes the test much cleaner than inlining a large dataset directly into the command.

For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).
|
|
||||||
|
### Asserting responses
|
||||||
|
|
||||||
|
Commands can specify both the **expected status code** and the **expected response body**.
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"route": "indexes/movies/documents",
|
||||||
|
"method": "POST",
|
||||||
|
"body": {
|
||||||
|
"asset": "movies.json"
|
||||||
|
},
|
||||||
|
"expectedStatus": 202,
|
||||||
|
"expectedResponse": {
|
||||||
|
"enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
|
||||||
|
"indexUid": "movies",
|
||||||
|
"status": "enqueued",
|
||||||
|
"taskUid": 1,
|
||||||
|
"type": "documentAdditionOrUpdate"
|
||||||
|
},
|
||||||
|
"synchronous": "WaitForTask"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Manually writing `expectedResponse` fields can be tedious.
|
||||||
|
|
||||||
|
Instead, you can let the test runner populate them automatically:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
|
||||||
|
cargo xtask test workloads/tests/example.json --add-missing-responses
|
||||||
|
|
||||||
|
# OR
|
||||||
|
|
||||||
|
# Run the workload to populate expected fields. Updates all fields including existing ones
|
||||||
|
cargo xtask test workloads/tests/example.json --update-responses
|
||||||
|
```
|
||||||
|
|
||||||
|
This workflow is recommended:
|
||||||
|
|
||||||
|
1. Write the test without expected fields.
|
||||||
|
2. Run it with `--add-missing-responses` to capture the actual responses.
|
||||||
|
3. Review and commit the generated expectations.
|
||||||
|
|
||||||
|
## Changing binary
|
||||||
|
|
||||||
|
It is possible to insert an instruction to change the current Meilisearch instance from one binary specification to another during a test.
|
||||||
|
|
||||||
|
When executed, such an instruction will:
|
||||||
|
1. Stop the current Meilisearch instance.
|
||||||
|
2. Fetch the binary specified by the instruction.
|
||||||
|
3. Restart the server with the specified binary on the same database.
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"type": "test",
|
||||||
|
"name": "movies",
|
||||||
|
"binary": {
|
||||||
|
"source": "release",
|
||||||
|
"version": "1.19.0", // start with version v1.19.0
|
||||||
|
"edition": "community"
|
||||||
|
},
|
||||||
|
"assets": {
|
||||||
|
"movies.json": {
|
||||||
|
"local_location": null,
|
||||||
|
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
|
||||||
|
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"commands": [
|
||||||
|
// setup some data
|
||||||
|
{
|
||||||
|
"route": "indexes/movies/documents",
|
||||||
|
"method": "POST",
|
||||||
|
"body": {
|
||||||
|
"asset": "movies.json"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
// switch binary to v1.24.0
|
||||||
|
{
|
||||||
|
"binary": {
|
||||||
|
"source": "release",
|
||||||
|
"version": "1.24.0",
|
||||||
|
"edition": "community"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Typical Usage
|
||||||
|
|
||||||
|
In most cases, the change binary instruction will be used to update a database.
|
||||||
|
|
||||||
|
- **Set up** some data using commands on an older version.
|
||||||
|
- **Upgrade** to the latest version.
|
||||||
|
- **Assert** that the data and API behavior remain correct after the upgrade.
|
||||||
|
|
||||||
|
To properly test the dumpless upgrade, one should typically:
|
||||||
|
|
||||||
|
1. Open the database without processing the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments
|
||||||
|
2. Check that the search, stats and task queue still work.
|
||||||
|
3. Open the database and process the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish.
|
||||||
|
4. Check that the indexing, search, stats, and task queue still work.
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"type": "test",
|
||||||
|
"name": "movies",
|
||||||
|
"binary": {
|
||||||
|
"source": "release",
|
||||||
|
"version": "1.12.0",
|
||||||
|
"edition": "community"
|
||||||
|
},
|
||||||
|
"commands": [
|
||||||
|
// 0. Run commands to populate the database
|
||||||
|
{
|
||||||
|
// ..
|
||||||
|
},
|
||||||
|
// 1. Open the database with new MS without processing the update task
|
||||||
|
{
|
||||||
|
"binary": {
|
||||||
|
"source": "build", // build the binary from the sources in the current git repository
|
||||||
|
"edition": "community",
|
||||||
|
"extraCliArgs": [
|
||||||
|
"--experimental-dumpless-upgrade", // allows to open with a newer MS
|
||||||
|
"--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
// 2. Check the search etc.
|
||||||
|
{
|
||||||
|
// ..
|
||||||
|
},
|
||||||
|
// 3. Open the database with new MS and processing the update task
|
||||||
|
{
|
||||||
|
"binary": {
|
||||||
|
"source": "build", // build the binary from the sources in the current git repository
|
||||||
|
"edition": "community",
|
||||||
|
"extraCliArgs": [
|
||||||
|
"--experimental-dumpless-upgrade" // allows to open with a newer MS
|
||||||
|
// no `--experimental-max-number-of-batched-tasks=0`
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
// 4. Check the indexing, search, etc.
|
||||||
|
{
|
||||||
|
// ..
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.
|
||||||
|
|
||||||
|
## Variables
|
||||||
|
|
||||||
|
Sometimes a command needs to use a value returned by a **previous response**.
|
||||||
|
These values can be captured and reused using the register field.
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"route": "keys",
|
||||||
|
"method": "POST",
|
||||||
|
"body": {
|
||||||
|
"inline": {
|
||||||
|
"actions": [
|
||||||
|
"search",
|
||||||
|
"documents.add"
|
||||||
|
],
|
||||||
|
"description": "Test API Key",
|
||||||
|
"expiresAt": null,
|
||||||
|
"indexes": [ "movies" ]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"expectedResponse": {
|
||||||
|
"key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
|
||||||
|
// ...
|
||||||
|
},
|
||||||
|
"register": {
|
||||||
|
"key": "/key"
|
||||||
|
},
|
||||||
|
"synchronous": "WaitForResponse"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The `register` field captures the value at the JSON path `/key` from the response.
|
||||||
|
Paths follow the **JavaScript Object Notation Pointer (RFC 6901)** format.
|
||||||
|
Registered variables are available for all subsequent commands.
|
||||||
|
|
||||||
|
Registered variables can be referenced by wrapping their name in double curly braces:
|
||||||
|
|
||||||
|
In the route/path:
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"route": "tasks/{{ task_id }}",
|
||||||
|
"method": "GET"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
In the request body:
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"route": "indexes/movies/documents",
|
||||||
|
"method": "PATCH",
|
||||||
|
"body": {
|
||||||
|
"inline": {
|
||||||
|
"id": "{{ document_id }}",
|
||||||
|
"overview": "Shazam turns evil and the world is in danger.",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Or they can be referenced by their name (**without curly braces**) as an API key:
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"route": "indexes/movies/documents",
|
||||||
|
"method": "POST",
|
||||||
|
"body": { /* ... */ },
|
||||||
|
"apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
|
||||||
|
}
|
||||||
|
```
|
||||||
@@ -11,27 +11,27 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0.98"
|
anyhow = "1.0.100"
|
||||||
bumpalo = "3.18.1"
|
bumpalo = "3.19.0"
|
||||||
csv = "1.3.1"
|
csv = "1.4.0"
|
||||||
memmap2 = "0.9.7"
|
memmap2 = "0.9.9"
|
||||||
milli = { path = "../milli" }
|
milli = { path = "../milli" }
|
||||||
mimalloc = { version = "0.1.47", default-features = false }
|
mimalloc = { version = "0.1.48", default-features = false }
|
||||||
serde_json = { version = "1.0.140", features = ["preserve_order"] }
|
serde_json = { version = "1.0.145", features = ["preserve_order"] }
|
||||||
tempfile = "3.20.0"
|
tempfile = "3.23.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
criterion = { version = "0.6.0", features = ["html_reports"] }
|
criterion = { version = "0.7.0", features = ["html_reports"] }
|
||||||
rand = "0.8.5"
|
rand = "0.8.5"
|
||||||
rand_chacha = "0.3.1"
|
rand_chacha = "0.3.1"
|
||||||
roaring = "0.10.12"
|
roaring = "0.10.12"
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
anyhow = "1.0.98"
|
anyhow = "1.0.100"
|
||||||
bytes = "1.10.1"
|
bytes = "1.11.0"
|
||||||
convert_case = "0.8.0"
|
convert_case = "0.9.0"
|
||||||
flate2 = "1.1.2"
|
flate2 = "1.1.5"
|
||||||
reqwest = { version = "0.12.20", features = ["blocking", "rustls-tls"], default-features = false }
|
reqwest = { version = "0.12.24", features = ["blocking", "rustls-tls"], default-features = false }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["milli/all-tokenizations"]
|
default = ["milli/all-tokenizations"]
|
||||||
|
|||||||
@@ -11,8 +11,8 @@ license.workspace = true
|
|||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
time = { version = "0.3.41", features = ["parsing"] }
|
time = { version = "0.3.44", features = ["parsing"] }
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
anyhow = "1.0.98"
|
anyhow = "1.0.100"
|
||||||
vergen-git2 = "1.0.7"
|
vergen-git2 = "1.0.7"
|
||||||
|
|||||||
@@ -11,24 +11,27 @@ readme.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0.98"
|
anyhow = "1.0.100"
|
||||||
flate2 = "1.1.2"
|
flate2 = "1.1.5"
|
||||||
http = "1.3.1"
|
http = "1.3.1"
|
||||||
meilisearch-types = { path = "../meilisearch-types" }
|
meilisearch-types = { path = "../meilisearch-types" }
|
||||||
once_cell = "1.21.3"
|
once_cell = "1.21.3"
|
||||||
regex = "1.11.1"
|
regex = "1.12.2"
|
||||||
roaring = { version = "0.10.12", features = ["serde"] }
|
roaring = { version = "0.10.12", features = ["serde"] }
|
||||||
serde = { version = "1.0.219", features = ["derive"] }
|
serde = { version = "1.0.228", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.140", features = ["preserve_order"] }
|
serde_json = { version = "1.0.145", features = ["preserve_order"] }
|
||||||
tar = "0.4.44"
|
tar = "0.4.44"
|
||||||
tempfile = "3.20.0"
|
tempfile = "3.23.0"
|
||||||
thiserror = "2.0.12"
|
thiserror = "2.0.17"
|
||||||
time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
|
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
|
||||||
tracing = "0.1.41"
|
tracing = "0.1.41"
|
||||||
uuid = { version = "1.17.0", features = ["serde", "v4"] }
|
uuid = { version = "1.18.1", features = ["serde", "v4"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
big_s = "1.0.2"
|
big_s = "1.0.2"
|
||||||
maplit = "1.0.2"
|
maplit = "1.0.2"
|
||||||
meili-snap = { path = "../meili-snap" }
|
meili-snap = { path = "../meili-snap" }
|
||||||
meilisearch-types = { path = "../meilisearch-types" }
|
meilisearch-types = { path = "../meilisearch-types" }
|
||||||
|
|
||||||
|
[features]
|
||||||
|
enterprise = ["meilisearch-types/enterprise"]
|
||||||
@@ -9,9 +9,8 @@ use meilisearch_types::error::ResponseError;
|
|||||||
use meilisearch_types::keys::Key;
|
use meilisearch_types::keys::Key;
|
||||||
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
||||||
use meilisearch_types::settings::Unchecked;
|
use meilisearch_types::settings::Unchecked;
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::{DbTaskNetwork, NetworkTopologyChange};
|
|
||||||
use meilisearch_types::tasks::{
|
use meilisearch_types::tasks::{
|
||||||
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId,
|
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId, TaskNetwork,
|
||||||
};
|
};
|
||||||
use meilisearch_types::InstanceUid;
|
use meilisearch_types::InstanceUid;
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
@@ -96,7 +95,7 @@ pub struct TaskDump {
|
|||||||
)]
|
)]
|
||||||
pub finished_at: Option<OffsetDateTime>,
|
pub finished_at: Option<OffsetDateTime>,
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub network: Option<DbTaskNetwork>,
|
pub network: Option<TaskNetwork>,
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub custom_metadata: Option<String>,
|
pub custom_metadata: Option<String>,
|
||||||
}
|
}
|
||||||
@@ -164,7 +163,6 @@ pub enum KindDump {
|
|||||||
IndexCompaction {
|
IndexCompaction {
|
||||||
index_uid: String,
|
index_uid: String,
|
||||||
},
|
},
|
||||||
NetworkTopologyChange(NetworkTopologyChange),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<Task> for TaskDump {
|
impl From<Task> for TaskDump {
|
||||||
@@ -251,9 +249,6 @@ impl From<KindWithContent> for KindDump {
|
|||||||
KindWithContent::IndexCompaction { index_uid } => {
|
KindWithContent::IndexCompaction { index_uid } => {
|
||||||
KindDump::IndexCompaction { index_uid }
|
KindDump::IndexCompaction { index_uid }
|
||||||
}
|
}
|
||||||
KindWithContent::NetworkTopologyChange(network_topology_change) => {
|
|
||||||
KindDump::NetworkTopologyChange(network_topology_change)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -267,13 +262,13 @@ pub(crate) mod test {
|
|||||||
use big_s::S;
|
use big_s::S;
|
||||||
use maplit::{btreemap, btreeset};
|
use maplit::{btreemap, btreeset};
|
||||||
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
|
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
|
||||||
use meilisearch_types::enterprise_edition::network::{Network, Remote};
|
|
||||||
use meilisearch_types::facet_values_sort::FacetValuesSort;
|
use meilisearch_types::facet_values_sort::FacetValuesSort;
|
||||||
use meilisearch_types::features::RuntimeTogglableFeatures;
|
use meilisearch_types::features::RuntimeTogglableFeatures;
|
||||||
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
||||||
use meilisearch_types::keys::{Action, Key};
|
use meilisearch_types::keys::{Action, Key};
|
||||||
use meilisearch_types::milli::update::Setting;
|
use meilisearch_types::milli::update::Setting;
|
||||||
use meilisearch_types::milli::{self, FilterableAttributesRule};
|
use meilisearch_types::milli::{self, FilterableAttributesRule};
|
||||||
|
use meilisearch_types::network::{Network, Remote};
|
||||||
use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
|
use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
|
||||||
use meilisearch_types::task_view::DetailsView;
|
use meilisearch_types::task_view::DetailsView;
|
||||||
use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
|
use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
|
||||||
@@ -565,8 +560,7 @@ pub(crate) mod test {
|
|||||||
Network {
|
Network {
|
||||||
local: Some("myself".to_string()),
|
local: Some("myself".to_string()),
|
||||||
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
|
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
|
||||||
leader: None,
|
sharding: false,
|
||||||
version: Default::default(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
|
|||||||
pub type Key = meilisearch_types::keys::Key;
|
pub type Key = meilisearch_types::keys::Key;
|
||||||
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
|
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
|
||||||
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
|
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
|
||||||
pub type Network = meilisearch_types::enterprise_edition::network::Network;
|
pub type Network = meilisearch_types::network::Network;
|
||||||
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
|
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
|
||||||
|
|
||||||
// ===== Other types to clarify the code of the compat module
|
// ===== Other types to clarify the code of the compat module
|
||||||
|
|||||||
@@ -5,9 +5,9 @@ use std::path::PathBuf;
|
|||||||
use flate2::write::GzEncoder;
|
use flate2::write::GzEncoder;
|
||||||
use flate2::Compression;
|
use flate2::Compression;
|
||||||
use meilisearch_types::batches::Batch;
|
use meilisearch_types::batches::Batch;
|
||||||
use meilisearch_types::enterprise_edition::network::Network;
|
|
||||||
use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
|
use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
|
||||||
use meilisearch_types::keys::Key;
|
use meilisearch_types::keys::Key;
|
||||||
|
use meilisearch_types::network::Network;
|
||||||
use meilisearch_types::settings::{Checked, Settings};
|
use meilisearch_types::settings::{Checked, Settings};
|
||||||
use meilisearch_types::webhooks::WebhooksDumpView;
|
use meilisearch_types::webhooks::WebhooksDumpView;
|
||||||
use serde_json::{Map, Value};
|
use serde_json::{Map, Value};
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tempfile = "3.20.0"
|
tempfile = "3.23.0"
|
||||||
thiserror = "2.0.12"
|
thiserror = "2.0.17"
|
||||||
tracing = "0.1.41"
|
tracing = "0.1.41"
|
||||||
uuid = { version = "1.17.0", features = ["serde", "v4"] }
|
uuid = { version = "1.18.1", features = ["serde", "v4"] }
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ license.workspace = true
|
|||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
criterion = { version = "0.6.0", features = ["html_reports"] }
|
criterion = { version = "0.7.0", features = ["html_reports"] }
|
||||||
|
|
||||||
[[bench]]
|
[[bench]]
|
||||||
name = "benchmarks"
|
name = "benchmarks"
|
||||||
|
|||||||
@@ -11,12 +11,12 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
arbitrary = { version = "1.4.1", features = ["derive"] }
|
arbitrary = { version = "1.4.2", features = ["derive"] }
|
||||||
bumpalo = "3.18.1"
|
bumpalo = "3.19.0"
|
||||||
clap = { version = "4.5.40", features = ["derive"] }
|
clap = { version = "4.5.52", features = ["derive"] }
|
||||||
either = "1.15.0"
|
either = "1.15.0"
|
||||||
fastrand = "2.3.0"
|
fastrand = "2.3.0"
|
||||||
milli = { path = "../milli" }
|
milli = { path = "../milli" }
|
||||||
serde = { version = "1.0.219", features = ["derive"] }
|
serde = { version = "1.0.228", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.140", features = ["preserve_order"] }
|
serde_json = { version = "1.0.145", features = ["preserve_order"] }
|
||||||
tempfile = "3.20.0"
|
tempfile = "3.23.0"
|
||||||
|
|||||||
@@ -11,34 +11,33 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0.98"
|
anyhow = "1.0.100"
|
||||||
bincode = "1.3.3"
|
bincode = "1.3.3"
|
||||||
byte-unit = "5.1.6"
|
byte-unit = "5.1.6"
|
||||||
bytes = "1.10.1"
|
bytes = "1.11.0"
|
||||||
bumpalo = "3.18.1"
|
bumpalo = "3.19.0"
|
||||||
bumparaw-collections = "0.1.4"
|
bumparaw-collections = "0.1.4"
|
||||||
convert_case = "0.8.0"
|
convert_case = "0.9.0"
|
||||||
csv = "1.3.1"
|
csv = "1.4.0"
|
||||||
derive_builder = "0.20.2"
|
derive_builder = "0.20.2"
|
||||||
dump = { path = "../dump" }
|
dump = { path = "../dump" }
|
||||||
enum-iterator = "2.1.0"
|
enum-iterator = "2.3.0"
|
||||||
file-store = { path = "../file-store" }
|
file-store = { path = "../file-store" }
|
||||||
flate2 = "1.1.2"
|
flate2 = "1.1.5"
|
||||||
hashbrown = "0.15.4"
|
indexmap = "2.12.0"
|
||||||
indexmap = "2.9.0"
|
|
||||||
meilisearch-auth = { path = "../meilisearch-auth" }
|
meilisearch-auth = { path = "../meilisearch-auth" }
|
||||||
meilisearch-types = { path = "../meilisearch-types" }
|
meilisearch-types = { path = "../meilisearch-types" }
|
||||||
memmap2 = "0.9.7"
|
memmap2 = "0.9.9"
|
||||||
page_size = "0.6.0"
|
page_size = "0.6.0"
|
||||||
rayon = "1.10.0"
|
rayon = "1.11.0"
|
||||||
roaring = { version = "0.10.12", features = ["serde"] }
|
roaring = { version = "0.10.12", features = ["serde"] }
|
||||||
serde = { version = "1.0.219", features = ["derive"] }
|
serde = { version = "1.0.228", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.140", features = ["preserve_order"] }
|
serde_json = { version = "1.0.145", features = ["preserve_order"] }
|
||||||
tar = "0.4.44"
|
tar = "0.4.44"
|
||||||
synchronoise = "1.0.1"
|
synchronoise = "1.0.1"
|
||||||
tempfile = "3.20.0"
|
tempfile = "3.23.0"
|
||||||
thiserror = "2.0.12"
|
thiserror = "2.0.17"
|
||||||
time = { version = "0.3.41", features = [
|
time = { version = "0.3.44", features = [
|
||||||
"serde-well-known",
|
"serde-well-known",
|
||||||
"formatting",
|
"formatting",
|
||||||
"parsing",
|
"parsing",
|
||||||
@@ -46,11 +45,11 @@ time = { version = "0.3.41", features = [
|
|||||||
] }
|
] }
|
||||||
tracing = "0.1.41"
|
tracing = "0.1.41"
|
||||||
ureq = "2.12.1"
|
ureq = "2.12.1"
|
||||||
uuid = { version = "1.17.0", features = ["serde", "v4"] }
|
uuid = { version = "1.18.1", features = ["serde", "v4"] }
|
||||||
backoff = "0.4.0"
|
backoff = "0.4.0"
|
||||||
reqwest = { version = "0.12.23", features = ["rustls-tls", "http2"], default-features = false }
|
reqwest = { version = "0.12.24", features = ["rustls-tls", "http2"], default-features = false }
|
||||||
rusty-s3 = "0.8.1"
|
rusty-s3 = "0.8.1"
|
||||||
tokio = { version = "1.47.1", features = ["full"] }
|
tokio = { version = "1.48.0", features = ["full"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
big_s = "1.0.2"
|
big_s = "1.0.2"
|
||||||
|
|||||||
@@ -238,9 +238,6 @@ impl<'a> Dump<'a> {
|
|||||||
KindDump::IndexCompaction { index_uid } => {
|
KindDump::IndexCompaction { index_uid } => {
|
||||||
KindWithContent::IndexCompaction { index_uid }
|
KindWithContent::IndexCompaction { index_uid }
|
||||||
}
|
}
|
||||||
KindDump::NetworkTopologyChange(network_topology_change) => {
|
|
||||||
KindWithContent::NetworkTopologyChange(network_topology_change)
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -3,13 +3,10 @@ use std::fmt::Display;
|
|||||||
use meilisearch_types::batches::BatchId;
|
use meilisearch_types::batches::BatchId;
|
||||||
use meilisearch_types::error::{Code, ErrorCode};
|
use meilisearch_types::error::{Code, ErrorCode};
|
||||||
use meilisearch_types::milli::index::RollbackOutcome;
|
use meilisearch_types::milli::index::RollbackOutcome;
|
||||||
use meilisearch_types::milli::DocumentId;
|
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::ReceiveTaskError;
|
|
||||||
use meilisearch_types::tasks::{Kind, Status};
|
use meilisearch_types::tasks::{Kind, Status};
|
||||||
use meilisearch_types::{heed, milli};
|
use meilisearch_types::{heed, milli};
|
||||||
use reqwest::StatusCode;
|
use reqwest::StatusCode;
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
use crate::TaskId;
|
use crate::TaskId;
|
||||||
|
|
||||||
@@ -194,15 +191,6 @@ pub enum Error {
|
|||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
HeedTransaction(heed::Error),
|
HeedTransaction(heed::Error),
|
||||||
|
|
||||||
#[error("No network topology change task is currently enqueued or processing")]
|
|
||||||
ImportTaskWithoutNetworkTask,
|
|
||||||
#[error("The network task version (`{network_task}`) does not match the import task version (`{import_task}`)")]
|
|
||||||
NetworkVersionMismatch { network_task: Uuid, import_task: Uuid },
|
|
||||||
#[error("The import task emanates from an unknown remote `{0}`")]
|
|
||||||
ImportTaskUnknownRemote(String),
|
|
||||||
#[error("The import task with key `{0}` was already received")]
|
|
||||||
ImportTaskAlreadyReceived(DocumentId),
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
#[error("Planned failure for tests.")]
|
#[error("Planned failure for tests.")]
|
||||||
PlannedFailure,
|
PlannedFailure,
|
||||||
@@ -260,10 +248,6 @@ impl Error {
|
|||||||
| Error::Persist(_)
|
| Error::Persist(_)
|
||||||
| Error::FeatureNotEnabled(_)
|
| Error::FeatureNotEnabled(_)
|
||||||
| Error::Export(_)
|
| Error::Export(_)
|
||||||
| Error::ImportTaskWithoutNetworkTask
|
|
||||||
| Error::NetworkVersionMismatch { .. }
|
|
||||||
| Error::ImportTaskAlreadyReceived(_)
|
|
||||||
| Error::ImportTaskUnknownRemote(_)
|
|
||||||
| Error::Anyhow(_) => true,
|
| Error::Anyhow(_) => true,
|
||||||
Error::CreateBatch(_)
|
Error::CreateBatch(_)
|
||||||
| Error::CorruptedTaskQueue
|
| Error::CorruptedTaskQueue
|
||||||
@@ -323,10 +307,6 @@ impl ErrorCode for Error {
|
|||||||
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
|
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
|
||||||
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
|
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
|
||||||
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
|
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
|
||||||
Error::ImportTaskWithoutNetworkTask => Code::ImportTaskWithoutNetworkTask,
|
|
||||||
Error::NetworkVersionMismatch { .. } => Code::NetworkVersionMismatch,
|
|
||||||
Error::ImportTaskAlreadyReceived(_) => Code::ImportTaskAlreadyReceived,
|
|
||||||
Error::ImportTaskUnknownRemote(_) => Code::ImportTaskUnknownRemote,
|
|
||||||
Error::S3Error { status, .. } if status.is_client_error() => {
|
Error::S3Error { status, .. } if status.is_client_error() => {
|
||||||
Code::InvalidS3SnapshotRequest
|
Code::InvalidS3SnapshotRequest
|
||||||
}
|
}
|
||||||
@@ -365,12 +345,3 @@ impl ErrorCode for Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ReceiveTaskError> for Error {
|
|
||||||
fn from(value: ReceiveTaskError) -> Self {
|
|
||||||
match value {
|
|
||||||
ReceiveTaskError::UnknownRemote(unknown) => Error::ImportTaskUnknownRemote(unknown),
|
|
||||||
ReceiveTaskError::DuplicateTask(dup) => Error::ImportTaskAlreadyReceived(dup),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
use meilisearch_types::enterprise_edition::network::Network;
|
|
||||||
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
|
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
|
||||||
use meilisearch_types::heed::types::{SerdeJson, Str};
|
use meilisearch_types::heed::types::{SerdeJson, Str};
|
||||||
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
|
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
|
||||||
|
use meilisearch_types::network::Network;
|
||||||
|
|
||||||
use crate::error::FeatureNotEnabledError;
|
use crate::error::FeatureNotEnabledError;
|
||||||
use crate::Result;
|
use crate::Result;
|
||||||
@@ -38,10 +38,6 @@ impl RoFeatures {
|
|||||||
Self { runtime }
|
Self { runtime }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn from_runtime_features(features: RuntimeTogglableFeatures) -> Self {
|
|
||||||
Self { runtime: features }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn runtime_features(&self) -> RuntimeTogglableFeatures {
|
pub fn runtime_features(&self) -> RuntimeTogglableFeatures {
|
||||||
self.runtime
|
self.runtime
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -361,11 +361,6 @@ impl IndexMapper {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The number of indexes in the database
|
|
||||||
pub fn index_count(&self, rtxn: &RoTxn) -> Result<u64> {
|
|
||||||
Ok(self.index_mapping.len(rtxn)?)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return an index, may open it if it wasn't already opened.
|
/// Return an index, may open it if it wasn't already opened.
|
||||||
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
|
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
|
||||||
if let Some((current_name, current_index)) =
|
if let Some((current_name, current_index)) =
|
||||||
|
|||||||
@@ -325,9 +325,6 @@ fn snapshot_details(d: &Details) -> String {
|
|||||||
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
|
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
|
||||||
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
|
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
|
||||||
}
|
}
|
||||||
Details::NetworkTopologyChange { moved_documents, message } => {
|
|
||||||
format!("{{ moved_documents: {moved_documents:?}, message: {message:?}")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -54,7 +54,6 @@ pub use features::RoFeatures;
|
|||||||
use flate2::bufread::GzEncoder;
|
use flate2::bufread::GzEncoder;
|
||||||
use flate2::Compression;
|
use flate2::Compression;
|
||||||
use meilisearch_types::batches::Batch;
|
use meilisearch_types::batches::Batch;
|
||||||
use meilisearch_types::enterprise_edition::network::Network;
|
|
||||||
use meilisearch_types::features::{
|
use meilisearch_types::features::{
|
||||||
ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
|
ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
|
||||||
};
|
};
|
||||||
@@ -67,13 +66,12 @@ use meilisearch_types::milli::vector::{
|
|||||||
Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment,
|
Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment,
|
||||||
};
|
};
|
||||||
use meilisearch_types::milli::{self, Index};
|
use meilisearch_types::milli::{self, Index};
|
||||||
|
use meilisearch_types::network::Network;
|
||||||
use meilisearch_types::task_view::TaskView;
|
use meilisearch_types::task_view::TaskView;
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::{
|
use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
|
||||||
DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
|
|
||||||
};
|
|
||||||
use meilisearch_types::tasks::{KindWithContent, Task};
|
|
||||||
use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
|
use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
|
||||||
use milli::vector::db::IndexEmbeddingConfig;
|
use milli::vector::db::IndexEmbeddingConfig;
|
||||||
|
use processing::ProcessingTasks;
|
||||||
pub use queue::Query;
|
pub use queue::Query;
|
||||||
use queue::Queue;
|
use queue::Queue;
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
@@ -84,7 +82,6 @@ use uuid::Uuid;
|
|||||||
use versioning::Versioning;
|
use versioning::Versioning;
|
||||||
|
|
||||||
use crate::index_mapper::IndexMapper;
|
use crate::index_mapper::IndexMapper;
|
||||||
use crate::processing::ProcessingTasks;
|
|
||||||
use crate::utils::clamp_to_page_size;
|
use crate::utils::clamp_to_page_size;
|
||||||
|
|
||||||
pub(crate) type BEI128 = I128<BE>;
|
pub(crate) type BEI128 = I128<BE>;
|
||||||
@@ -703,14 +700,14 @@ impl IndexScheduler {
|
|||||||
self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
|
self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_task_network(&self, task_id: TaskId, network: DbTaskNetwork) -> Result<Task> {
|
pub fn set_task_network(&self, task_id: TaskId, network: TaskNetwork) -> Result<()> {
|
||||||
let mut wtxn = self.env.write_txn()?;
|
let mut wtxn = self.env.write_txn()?;
|
||||||
let mut task =
|
let mut task =
|
||||||
self.queue.tasks.get_task(&wtxn, task_id)?.ok_or(Error::TaskNotFound(task_id))?;
|
self.queue.tasks.get_task(&wtxn, task_id)?.ok_or(Error::TaskNotFound(task_id))?;
|
||||||
task.network = Some(network);
|
task.network = Some(network);
|
||||||
self.queue.tasks.all_tasks.put(&mut wtxn, &task_id, &task)?;
|
self.queue.tasks.all_tasks.put(&mut wtxn, &task_id, &task)?;
|
||||||
wtxn.commit()?;
|
wtxn.commit()?;
|
||||||
Ok(task)
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the batches matching the query from the user's point of view along
|
/// Return the batches matching the query from the user's point of view along
|
||||||
@@ -760,30 +757,18 @@ impl IndexScheduler {
|
|||||||
task_id: Option<TaskId>,
|
task_id: Option<TaskId>,
|
||||||
dry_run: bool,
|
dry_run: bool,
|
||||||
) -> Result<Task> {
|
) -> Result<Task> {
|
||||||
self.register_with_custom_metadata(kind, task_id, None, dry_run, None)
|
self.register_with_custom_metadata(kind, task_id, None, dry_run)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Register a new task in the scheduler, with metadata.
|
/// Register a new task in the scheduler, with metadata.
|
||||||
///
|
///
|
||||||
/// If it fails and data was associated with the task, it tries to delete the associated data.
|
/// If it fails and data was associated with the task, it tries to delete the associated data.
|
||||||
///
|
|
||||||
/// # Parameters
|
|
||||||
///
|
|
||||||
/// - task_network: network of the task to check.
|
|
||||||
///
|
|
||||||
/// If the task is an import task, only accept it if:
|
|
||||||
///
|
|
||||||
/// 1. There is an ongoing network topology change task
|
|
||||||
/// 2. The task to register matches the network version of the network topology change task
|
|
||||||
///
|
|
||||||
/// Always accept the task if it is not an import task.
|
|
||||||
pub fn register_with_custom_metadata(
|
pub fn register_with_custom_metadata(
|
||||||
&self,
|
&self,
|
||||||
kind: KindWithContent,
|
kind: KindWithContent,
|
||||||
task_id: Option<TaskId>,
|
task_id: Option<TaskId>,
|
||||||
custom_metadata: Option<String>,
|
custom_metadata: Option<String>,
|
||||||
dry_run: bool,
|
dry_run: bool,
|
||||||
task_network: Option<TaskNetwork>,
|
|
||||||
) -> Result<Task> {
|
) -> Result<Task> {
|
||||||
// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
|
// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
|
||||||
if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
|
if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
|
||||||
@@ -794,19 +779,7 @@ impl IndexScheduler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let mut wtxn = self.env.write_txn()?;
|
let mut wtxn = self.env.write_txn()?;
|
||||||
|
let task = self.queue.register(&mut wtxn, &kind, task_id, custom_metadata, dry_run)?;
|
||||||
if let Some(TaskNetwork::Import { import_from, network_change, metadata }) = &task_network {
|
|
||||||
self.update_network_task(&mut wtxn, import_from, network_change, metadata)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let task = self.queue.register(
|
|
||||||
&mut wtxn,
|
|
||||||
&kind,
|
|
||||||
task_id,
|
|
||||||
custom_metadata,
|
|
||||||
dry_run,
|
|
||||||
task_network.map(DbTaskNetwork::from),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// If the registered task is a task cancelation
|
// If the registered task is a task cancelation
|
||||||
// we inform the processing tasks to stop (if necessary).
|
// we inform the processing tasks to stop (if necessary).
|
||||||
@@ -828,91 +801,6 @@ impl IndexScheduler {
|
|||||||
Ok(task)
|
Ok(task)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn network_no_index_for_remote(
|
|
||||||
&self,
|
|
||||||
remote_name: String,
|
|
||||||
origin: Origin,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut wtxn = self.env.write_txn()?;
|
|
||||||
|
|
||||||
self.update_network_task(
|
|
||||||
&mut wtxn,
|
|
||||||
&ImportData { remote_name, index_name: None, document_count: 0 },
|
|
||||||
&origin,
|
|
||||||
&ImportMetadata { index_count: 0, task_key: None, total_index_documents: 0 },
|
|
||||||
)?;
|
|
||||||
|
|
||||||
wtxn.commit()?;
|
|
||||||
|
|
||||||
// wake up the scheduler as the task state has changed
|
|
||||||
self.scheduler.wake_up.signal();
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn update_network_task(
|
|
||||||
&self,
|
|
||||||
wtxn: &mut heed::RwTxn<'_>,
|
|
||||||
import_from: &ImportData,
|
|
||||||
network_change: &Origin,
|
|
||||||
metadata: &ImportMetadata,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut network_tasks = self
|
|
||||||
.queue
|
|
||||||
.tasks
|
|
||||||
.get_kind(&*wtxn, meilisearch_types::tasks::Kind::NetworkTopologyChange)?;
|
|
||||||
if network_tasks.is_empty() {
|
|
||||||
return Err(Error::ImportTaskWithoutNetworkTask);
|
|
||||||
}
|
|
||||||
let network_task = {
|
|
||||||
let processing = self.processing_tasks.read().unwrap().processing.clone();
|
|
||||||
if processing.is_disjoint(&network_tasks) {
|
|
||||||
let enqueued = self
|
|
||||||
.queue
|
|
||||||
.tasks
|
|
||||||
.get_status(&*wtxn, meilisearch_types::tasks::Status::Enqueued)?;
|
|
||||||
|
|
||||||
network_tasks &= enqueued;
|
|
||||||
if let Some(network_task) = network_tasks.into_iter().next() {
|
|
||||||
network_task
|
|
||||||
} else {
|
|
||||||
return Err(Error::ImportTaskWithoutNetworkTask);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
network_tasks &= &*processing;
|
|
||||||
network_tasks.into_iter().next().unwrap()
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let mut network_task = self.queue.tasks.get_task(&*wtxn, network_task)?.unwrap();
|
|
||||||
let network_task_version = network_task
|
|
||||||
.network
|
|
||||||
.as_ref()
|
|
||||||
.map(|network| network.network_version())
|
|
||||||
.unwrap_or_default();
|
|
||||||
if network_task_version != network_change.network_version {
|
|
||||||
return Err(Error::NetworkVersionMismatch {
|
|
||||||
network_task: network_task_version,
|
|
||||||
import_task: network_change.network_version,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
let KindWithContent::NetworkTopologyChange(network_topology_change) =
|
|
||||||
&mut network_task.kind
|
|
||||||
else {
|
|
||||||
tracing::error!("unexpected network kind for network task while registering task");
|
|
||||||
return Err(Error::CorruptedTaskQueue);
|
|
||||||
};
|
|
||||||
network_topology_change.receive_remote_task(
|
|
||||||
&import_from.remote_name,
|
|
||||||
import_from.index_name.as_deref(),
|
|
||||||
metadata.task_key,
|
|
||||||
import_from.document_count,
|
|
||||||
metadata.index_count,
|
|
||||||
metadata.total_index_documents,
|
|
||||||
)?;
|
|
||||||
self.queue.tasks.update_task(wtxn, &mut network_task)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Register a new task coming from a dump in the scheduler.
|
/// Register a new task coming from a dump in the scheduler.
|
||||||
/// By taking a mutable ref we're pretty sure no one will ever import a dump while actix is running.
|
/// By taking a mutable ref we're pretty sure no one will ever import a dump while actix is running.
|
||||||
pub fn register_dumped_task(&mut self) -> Result<Dump<'_>> {
|
pub fn register_dumped_task(&mut self) -> Result<Dump<'_>> {
|
||||||
|
|||||||
@@ -42,10 +42,12 @@ impl ProcessingTasks {
|
|||||||
|
|
||||||
/// Set the processing tasks to an empty list
|
/// Set the processing tasks to an empty list
|
||||||
pub fn stop_processing(&mut self) -> Self {
|
pub fn stop_processing(&mut self) -> Self {
|
||||||
|
self.progress = None;
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
batch: std::mem::take(&mut self.batch),
|
batch: std::mem::take(&mut self.batch),
|
||||||
processing: std::mem::take(&mut self.processing),
|
processing: std::mem::take(&mut self.processing),
|
||||||
progress: std::mem::take(&mut self.progress),
|
progress: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ use file_store::FileStore;
|
|||||||
use meilisearch_types::batches::BatchId;
|
use meilisearch_types::batches::BatchId;
|
||||||
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
|
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
|
||||||
use meilisearch_types::milli::{CboRoaringBitmapCodec, BEU32};
|
use meilisearch_types::milli::{CboRoaringBitmapCodec, BEU32};
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::DbTaskNetwork;
|
|
||||||
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
|
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
use time::format_description::well_known::Rfc3339;
|
use time::format_description::well_known::Rfc3339;
|
||||||
@@ -260,7 +259,6 @@ impl Queue {
|
|||||||
task_id: Option<TaskId>,
|
task_id: Option<TaskId>,
|
||||||
custom_metadata: Option<String>,
|
custom_metadata: Option<String>,
|
||||||
dry_run: bool,
|
dry_run: bool,
|
||||||
network: Option<DbTaskNetwork>,
|
|
||||||
) -> Result<Task> {
|
) -> Result<Task> {
|
||||||
let next_task_id = self.tasks.next_task_id(wtxn)?;
|
let next_task_id = self.tasks.next_task_id(wtxn)?;
|
||||||
|
|
||||||
@@ -282,7 +280,7 @@ impl Queue {
|
|||||||
details: kind.default_details(),
|
details: kind.default_details(),
|
||||||
status: Status::Enqueued,
|
status: Status::Enqueued,
|
||||||
kind: kind.clone(),
|
kind: kind.clone(),
|
||||||
network,
|
network: None,
|
||||||
custom_metadata,
|
custom_metadata,
|
||||||
};
|
};
|
||||||
// For deletion and cancelation tasks, we want to make extra sure that they
|
// For deletion and cancelation tasks, we want to make extra sure that they
|
||||||
@@ -350,7 +348,6 @@ impl Queue {
|
|||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
false,
|
false,
|
||||||
None,
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
|||||||
@@ -3,8 +3,7 @@ use std::ops::{Bound, RangeBounds};
|
|||||||
use meilisearch_types::heed::types::{DecodeIgnore, SerdeBincode, SerdeJson, Str};
|
use meilisearch_types::heed::types::{DecodeIgnore, SerdeBincode, SerdeJson, Str};
|
||||||
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
|
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
|
||||||
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
|
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::DbTaskNetwork;
|
use meilisearch_types::tasks::{Kind, Status, Task};
|
||||||
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
|
|
||||||
use roaring::{MultiOps, RoaringBitmap};
|
use roaring::{MultiOps, RoaringBitmap};
|
||||||
use time::OffsetDateTime;
|
use time::OffsetDateTime;
|
||||||
|
|
||||||
@@ -115,16 +114,14 @@ impl TaskQueue {
|
|||||||
/// - CorruptedTaskQueue: The task doesn't exist in the database
|
/// - CorruptedTaskQueue: The task doesn't exist in the database
|
||||||
pub(crate) fn update_task(&self, wtxn: &mut RwTxn, task: &mut Task) -> Result<()> {
|
pub(crate) fn update_task(&self, wtxn: &mut RwTxn, task: &mut Task) -> Result<()> {
|
||||||
let old_task = self.get_task(wtxn, task.uid)?.ok_or(Error::CorruptedTaskQueue)?;
|
let old_task = self.get_task(wtxn, task.uid)?.ok_or(Error::CorruptedTaskQueue)?;
|
||||||
// network topology tasks may be processed multiple times.
|
let reprocessing = old_task.status != Status::Enqueued;
|
||||||
let maybe_reprocessing = old_task.status != Status::Enqueued
|
|
||||||
|| task.kind.as_kind() == Kind::NetworkTopologyChange;
|
|
||||||
|
|
||||||
debug_assert!(old_task != *task);
|
debug_assert!(old_task != *task);
|
||||||
debug_assert_eq!(old_task.uid, task.uid);
|
debug_assert_eq!(old_task.uid, task.uid);
|
||||||
|
|
||||||
// If we're processing a task that failed it may already contains a batch_uid
|
// If we're processing a task that failed it may already contains a batch_uid
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
maybe_reprocessing || (old_task.batch_uid.is_none() && task.batch_uid.is_some()),
|
reprocessing || (old_task.batch_uid.is_none() && task.batch_uid.is_some()),
|
||||||
"\n==> old: {old_task:?}\n==> new: {task:?}"
|
"\n==> old: {old_task:?}\n==> new: {task:?}"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -146,24 +143,13 @@ impl TaskQueue {
|
|||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Avoids rewriting part of the network topology change because of TOCTOU errors
|
|
||||||
if let (
|
|
||||||
KindWithContent::NetworkTopologyChange(old_state),
|
|
||||||
KindWithContent::NetworkTopologyChange(new_state),
|
|
||||||
) = (old_task.kind, &mut task.kind)
|
|
||||||
{
|
|
||||||
new_state.merge(old_state);
|
|
||||||
// the state possibly just changed, rewrite the details
|
|
||||||
task.details = Some(new_state.to_details());
|
|
||||||
}
|
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
old_task.enqueued_at, task.enqueued_at,
|
old_task.enqueued_at, task.enqueued_at,
|
||||||
"Cannot update a task's enqueued_at time"
|
"Cannot update a task's enqueued_at time"
|
||||||
);
|
);
|
||||||
if old_task.started_at != task.started_at {
|
if old_task.started_at != task.started_at {
|
||||||
assert!(
|
assert!(
|
||||||
maybe_reprocessing || old_task.started_at.is_none(),
|
reprocessing || old_task.started_at.is_none(),
|
||||||
"Cannot update a task's started_at time"
|
"Cannot update a task's started_at time"
|
||||||
);
|
);
|
||||||
if let Some(started_at) = old_task.started_at {
|
if let Some(started_at) = old_task.started_at {
|
||||||
@@ -175,7 +161,7 @@ impl TaskQueue {
|
|||||||
}
|
}
|
||||||
if old_task.finished_at != task.finished_at {
|
if old_task.finished_at != task.finished_at {
|
||||||
assert!(
|
assert!(
|
||||||
maybe_reprocessing || old_task.finished_at.is_none(),
|
reprocessing || old_task.finished_at.is_none(),
|
||||||
"Cannot update a task's finished_at time"
|
"Cannot update a task's finished_at time"
|
||||||
);
|
);
|
||||||
if let Some(finished_at) = old_task.finished_at {
|
if let Some(finished_at) = old_task.finished_at {
|
||||||
@@ -189,16 +175,7 @@ impl TaskQueue {
|
|||||||
task.network = match (old_task.network, task.network.take()) {
|
task.network = match (old_task.network, task.network.take()) {
|
||||||
(None, None) => None,
|
(None, None) => None,
|
||||||
(None, Some(network)) | (Some(network), None) => Some(network),
|
(None, Some(network)) | (Some(network), None) => Some(network),
|
||||||
(Some(left), Some(right)) => Some(match (left, right) {
|
(Some(_), Some(network)) => Some(network),
|
||||||
(
|
|
||||||
DbTaskNetwork::Remotes { remote_tasks: mut left, network_version: _ },
|
|
||||||
DbTaskNetwork::Remotes { remote_tasks: mut right, network_version },
|
|
||||||
) => {
|
|
||||||
left.append(&mut right);
|
|
||||||
DbTaskNetwork::Remotes { remote_tasks: left, network_version }
|
|
||||||
}
|
|
||||||
(_, right) => right,
|
|
||||||
}),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
self.all_tasks.put(wtxn, &task.uid, task)?;
|
self.all_tasks.put(wtxn, &task.uid, task)?;
|
||||||
|
|||||||
@@ -203,30 +203,26 @@ fn test_disable_auto_deletion_of_tasks() {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
{
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let tasks =
|
||||||
let tasks = index_scheduler
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.queue
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
|
||||||
.unwrap();
|
drop(rtxn);
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
drop(proc);
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
|
|
||||||
}
|
|
||||||
|
|
||||||
// now we're above the max number of tasks
|
// now we're above the max number of tasks
|
||||||
// and if we try to advance in the tick function no new task deletion should be enqueued
|
// and if we try to advance in the tick function no new task deletion should be enqueued
|
||||||
handle.advance_till([Start, BatchCreated]);
|
handle.advance_till([Start, BatchCreated]);
|
||||||
{
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let tasks =
|
||||||
let tasks = index_scheduler
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.queue
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
|
||||||
.unwrap();
|
drop(rtxn);
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
drop(proc);
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -271,69 +267,59 @@ fn test_auto_deletion_of_tasks() {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
{
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let tasks =
|
||||||
let tasks = index_scheduler
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.queue
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
|
||||||
.unwrap();
|
drop(rtxn);
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
drop(proc);
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
// now we're above the max number of tasks
|
||||||
// now we're above the max number of tasks
|
// and if we try to advance in the tick function a new task deletion should be enqueued
|
||||||
// and if we try to advance in the tick function a new task deletion should be enqueued
|
handle.advance_till([Start, BatchCreated]);
|
||||||
handle.advance_till([Start, BatchCreated]);
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let tasks =
|
||||||
let tasks = index_scheduler
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.queue
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
|
||||||
.unwrap();
|
drop(rtxn);
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
drop(proc);
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
|
||||||
handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let tasks =
|
||||||
let tasks = index_scheduler
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.queue
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
|
||||||
.unwrap();
|
drop(rtxn);
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
drop(proc);
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
|
|
||||||
}
|
|
||||||
|
|
||||||
handle.advance_one_failed_batch();
|
handle.advance_one_failed_batch();
|
||||||
// a new task deletion has been enqueued
|
// a new task deletion has been enqueued
|
||||||
handle.advance_one_successful_batch();
|
handle.advance_one_successful_batch();
|
||||||
{
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let tasks =
|
||||||
let tasks = index_scheduler
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.queue
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
|
||||||
.unwrap();
|
drop(rtxn);
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
drop(proc);
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
|
|
||||||
}
|
|
||||||
|
|
||||||
handle.advance_one_failed_batch();
|
handle.advance_one_failed_batch();
|
||||||
handle.advance_one_successful_batch();
|
handle.advance_one_successful_batch();
|
||||||
{
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let tasks =
|
||||||
let tasks = index_scheduler
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.queue
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
|
||||||
.unwrap();
|
drop(rtxn);
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
drop(proc);
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
@@ -74,7 +74,6 @@ impl From<KindWithContent> for AutobatchKind {
|
|||||||
| KindWithContent::DumpCreation { .. }
|
| KindWithContent::DumpCreation { .. }
|
||||||
| KindWithContent::Export { .. }
|
| KindWithContent::Export { .. }
|
||||||
| KindWithContent::UpgradeDatabase { .. }
|
| KindWithContent::UpgradeDatabase { .. }
|
||||||
| KindWithContent::NetworkTopologyChange(_)
|
|
||||||
| KindWithContent::SnapshotCreation => {
|
| KindWithContent::SnapshotCreation => {
|
||||||
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
|
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ use std::io::ErrorKind;
|
|||||||
use meilisearch_types::heed::RoTxn;
|
use meilisearch_types::heed::RoTxn;
|
||||||
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
||||||
use meilisearch_types::settings::{Settings, Unchecked};
|
use meilisearch_types::settings::{Settings, Unchecked};
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::NetworkTopologyState;
|
|
||||||
use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
|
use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
@@ -60,14 +59,6 @@ pub(crate) enum Batch {
|
|||||||
index_uid: String,
|
index_uid: String,
|
||||||
task: Task,
|
task: Task,
|
||||||
},
|
},
|
||||||
#[allow(clippy::enum_variant_names)] // warranted because we are executing an inner index batch
|
|
||||||
NetworkIndexBatch {
|
|
||||||
network_task: Task,
|
|
||||||
inner_batch: Box<Batch>,
|
|
||||||
},
|
|
||||||
NetworkReady {
|
|
||||||
task: Task,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
@@ -149,14 +140,9 @@ impl Batch {
|
|||||||
..
|
..
|
||||||
} => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
|
} => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
|
||||||
},
|
},
|
||||||
Batch::IndexSwap { task } | Batch::NetworkReady { task } => {
|
Batch::IndexSwap { task } => {
|
||||||
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
|
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
|
||||||
}
|
}
|
||||||
Batch::NetworkIndexBatch { network_task, inner_batch } => {
|
|
||||||
let mut tasks = inner_batch.ids();
|
|
||||||
tasks.insert(network_task.uid);
|
|
||||||
tasks
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -170,14 +156,12 @@ impl Batch {
|
|||||||
| Dump(_)
|
| Dump(_)
|
||||||
| Export { .. }
|
| Export { .. }
|
||||||
| UpgradeDatabase { .. }
|
| UpgradeDatabase { .. }
|
||||||
| NetworkReady { .. }
|
|
||||||
| IndexSwap { .. } => None,
|
| IndexSwap { .. } => None,
|
||||||
IndexOperation { op, .. } => Some(op.index_uid()),
|
IndexOperation { op, .. } => Some(op.index_uid()),
|
||||||
IndexCreation { index_uid, .. }
|
IndexCreation { index_uid, .. }
|
||||||
| IndexUpdate { index_uid, .. }
|
| IndexUpdate { index_uid, .. }
|
||||||
| IndexDeletion { index_uid, .. }
|
| IndexDeletion { index_uid, .. }
|
||||||
| IndexCompaction { index_uid, .. } => Some(index_uid),
|
| IndexCompaction { index_uid, .. } => Some(index_uid),
|
||||||
NetworkIndexBatch { network_task: _, inner_batch } => inner_batch.index_uid(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -200,8 +184,6 @@ impl fmt::Display for Batch {
|
|||||||
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
|
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
|
||||||
Batch::Export { .. } => f.write_str("Export")?,
|
Batch::Export { .. } => f.write_str("Export")?,
|
||||||
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
|
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
|
||||||
Batch::NetworkIndexBatch { .. } => f.write_str("NetworkTopologyChange")?,
|
|
||||||
Batch::NetworkReady { .. } => f.write_str("NetworkTopologyChange")?,
|
|
||||||
};
|
};
|
||||||
match index_uid {
|
match index_uid {
|
||||||
Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
|
Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
|
||||||
@@ -470,7 +452,6 @@ impl IndexScheduler {
|
|||||||
pub(crate) fn create_next_batch(
|
pub(crate) fn create_next_batch(
|
||||||
&self,
|
&self,
|
||||||
rtxn: &RoTxn,
|
rtxn: &RoTxn,
|
||||||
processing_network_tasks: &RoaringBitmap,
|
|
||||||
) -> Result<Option<(Batch, ProcessingBatch)>> {
|
) -> Result<Option<(Batch, ProcessingBatch)>> {
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
self.maybe_fail(crate::test_utils::FailureLocation::InsideCreateBatch)?;
|
self.maybe_fail(crate::test_utils::FailureLocation::InsideCreateBatch)?;
|
||||||
@@ -479,6 +460,7 @@ impl IndexScheduler {
|
|||||||
let mut current_batch = ProcessingBatch::new(batch_id);
|
let mut current_batch = ProcessingBatch::new(batch_id);
|
||||||
|
|
||||||
let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
|
let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
|
||||||
|
let count_total_enqueued = enqueued.len();
|
||||||
let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;
|
let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;
|
||||||
|
|
||||||
// 0. we get the last task to cancel.
|
// 0. we get the last task to cancel.
|
||||||
@@ -527,15 +509,7 @@ impl IndexScheduler {
|
|||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. Check for enqueued network topology changes
|
// 2. we get the next task to delete
|
||||||
let network_changes = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)?
|
|
||||||
& (enqueued | processing_network_tasks);
|
|
||||||
if let Some(task_id) = network_changes.iter().next() {
|
|
||||||
let task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
|
|
||||||
return self.start_processing_network(rtxn, task, enqueued, current_batch);
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3. we get the next task to delete
|
|
||||||
let to_delete = self.queue.tasks.get_kind(rtxn, Kind::TaskDeletion)? & enqueued;
|
let to_delete = self.queue.tasks.get_kind(rtxn, Kind::TaskDeletion)? & enqueued;
|
||||||
if !to_delete.is_empty() {
|
if !to_delete.is_empty() {
|
||||||
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_delete)?;
|
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_delete)?;
|
||||||
@@ -545,7 +519,7 @@ impl IndexScheduler {
|
|||||||
return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
|
return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4. we get the next task to compact
|
// 3. we get the next task to compact
|
||||||
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
|
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
|
||||||
if let Some(task_id) = to_compact.min() {
|
if let Some(task_id) = to_compact.min() {
|
||||||
let mut task =
|
let mut task =
|
||||||
@@ -560,7 +534,7 @@ impl IndexScheduler {
|
|||||||
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
|
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// 5. we batch the export.
|
// 4. we batch the export.
|
||||||
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
|
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
|
||||||
if !to_export.is_empty() {
|
if !to_export.is_empty() {
|
||||||
let task_id = to_export.iter().next().expect("There must be at least one export task");
|
let task_id = to_export.iter().next().expect("There must be at least one export task");
|
||||||
@@ -571,7 +545,7 @@ impl IndexScheduler {
|
|||||||
return Ok(Some((Batch::Export { task }, current_batch)));
|
return Ok(Some((Batch::Export { task }, current_batch)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// 6. we batch the snapshot.
|
// 5. we batch the snapshot.
|
||||||
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
|
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
|
||||||
if !to_snapshot.is_empty() {
|
if !to_snapshot.is_empty() {
|
||||||
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
|
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
|
||||||
@@ -581,7 +555,7 @@ impl IndexScheduler {
|
|||||||
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
|
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// 7. we batch the dumps.
|
// 6. we batch the dumps.
|
||||||
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
|
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
|
||||||
if let Some(to_dump) = to_dump.min() {
|
if let Some(to_dump) = to_dump.min() {
|
||||||
let mut task =
|
let mut task =
|
||||||
@@ -594,63 +568,25 @@ impl IndexScheduler {
|
|||||||
return Ok(Some((Batch::Dump(task), current_batch)));
|
return Ok(Some((Batch::Dump(task), current_batch)));
|
||||||
}
|
}
|
||||||
|
|
||||||
let network = self.network();
|
// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
|
||||||
|
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
|
||||||
|
let mut task =
|
||||||
|
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
|
||||||
|
|
||||||
// 8. We make a batch from the unprioritised tasks.
|
// If the task is not associated with any index, verify that it is an index swap and
|
||||||
let (batch, current_batch) =
|
// create the batch directly. Otherwise, get the index name associated with the task
|
||||||
self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
|
// and use the autobatcher to batch the enqueued tasks associated with it
|
||||||
let is_task_from_the_future = task
|
|
||||||
.network
|
|
||||||
.as_ref()
|
|
||||||
.map(|task_network| task_network.network_version() > network.version)
|
|
||||||
// tasks without versions are not from the future
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
is_task_from_the_future
|
let index_name = if let Some(&index_name) = task.indexes().first() {
|
||||||
})?;
|
index_name
|
||||||
Ok(batch.map(|batch| (batch, current_batch)))
|
} else {
|
||||||
}
|
assert!(matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty()));
|
||||||
|
current_batch.processing(Some(&mut task));
|
||||||
fn create_next_batch_unprioritized<F>(
|
current_batch.reason(BatchStopReason::TaskCannotBeBatched {
|
||||||
&self,
|
kind: Kind::IndexSwap,
|
||||||
rtxn: &RoTxn,
|
id: task.uid,
|
||||||
enqueued: &RoaringBitmap,
|
});
|
||||||
mut current_batch: ProcessingBatch,
|
return Ok(Some((Batch::IndexSwap { task }, current_batch)));
|
||||||
mut skip_if: F,
|
|
||||||
) -> Result<(Option<Batch>, ProcessingBatch)>
|
|
||||||
where
|
|
||||||
F: FnMut(&Task) -> bool,
|
|
||||||
{
|
|
||||||
let count_total_enqueued = enqueued.len();
|
|
||||||
|
|
||||||
let mut enqueued_it = enqueued.iter();
|
|
||||||
let mut task;
|
|
||||||
let index_name = loop {
|
|
||||||
let Some(task_id) = enqueued_it.next() else {
|
|
||||||
return Ok((None, current_batch));
|
|
||||||
};
|
|
||||||
task = self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
|
|
||||||
|
|
||||||
if skip_if(&task) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
// If the task is not associated with any index, verify that it is an index swap and
|
|
||||||
// create the batch directly. Otherwise, get the index name associated with the task
|
|
||||||
// and use the autobatcher to batch the enqueued tasks associated with it
|
|
||||||
|
|
||||||
if let Some(&index_name) = task.indexes().first() {
|
|
||||||
break index_name;
|
|
||||||
} else {
|
|
||||||
assert!(
|
|
||||||
matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty())
|
|
||||||
);
|
|
||||||
current_batch.processing(Some(&mut task));
|
|
||||||
current_batch.reason(BatchStopReason::TaskCannotBeBatched {
|
|
||||||
kind: Kind::IndexSwap,
|
|
||||||
id: task.uid,
|
|
||||||
});
|
|
||||||
return Ok((Some(Batch::IndexSwap { task }), current_batch));
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
|
let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
|
||||||
@@ -685,10 +621,6 @@ impl IndexScheduler {
|
|||||||
.get_task(rtxn, task_id)
|
.get_task(rtxn, task_id)
|
||||||
.and_then(|task| task.ok_or(Error::CorruptedTaskQueue))?;
|
.and_then(|task| task.ok_or(Error::CorruptedTaskQueue))?;
|
||||||
|
|
||||||
if skip_if(&task) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(uuid) = task.content_uuid() {
|
if let Some(uuid) = task.content_uuid() {
|
||||||
let content_size = match self.queue.file_store.compute_size(uuid) {
|
let content_size = match self.queue.file_store.compute_size(uuid) {
|
||||||
Ok(content_size) => content_size,
|
Ok(content_size) => content_size,
|
||||||
@@ -719,109 +651,19 @@ impl IndexScheduler {
|
|||||||
autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
|
autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
|
||||||
{
|
{
|
||||||
current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
|
current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
|
||||||
let batch = self.create_next_batch_index(
|
return Ok(self
|
||||||
rtxn,
|
.create_next_batch_index(
|
||||||
index_name.to_string(),
|
rtxn,
|
||||||
batchkind,
|
index_name.to_string(),
|
||||||
&mut current_batch,
|
batchkind,
|
||||||
create_index,
|
&mut current_batch,
|
||||||
)?;
|
create_index,
|
||||||
return Ok((batch, current_batch));
|
)?
|
||||||
|
.map(|batch| (batch, current_batch)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we found no tasks then we were notified for something that got autobatched
|
// If we found no tasks then we were notified for something that got autobatched
|
||||||
// somehow and there is nothing to do.
|
// somehow and there is nothing to do.
|
||||||
Ok((None, current_batch))
|
Ok(None)
|
||||||
}
|
|
||||||
|
|
||||||
fn start_processing_network(
|
|
||||||
&self,
|
|
||||||
rtxn: &RoTxn,
|
|
||||||
mut task: Task,
|
|
||||||
enqueued: &RoaringBitmap,
|
|
||||||
mut current_batch: ProcessingBatch,
|
|
||||||
) -> Result<Option<(Batch, ProcessingBatch)>> {
|
|
||||||
current_batch.processing(Some(&mut task));
|
|
||||||
|
|
||||||
let change_version =
|
|
||||||
task.network.as_ref().map(|network| network.network_version()).unwrap_or_default();
|
|
||||||
let KindWithContent::NetworkTopologyChange(network_topology_change) = &task.kind else {
|
|
||||||
panic!("inconsistent kind with content")
|
|
||||||
};
|
|
||||||
|
|
||||||
match network_topology_change.state() {
|
|
||||||
NetworkTopologyState::WaitingForOlderTasks => {
|
|
||||||
let res =
|
|
||||||
self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
|
|
||||||
let has_index = task.index_uid().is_some();
|
|
||||||
|
|
||||||
if !has_index {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
let has_older_network_version = task
|
|
||||||
.network
|
|
||||||
.as_ref()
|
|
||||||
.map(|network| network.network_version() < change_version)
|
|
||||||
// if there is no version, we never retain the task
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
!has_older_network_version
|
|
||||||
});
|
|
||||||
|
|
||||||
let (batch, current_batch) = res?;
|
|
||||||
|
|
||||||
let batch = match batch {
|
|
||||||
Some(batch) => {
|
|
||||||
let inner_batch = Box::new(batch);
|
|
||||||
|
|
||||||
Batch::NetworkIndexBatch { network_task: task, inner_batch }
|
|
||||||
}
|
|
||||||
None => Batch::NetworkReady { task },
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Some((batch, current_batch)))
|
|
||||||
}
|
|
||||||
NetworkTopologyState::ImportingDocuments => {
|
|
||||||
// if the import is done we need to go to the next state
|
|
||||||
if network_topology_change.is_import_finished() {
|
|
||||||
return Ok(Some((Batch::NetworkReady { task }, current_batch)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let res =
|
|
||||||
self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
|
|
||||||
let has_index = task.index_uid().is_some();
|
|
||||||
|
|
||||||
if !has_index {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
let is_import_task = task
|
|
||||||
.network
|
|
||||||
.as_ref()
|
|
||||||
.map(|network| {
|
|
||||||
network.network_version() == change_version
|
|
||||||
&& network.import_data().is_some()
|
|
||||||
})
|
|
||||||
// if there is no version, we never retain the task
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
!is_import_task
|
|
||||||
});
|
|
||||||
|
|
||||||
let (batch, current_batch) = res?;
|
|
||||||
|
|
||||||
let batch = batch.map(|batch| {
|
|
||||||
let inner_batch = Box::new(batch);
|
|
||||||
|
|
||||||
(Batch::NetworkIndexBatch { network_task: task, inner_batch }, current_batch)
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(batch)
|
|
||||||
}
|
|
||||||
NetworkTopologyState::ExportingDocuments | NetworkTopologyState::Finished => {
|
|
||||||
Ok(Some((Batch::NetworkReady { task }, current_batch)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,281 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use bumpalo::Bump;
|
|
||||||
use meilisearch_types::enterprise_edition::network::Remote;
|
|
||||||
use meilisearch_types::milli::documents::PrimaryKey;
|
|
||||||
use meilisearch_types::milli::progress::{EmbedderStats, Progress};
|
|
||||||
use meilisearch_types::milli::update::new::indexer;
|
|
||||||
use meilisearch_types::milli::update::new::indexer::enterprise_edition::sharding::Shards;
|
|
||||||
use meilisearch_types::milli::{self};
|
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::{NetworkTopologyState, Origin};
|
|
||||||
use meilisearch_types::tasks::{KindWithContent, Status, Task};
|
|
||||||
use roaring::RoaringBitmap;
|
|
||||||
|
|
||||||
use super::create_batch::Batch;
|
|
||||||
use crate::scheduler::process_batch::ProcessBatchInfo;
|
|
||||||
use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
|
|
||||||
use crate::utils::ProcessingBatch;
|
|
||||||
use crate::{Error, IndexScheduler, Result};
|
|
||||||
|
|
||||||
impl IndexScheduler {
|
|
||||||
pub(super) fn process_network_index_batch(
|
|
||||||
&self,
|
|
||||||
mut network_task: Task,
|
|
||||||
inner_batch: Box<Batch>,
|
|
||||||
current_batch: &mut ProcessingBatch,
|
|
||||||
progress: Progress,
|
|
||||||
) -> Result<(Vec<Task>, ProcessBatchInfo)> {
|
|
||||||
let (mut tasks, info) = self.process_batch(*inner_batch, current_batch, progress)?;
|
|
||||||
let KindWithContent::NetworkTopologyChange(network_topology_change) =
|
|
||||||
&mut network_task.kind
|
|
||||||
else {
|
|
||||||
tracing::error!("unexpected network kind for network task while processing batch");
|
|
||||||
return Err(Error::CorruptedTaskQueue);
|
|
||||||
};
|
|
||||||
for task in &tasks {
|
|
||||||
let Some(network) = task.network.as_ref() else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let Some(import) = network.import_data() else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
if let Some(index_name) = import.index_name.as_deref() {
|
|
||||||
network_topology_change.process_remote_tasks(
|
|
||||||
&import.remote_name,
|
|
||||||
index_name,
|
|
||||||
import.document_count,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
network_task.details = Some(network_topology_change.to_details());
|
|
||||||
|
|
||||||
tasks.push(network_task);
|
|
||||||
Ok((tasks, info))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn process_network_ready(
|
|
||||||
&self,
|
|
||||||
mut task: Task,
|
|
||||||
progress: Progress,
|
|
||||||
) -> Result<(Vec<Task>, ProcessBatchInfo)> {
|
|
||||||
let KindWithContent::NetworkTopologyChange(network_topology_change) = &mut task.kind else {
|
|
||||||
tracing::error!("network topology change task has the wrong kind with content");
|
|
||||||
return Err(Error::CorruptedTaskQueue);
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(task_network) = &task.network else {
|
|
||||||
tracing::error!("network topology change task has no network");
|
|
||||||
return Err(Error::CorruptedTaskQueue);
|
|
||||||
};
|
|
||||||
|
|
||||||
let origin;
|
|
||||||
let origin = match task_network.origin() {
|
|
||||||
Some(origin) => origin,
|
|
||||||
None => {
|
|
||||||
let myself = network_topology_change.in_name().expect("origin is not the leader");
|
|
||||||
origin = Origin {
|
|
||||||
remote_name: myself.to_string(),
|
|
||||||
task_uid: task.uid,
|
|
||||||
network_version: task_network.network_version(),
|
|
||||||
};
|
|
||||||
&origin
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some((remotes, out_name)) = network_topology_change.export_to_process() {
|
|
||||||
network_topology_change.set_moved(self.balance_documents(
|
|
||||||
remotes,
|
|
||||||
out_name,
|
|
||||||
network_topology_change.in_name(),
|
|
||||||
origin,
|
|
||||||
&progress,
|
|
||||||
&self.scheduler.must_stop_processing,
|
|
||||||
)?);
|
|
||||||
}
|
|
||||||
network_topology_change.update_state();
|
|
||||||
if network_topology_change.state() == NetworkTopologyState::Finished {
|
|
||||||
task.status = Status::Succeeded;
|
|
||||||
}
|
|
||||||
|
|
||||||
task.details = Some(network_topology_change.to_details());
|
|
||||||
Ok((vec![task], Default::default()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn balance_documents(
|
|
||||||
&self,
|
|
||||||
remotes: &BTreeMap<String, Remote>,
|
|
||||||
out_name: &str,
|
|
||||||
in_name: Option<&str>,
|
|
||||||
network_change_origin: &Origin,
|
|
||||||
progress: &Progress,
|
|
||||||
must_stop_processing: &crate::scheduler::MustStopProcessing,
|
|
||||||
) -> crate::Result<u64> {
|
|
||||||
let new_shards =
|
|
||||||
Shards::from_remotes_local(remotes.keys().map(String::as_str).chain(in_name), in_name);
|
|
||||||
|
|
||||||
// TECHDEBT: this spawns a `ureq` agent additionally to `reqwest`. We probably want to harmonize all of this.
|
|
||||||
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
|
|
||||||
|
|
||||||
let mut indexer_alloc = Bump::new();
|
|
||||||
|
|
||||||
let scheduler_rtxn = self.env.read_txn()?;
|
|
||||||
|
|
||||||
let index_count = self.index_mapper.index_count(&scheduler_rtxn)?;
|
|
||||||
|
|
||||||
// when the instance is empty, we still need to tell that to remotes, as they cannot know of that fact and will be waiting for
|
|
||||||
// data
|
|
||||||
if index_count == 0 {
|
|
||||||
for (remote_name, remote) in remotes {
|
|
||||||
let target = TargetInstance {
|
|
||||||
base_url: &remote.url,
|
|
||||||
api_key: remote.write_api_key.as_deref(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let res = self.export_no_index(
|
|
||||||
target,
|
|
||||||
out_name,
|
|
||||||
network_change_origin,
|
|
||||||
&agent,
|
|
||||||
must_stop_processing,
|
|
||||||
);
|
|
||||||
|
|
||||||
match res {
|
|
||||||
Ok(_) => {}
|
|
||||||
Err(err) => {
|
|
||||||
tracing::warn!("Could not signal not to wait documents to `{remote_name}` due to error: {err}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Ok(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
let moved_documents: Vec<u64> = self.index_mapper.try_for_each_index(
|
|
||||||
&scheduler_rtxn,
|
|
||||||
|index_uid, index| -> crate::Result<u64> {
|
|
||||||
indexer_alloc.reset();
|
|
||||||
let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
|
|
||||||
let index_rtxn = index.read_txn()?;
|
|
||||||
let all_docids = index.external_documents_ids();
|
|
||||||
let mut documents_to_move_to: hashbrown::HashMap<String, RoaringBitmap> =
|
|
||||||
hashbrown::HashMap::new();
|
|
||||||
let mut documents_to_delete = RoaringBitmap::new();
|
|
||||||
|
|
||||||
for res in all_docids.iter(&index_rtxn)? {
|
|
||||||
let (external_docid, docid) = res?;
|
|
||||||
match new_shards.processing_shard(external_docid) {
|
|
||||||
Some(shard) if shard.is_own => continue,
|
|
||||||
Some(shard) => {
|
|
||||||
documents_to_move_to.entry_ref(&shard.name).or_default().insert(docid);
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
documents_to_delete.insert(docid);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
|
|
||||||
|
|
||||||
for (remote_name, remote) in remotes {
|
|
||||||
let documents_to_move =
|
|
||||||
documents_to_move_to.remove(remote_name).unwrap_or_default();
|
|
||||||
|
|
||||||
let target = TargetInstance {
|
|
||||||
base_url: &remote.url,
|
|
||||||
api_key: remote.write_api_key.as_deref(),
|
|
||||||
};
|
|
||||||
let options = ExportOptions {
|
|
||||||
index_uid,
|
|
||||||
payload_size: None,
|
|
||||||
override_settings: false,
|
|
||||||
export_mode: super::process_export::ExportMode::NetworkBalancing {
|
|
||||||
index_count,
|
|
||||||
export_old_remote_name: out_name,
|
|
||||||
network_change_origin,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
let ctx = ExportContext {
|
|
||||||
index,
|
|
||||||
index_rtxn: &index_rtxn,
|
|
||||||
universe: &documents_to_move,
|
|
||||||
progress,
|
|
||||||
agent: &agent,
|
|
||||||
must_stop_processing,
|
|
||||||
};
|
|
||||||
|
|
||||||
let res = self.export_one_index(target, options, ctx);
|
|
||||||
|
|
||||||
match res {
|
|
||||||
Ok(_) =>{ documents_to_delete |= documents_to_move;}
|
|
||||||
Err(err) => {
|
|
||||||
tracing::warn!("Could not export documents to `{remote_name}` due to error: {err}\n - Note: Documents will be kept");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if documents_to_delete.is_empty() {
|
|
||||||
return Ok(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
let moved_count = documents_to_delete.len();
|
|
||||||
|
|
||||||
let mut new_fields_ids_map = fields_ids_map.clone();
|
|
||||||
|
|
||||||
// candidates not empty => index not empty => a primary key is set
|
|
||||||
let primary_key = index.primary_key(&index_rtxn)?.unwrap();
|
|
||||||
|
|
||||||
let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
|
|
||||||
.map_err(milli::Error::from)
|
|
||||||
.map_err(err)?;
|
|
||||||
|
|
||||||
let mut index_wtxn = index.write_txn()?;
|
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentDeletion::new();
|
|
||||||
indexer.delete_documents_by_docids(documents_to_delete);
|
|
||||||
let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
|
|
||||||
let embedders = index
|
|
||||||
.embedding_configs()
|
|
||||||
.embedding_configs(&index_wtxn)
|
|
||||||
.map_err(milli::Error::from)
|
|
||||||
.map_err(err)?;
|
|
||||||
let embedders = self.embedders(index_uid.to_string(), embedders)?;
|
|
||||||
let indexer_config = self.index_mapper.indexer_config();
|
|
||||||
let pool = &indexer_config.thread_pool;
|
|
||||||
|
|
||||||
indexer::index(
|
|
||||||
&mut index_wtxn,
|
|
||||||
index,
|
|
||||||
pool,
|
|
||||||
indexer_config.grenad_parameters(),
|
|
||||||
&fields_ids_map,
|
|
||||||
new_fields_ids_map,
|
|
||||||
None, // document deletion never changes primary key
|
|
||||||
&document_changes,
|
|
||||||
embedders,
|
|
||||||
&|| must_stop_processing.get(),
|
|
||||||
progress,
|
|
||||||
&EmbedderStats::default(),
|
|
||||||
)
|
|
||||||
.map_err(err)?;
|
|
||||||
|
|
||||||
// update stats
|
|
||||||
let mut mapper_wtxn = self.env.write_txn()?;
|
|
||||||
let stats =
|
|
||||||
crate::index_mapper::IndexStats::new(index, &index_wtxn).map_err(err)?;
|
|
||||||
self.index_mapper.store_stats_of(&mut mapper_wtxn, index_uid, &stats)?;
|
|
||||||
|
|
||||||
index_wtxn.commit()?;
|
|
||||||
// update stats after committing changes to index
|
|
||||||
mapper_wtxn.commit()?;
|
|
||||||
|
|
||||||
Ok(moved_count)
|
|
||||||
},
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let moved_documents: u64 = moved_documents.into_iter().sum();
|
|
||||||
|
|
||||||
Ok(moved_documents)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -2,7 +2,6 @@ mod autobatcher;
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod autobatcher_test;
|
mod autobatcher_test;
|
||||||
mod create_batch;
|
mod create_batch;
|
||||||
mod enterprise_edition;
|
|
||||||
mod process_batch;
|
mod process_batch;
|
||||||
mod process_dump_creation;
|
mod process_dump_creation;
|
||||||
mod process_export;
|
mod process_export;
|
||||||
@@ -22,6 +21,7 @@ use std::path::PathBuf;
|
|||||||
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
|
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use convert_case::{Case, Casing as _};
|
||||||
use meilisearch_types::error::ResponseError;
|
use meilisearch_types::error::ResponseError;
|
||||||
use meilisearch_types::heed::{Env, WithoutTls};
|
use meilisearch_types::heed::{Env, WithoutTls};
|
||||||
use meilisearch_types::milli;
|
use meilisearch_types::milli;
|
||||||
@@ -178,8 +178,6 @@ impl IndexScheduler {
|
|||||||
self.breakpoint(crate::test_utils::Breakpoint::Start);
|
self.breakpoint(crate::test_utils::Breakpoint::Start);
|
||||||
}
|
}
|
||||||
|
|
||||||
let previous_processing_batch = self.processing_tasks.write().unwrap().stop_processing();
|
|
||||||
|
|
||||||
if self.cleanup_enabled {
|
if self.cleanup_enabled {
|
||||||
let mut wtxn = self.env.write_txn()?;
|
let mut wtxn = self.env.write_txn()?;
|
||||||
self.queue.cleanup_task_queue(&mut wtxn)?;
|
self.queue.cleanup_task_queue(&mut wtxn)?;
|
||||||
@@ -187,16 +185,11 @@ impl IndexScheduler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let rtxn = self.env.read_txn().map_err(Error::HeedTransaction)?;
|
let rtxn = self.env.read_txn().map_err(Error::HeedTransaction)?;
|
||||||
let (batch, mut processing_batch) = match self
|
let (batch, mut processing_batch) =
|
||||||
.create_next_batch(&rtxn, &previous_processing_batch.processing)
|
match self.create_next_batch(&rtxn).map_err(|e| Error::CreateBatch(Box::new(e)))? {
|
||||||
.map_err(|e| Error::CreateBatch(Box::new(e)))?
|
Some(batch) => batch,
|
||||||
{
|
None => return Ok(TickOutcome::WaitForSignal),
|
||||||
Some(batch) => batch,
|
};
|
||||||
None => {
|
|
||||||
*self.processing_tasks.write().unwrap() = previous_processing_batch;
|
|
||||||
return Ok(TickOutcome::WaitForSignal);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let index_uid = batch.index_uid().map(ToOwned::to_owned);
|
let index_uid = batch.index_uid().map(ToOwned::to_owned);
|
||||||
drop(rtxn);
|
drop(rtxn);
|
||||||
|
|
||||||
@@ -267,14 +260,7 @@ impl IndexScheduler {
|
|||||||
self.maybe_fail(crate::test_utils::FailureLocation::AcquiringWtxn)?;
|
self.maybe_fail(crate::test_utils::FailureLocation::AcquiringWtxn)?;
|
||||||
|
|
||||||
progress.update_progress(BatchProgress::WritingTasksToDisk);
|
progress.update_progress(BatchProgress::WritingTasksToDisk);
|
||||||
|
|
||||||
processing_batch.finished();
|
processing_batch.finished();
|
||||||
// whether the batch made progress.
|
|
||||||
// a batch make progress if it failed or if it contains at least one fully processed (or cancelled) task.
|
|
||||||
//
|
|
||||||
// if a batch did not make progress, it means that all of its tasks are waiting on the scheduler to make progress,
|
|
||||||
// and so we must wait for new tasks. Such a batch is not persisted to DB, and is resumed on the next tick.
|
|
||||||
let mut batch_made_progress = false;
|
|
||||||
let mut stop_scheduler_forever = false;
|
let mut stop_scheduler_forever = false;
|
||||||
let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
|
let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
|
||||||
let mut canceled = RoaringBitmap::new();
|
let mut canceled = RoaringBitmap::new();
|
||||||
@@ -295,11 +281,7 @@ impl IndexScheduler {
|
|||||||
#[allow(unused_variables)]
|
#[allow(unused_variables)]
|
||||||
for (i, mut task) in tasks.into_iter().enumerate() {
|
for (i, mut task) in tasks.into_iter().enumerate() {
|
||||||
task_progress.fetch_add(1, Ordering::Relaxed);
|
task_progress.fetch_add(1, Ordering::Relaxed);
|
||||||
processing_batch.update_from_task(&task);
|
processing_batch.update(&mut task);
|
||||||
if !matches!(task.status, Status::Processing | Status::Enqueued) {
|
|
||||||
batch_made_progress = true;
|
|
||||||
processing_batch.finish_task(&mut task);
|
|
||||||
}
|
|
||||||
if task.status == Status::Canceled {
|
if task.status == Status::Canceled {
|
||||||
canceled.insert(task.uid);
|
canceled.insert(task.uid);
|
||||||
canceled_by = task.canceled_by;
|
canceled_by = task.canceled_by;
|
||||||
@@ -366,9 +348,6 @@ impl IndexScheduler {
|
|||||||
}
|
}
|
||||||
// In case of a failure we must get back and patch all the tasks with the error.
|
// In case of a failure we must get back and patch all the tasks with the error.
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
// always persist failed batches
|
|
||||||
batch_made_progress = true;
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
self.breakpoint(crate::test_utils::Breakpoint::ProcessBatchFailed);
|
self.breakpoint(crate::test_utils::Breakpoint::ProcessBatchFailed);
|
||||||
let (task_progress, task_progress_obj) = AtomicTaskStep::new(ids.len() as u32);
|
let (task_progress, task_progress_obj) = AtomicTaskStep::new(ids.len() as u32);
|
||||||
@@ -392,10 +371,7 @@ impl IndexScheduler {
|
|||||||
task.status = Status::Failed;
|
task.status = Status::Failed;
|
||||||
task.error = Some(error.clone());
|
task.error = Some(error.clone());
|
||||||
task.details = task.details.map(|d| d.to_failed());
|
task.details = task.details.map(|d| d.to_failed());
|
||||||
processing_batch.update_from_task(&task);
|
processing_batch.update(&mut task);
|
||||||
if !matches!(task.status, Status::Processing | Status::Enqueued) {
|
|
||||||
processing_batch.finish_task(&mut task);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
self.maybe_fail(
|
self.maybe_fail(
|
||||||
@@ -418,12 +394,44 @@ impl IndexScheduler {
|
|||||||
let ProcessBatchInfo { congestion, pre_commit_dabases_sizes, post_commit_dabases_sizes } =
|
let ProcessBatchInfo { congestion, pre_commit_dabases_sizes, post_commit_dabases_sizes } =
|
||||||
process_batch_info;
|
process_batch_info;
|
||||||
|
|
||||||
processing_batch.write_stats(
|
processing_batch.stats.progress_trace =
|
||||||
&progress,
|
progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
|
||||||
congestion,
|
processing_batch.stats.write_channel_congestion = congestion.map(|congestion| {
|
||||||
pre_commit_dabases_sizes,
|
let mut congestion_info = serde_json::Map::new();
|
||||||
post_commit_dabases_sizes,
|
congestion_info.insert("attempts".into(), congestion.attempts.into());
|
||||||
);
|
congestion_info.insert("blocking_attempts".into(), congestion.blocking_attempts.into());
|
||||||
|
congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
|
||||||
|
congestion_info
|
||||||
|
});
|
||||||
|
processing_batch.stats.internal_database_sizes = pre_commit_dabases_sizes
|
||||||
|
.iter()
|
||||||
|
.flat_map(|(dbname, pre_size)| {
|
||||||
|
post_commit_dabases_sizes
|
||||||
|
.get(dbname)
|
||||||
|
.map(|post_size| {
|
||||||
|
use std::cmp::Ordering::{Equal, Greater, Less};
|
||||||
|
|
||||||
|
use byte_unit::Byte;
|
||||||
|
use byte_unit::UnitType::Binary;
|
||||||
|
|
||||||
|
let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
|
||||||
|
let diff_size = post_size.abs_diff(*pre_size) as u64;
|
||||||
|
let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
|
||||||
|
let sign = match post_size.cmp(pre_size) {
|
||||||
|
Equal => return None,
|
||||||
|
Greater => "+",
|
||||||
|
Less => "-",
|
||||||
|
};
|
||||||
|
|
||||||
|
Some((
|
||||||
|
dbname.to_case(Case::Camel),
|
||||||
|
format!("{post:#.2} ({sign}{diff:#.2})").into(),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
.into_iter()
|
||||||
|
.flatten()
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
if let Some(congestion) = congestion {
|
if let Some(congestion) = congestion {
|
||||||
tracing::debug!(
|
tracing::debug!(
|
||||||
@@ -436,49 +444,46 @@ impl IndexScheduler {
|
|||||||
|
|
||||||
tracing::debug!("call trace: {:?}", progress.accumulated_durations());
|
tracing::debug!("call trace: {:?}", progress.accumulated_durations());
|
||||||
|
|
||||||
if batch_made_progress {
|
self.queue.write_batch(&mut wtxn, processing_batch, &ids)?;
|
||||||
self.queue.write_batch(&mut wtxn, processing_batch, &ids)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
self.maybe_fail(crate::test_utils::FailureLocation::CommittingWtxn)?;
|
self.maybe_fail(crate::test_utils::FailureLocation::CommittingWtxn)?;
|
||||||
|
|
||||||
wtxn.commit().map_err(Error::HeedTransaction)?;
|
wtxn.commit().map_err(Error::HeedTransaction)?;
|
||||||
|
|
||||||
if batch_made_progress {
|
// We should stop processing AFTER everything is processed and written to disk otherwise, a batch (which only lives in RAM) may appear in the processing task
|
||||||
// We should stop processing AFTER everything is processed and written to disk otherwise, a batch (which only lives in RAM) may appear in the processing task
|
// and then become « not found » for some time until the commit everything is written and the final commit is made.
|
||||||
// and then become « not found » for some time until the commit everything is written and the final commit is made.
|
self.processing_tasks.write().unwrap().stop_processing();
|
||||||
self.processing_tasks.write().unwrap().stop_processing();
|
|
||||||
|
|
||||||
// Once the tasks are committed, we should delete all the update files associated ASAP to avoid leaking files in case of a restart
|
// Once the tasks are committed, we should delete all the update files associated ASAP to avoid leaking files in case of a restart
|
||||||
tracing::debug!("Deleting the update files");
|
tracing::debug!("Deleting the update files");
|
||||||
|
|
||||||
//We take one read transaction **per thread**. Then, every thread is going to pull out new IDs from the roaring bitmap with the help of an atomic shared index into the bitmap
|
//We take one read transaction **per thread**. Then, every thread is going to pull out new IDs from the roaring bitmap with the help of an atomic shared index into the bitmap
|
||||||
let idx = AtomicU32::new(0);
|
let idx = AtomicU32::new(0);
|
||||||
(0..current_num_threads()).into_par_iter().try_for_each(|_| -> Result<()> {
|
(0..current_num_threads()).into_par_iter().try_for_each(|_| -> Result<()> {
|
||||||
let rtxn = self.read_txn()?;
|
let rtxn = self.read_txn()?;
|
||||||
while let Some(id) = ids.select(idx.fetch_add(1, Ordering::Relaxed)) {
|
while let Some(id) = ids.select(idx.fetch_add(1, Ordering::Relaxed)) {
|
||||||
let task = self
|
let task = self
|
||||||
.queue
|
.queue
|
||||||
.tasks
|
.tasks
|
||||||
.get_task(&rtxn, id)
|
.get_task(&rtxn, id)
|
||||||
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?
|
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?
|
||||||
.ok_or(Error::CorruptedTaskQueue)?;
|
.ok_or(Error::CorruptedTaskQueue)?;
|
||||||
if let Err(e) = self.queue.delete_persisted_task_data(&task) {
|
if let Err(e) = self.queue.delete_persisted_task_data(&task) {
|
||||||
tracing::error!(
|
tracing::error!(
|
||||||
"Failure to delete the content files associated with task {}. Error: {e}",
|
"Failure to delete the content files associated with task {}. Error: {e}",
|
||||||
task.uid
|
task.uid
|
||||||
);
|
);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(())
|
}
|
||||||
})?;
|
Ok(())
|
||||||
|
})?;
|
||||||
|
|
||||||
self.notify_webhooks(ids);
|
self.notify_webhooks(ids);
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
self.breakpoint(crate::test_utils::Breakpoint::AfterProcessing);
|
self.breakpoint(crate::test_utils::Breakpoint::AfterProcessing);
|
||||||
|
|
||||||
if stop_scheduler_forever {
|
if stop_scheduler_forever {
|
||||||
Ok(TickOutcome::StopProcessingForever)
|
Ok(TickOutcome::StopProcessingForever)
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -539,10 +539,6 @@ impl IndexScheduler {
|
|||||||
|
|
||||||
Ok((tasks, ProcessBatchInfo::default()))
|
Ok((tasks, ProcessBatchInfo::default()))
|
||||||
}
|
}
|
||||||
Batch::NetworkIndexBatch { network_task, inner_batch } => {
|
|
||||||
self.process_network_index_batch(network_task, inner_batch, current_batch, progress)
|
|
||||||
}
|
|
||||||
Batch::NetworkReady { task } => self.process_network_ready(task, progress),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use std::io::{self, Write as _};
|
use std::io::{self, Write as _};
|
||||||
use std::ops::ControlFlow;
|
|
||||||
use std::sync::atomic;
|
use std::sync::atomic;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
@@ -8,7 +7,6 @@ use backoff::ExponentialBackoff;
|
|||||||
use byte_unit::Byte;
|
use byte_unit::Byte;
|
||||||
use flate2::write::GzEncoder;
|
use flate2::write::GzEncoder;
|
||||||
use flate2::Compression;
|
use flate2::Compression;
|
||||||
use meilisearch_types::error::Code;
|
|
||||||
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
||||||
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
|
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
|
||||||
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
|
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
|
||||||
@@ -17,11 +15,7 @@ use meilisearch_types::milli::update::{request_threads, Setting};
|
|||||||
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
|
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
|
||||||
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
|
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
|
||||||
use meilisearch_types::settings::{self, SecretPolicy};
|
use meilisearch_types::settings::{self, SecretPolicy};
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::{
|
|
||||||
headers, ImportData, ImportMetadata, Origin,
|
|
||||||
};
|
|
||||||
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
|
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
|
||||||
use roaring::RoaringBitmap;
|
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use ureq::{json, Response};
|
use ureq::{json, Response};
|
||||||
|
|
||||||
@@ -56,7 +50,6 @@ impl IndexScheduler {
|
|||||||
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
|
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
|
||||||
let must_stop_processing = self.scheduler.must_stop_processing.clone();
|
let must_stop_processing = self.scheduler.must_stop_processing.clone();
|
||||||
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
|
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
|
||||||
let err = |err| Error::from_milli(err, Some(uid.to_string()));
|
|
||||||
if must_stop_processing.get() {
|
if must_stop_processing.get() {
|
||||||
return Err(Error::AbortedTask);
|
return Err(Error::AbortedTask);
|
||||||
}
|
}
|
||||||
@@ -68,426 +61,261 @@ impl IndexScheduler {
|
|||||||
));
|
));
|
||||||
|
|
||||||
let ExportIndexSettings { filter, override_settings } = export_settings;
|
let ExportIndexSettings { filter, override_settings } = export_settings;
|
||||||
|
|
||||||
let index = self.index(uid)?;
|
let index = self.index(uid)?;
|
||||||
let index_rtxn = index.read_txn()?;
|
let index_rtxn = index.read_txn()?;
|
||||||
let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
|
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
|
||||||
let filter_universe =
|
|
||||||
filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
|
// First, check if the index already exists
|
||||||
let whole_universe =
|
let url = format!("{base_url}/indexes/{uid}");
|
||||||
index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
|
let response = retry(&must_stop_processing, || {
|
||||||
|
let mut request = agent.get(&url);
|
||||||
|
if let Some(bearer) = &bearer {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
|
||||||
|
request.send_bytes(Default::default()).map_err(into_backoff_error)
|
||||||
|
});
|
||||||
|
let index_exists = match response {
|
||||||
|
Ok(response) => response.status() == 200,
|
||||||
|
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let primary_key = index
|
||||||
|
.primary_key(&index_rtxn)
|
||||||
|
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
||||||
|
|
||||||
|
// Create the index
|
||||||
|
if !index_exists {
|
||||||
|
let url = format!("{base_url}/indexes");
|
||||||
|
retry(&must_stop_processing, || {
|
||||||
|
let mut request = agent.post(&url);
|
||||||
|
if let Some(bearer) = &bearer {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
|
||||||
|
request.send_json(&index_param).map_err(into_backoff_error)
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Patch the index primary key
|
||||||
|
if index_exists && *override_settings {
|
||||||
|
let url = format!("{base_url}/indexes/{uid}");
|
||||||
|
retry(&must_stop_processing, || {
|
||||||
|
let mut request = agent.patch(&url);
|
||||||
|
if let Some(bearer) = &bearer {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
let index_param = json!({ "primaryKey": primary_key });
|
||||||
|
request.send_json(&index_param).map_err(into_backoff_error)
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send the index settings
|
||||||
|
if !index_exists || *override_settings {
|
||||||
|
let mut settings =
|
||||||
|
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
|
||||||
|
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
||||||
|
// Remove the experimental chat setting if not enabled
|
||||||
|
if self.features().check_chat_completions("exporting chat settings").is_err() {
|
||||||
|
settings.chat = Setting::NotSet;
|
||||||
|
}
|
||||||
|
// Retry logic for sending settings
|
||||||
|
let url = format!("{base_url}/indexes/{uid}/settings");
|
||||||
|
retry(&must_stop_processing, || {
|
||||||
|
let mut request = agent.patch(&url);
|
||||||
|
if let Some(bearer) = bearer.as_ref() {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
request.send_json(settings.clone()).map_err(into_backoff_error)
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let filter = filter
|
||||||
|
.as_ref()
|
||||||
|
.map(Filter::from_json)
|
||||||
|
.transpose()
|
||||||
|
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
|
||||||
|
.flatten();
|
||||||
|
|
||||||
|
let filter_universe = filter
|
||||||
|
.map(|f| f.evaluate(&index_rtxn, &index))
|
||||||
|
.transpose()
|
||||||
|
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
||||||
|
let whole_universe = index
|
||||||
|
.documents_ids(&index_rtxn)
|
||||||
|
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
||||||
let universe = filter_universe.unwrap_or(whole_universe);
|
let universe = filter_universe.unwrap_or(whole_universe);
|
||||||
let target = TargetInstance { base_url, api_key };
|
|
||||||
let ctx = ExportContext {
|
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
|
||||||
index: &index,
|
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
|
||||||
index_rtxn: &index_rtxn,
|
|
||||||
universe: &universe,
|
// We don't need to keep this one alive as we will
|
||||||
progress: &progress,
|
// spawn many threads to process the documents
|
||||||
agent: &agent,
|
drop(index_rtxn);
|
||||||
must_stop_processing: &must_stop_processing,
|
|
||||||
};
|
let total_documents = universe.len() as u32;
|
||||||
let options = ExportOptions {
|
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
|
||||||
index_uid: uid,
|
progress.update_progress(progress_step);
|
||||||
payload_size,
|
|
||||||
override_settings: *override_settings,
|
|
||||||
export_mode: ExportMode::ExportRoute,
|
|
||||||
};
|
|
||||||
let total_documents = self.export_one_index(target, options, ctx)?;
|
|
||||||
|
|
||||||
output.insert(
|
output.insert(
|
||||||
IndexUidPattern::new_unchecked(uid.clone()),
|
IndexUidPattern::new_unchecked(uid.clone()),
|
||||||
DetailsExportIndexSettings {
|
DetailsExportIndexSettings {
|
||||||
settings: (*export_settings).clone(),
|
settings: (*export_settings).clone(),
|
||||||
matched_documents: Some(total_documents),
|
matched_documents: Some(total_documents as u64),
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
|
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
|
||||||
|
let documents_url = format!("{base_url}/indexes/{uid}/documents");

let results = request_threads()
    .broadcast(|ctx| {
        let index_rtxn = index
            .read_txn()
            .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;

        let mut buffer = Vec::new();
        let mut tmp_buffer = Vec::new();
        let mut compressed_buffer = Vec::new();
        for (i, docid) in universe.iter().enumerate() {
            if i % ctx.num_threads() != ctx.index() {
                continue;
            }

            let document = index
                .document(&index_rtxn, docid)
                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;

            let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;

            // TODO definitely factorize this code
            'inject_vectors: {
                let embeddings = index
                    .embeddings(&index_rtxn, docid)
                    .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;

                if embeddings.is_empty() {
                    break 'inject_vectors;
                }

                let vectors = document
                    .entry(RESERVED_VECTORS_FIELD_NAME)
                    .or_insert(serde_json::Value::Object(Default::default()));

                let serde_json::Value::Object(vectors) = vectors else {
                    return Err(Error::from_milli(
                        milli::Error::UserError(
                            milli::UserError::InvalidVectorsMapType {
                                document_id: {
                                    if let Ok(Some(Ok(index))) = index
                                        .external_id_of(
                                            &index_rtxn,
                                            std::iter::once(docid),
                                        )
                                        .map(|it| it.into_iter().next())
                                    {
                                        index
                                    } else {
                                        format!("internal docid={docid}")
                                    }
                                },
                                value: vectors.clone(),
                            },
                        ),
                        Some(uid.to_string()),
                    ));
                };

                for (
                    embedder_name,
                    EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
                ) in embeddings
                {
                    let embeddings = ExplicitVectors {
                        embeddings: Some(
                            VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
                        ),
                        regenerate: regenerate &&
                            // Meilisearch does not handle well dumps with fragments, because as the fragments
                            // are marked as user-provided,
                            // all embeddings would be regenerated on any settings change or document update.
                            // To prevent this, we mark embeddings has non regenerate in this case.
                            !has_fragments,
                    };
                    vectors.insert(
                        embedder_name,
                        serde_json::to_value(embeddings).unwrap(),
                    );
                }
            }

            tmp_buffer.clear();
            serde_json::to_writer(&mut tmp_buffer, &document)
                .map_err(milli::InternalError::from)
                .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;

            // Make sure we put at least one document in the buffer even
            // though we might go above the buffer limit before sending
            if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
                // We compress the documents before sending them
                let mut encoder =
                    GzEncoder::new(&mut compressed_buffer, Compression::default());
                encoder
                    .write_all(&buffer)
                    .map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
                encoder
                    .finish()
                    .map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;

                retry(&must_stop_processing, || {
                    let mut request = agent.post(&documents_url);
                    request = request.set("Content-Type", "application/x-ndjson");
                    request = request.set("Content-Encoding", "gzip");
                    if let Some(bearer) = &bearer {
                        request = request.set("Authorization", bearer);
                    }
                    request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
                })?;
                buffer.clear();
                compressed_buffer.clear();
            }
            buffer.extend_from_slice(&tmp_buffer);

            if i > 0 && i % 100 == 0 {
                step.fetch_add(100, atomic::Ordering::Relaxed);
            }
        }

        retry(&must_stop_processing, || {
            let mut request = agent.post(&documents_url);
            request = request.set("Content-Type", "application/x-ndjson");
            if let Some(bearer) = &bearer {
                request = request.set("Authorization", bearer);
            }
            request.send_bytes(&buffer).map_err(into_backoff_error)
        })?;

        Ok(())
    })
    .map_err(|e| {
        Error::from_milli(
            milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
            Some(uid.to_string()),
        )
    })?;
for result in results {
    result?;
}

step.store(total_documents, atomic::Ordering::Relaxed);
}

Ok(output)
}
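// A standalone sketch of the round-robin work splitting used in the export code
// above and below (`i % num_threads != index`), assuming a plain rayon thread
// pool instead of Meilisearch's `request_threads()` helper. Each thread of the
// `broadcast` keeps only the positions matching its own index, so every
// document is serialized and sent by exactly one thread.
use rayon::ThreadPoolBuilder;

fn main() {
    let docids: Vec<u32> = (0..10).collect();
    let pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap();

    // `broadcast` runs the closure once per worker thread and collects every return value.
    let per_thread: Vec<Vec<u32>> = pool.broadcast(|ctx| {
        docids
            .iter()
            .enumerate()
            .filter(|(i, _)| i % ctx.num_threads() == ctx.index())
            .map(|(_, docid)| *docid)
            .collect()
    });

    // Every document id ends up in exactly one per-thread batch.
    let total: usize = per_thread.iter().map(Vec::len).sum();
    assert_eq!(total, docids.len());
}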
pub(super) fn export_one_index(
    &self,
    target: TargetInstance<'_>,
    options: ExportOptions<'_>,
    ctx: ExportContext<'_>,
) -> Result<u64, Error> {
    let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
    let total_index_documents = ctx.universe.len();
    let task_network = options.task_network(total_index_documents);

    let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
    let url = format!(
        "{base_url}/indexes/{index_uid}",
        base_url = target.base_url,
        index_uid = options.index_uid
    );
    let response = retry(ctx.must_stop_processing, || {
        let mut request = ctx.agent.get(&url);
        if let Some(bearer) = &bearer {
            request = request.set("Authorization", bearer);
        }

        request.send_bytes(Default::default()).map_err(into_backoff_error)
    });
    let index_exists = match response {
        Ok(response) => response.status() == 200,
        Err(Error::FromRemoteWhenExporting { code, .. })
            if code == Code::IndexNotFound.name() =>
        {
            false
        }
        Err(e) => return Err(e),
    };
    let primary_key =
        ctx.index.primary_key(ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
    if !index_exists {
        let url = format!("{base_url}/indexes", base_url = target.base_url);
        retry(ctx.must_stop_processing, || {
            let mut request = ctx.agent.post(&url);
            if let Some(bearer) = &bearer {
                request = request.set("Authorization", bearer);
            }
            let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
            request.send_json(&index_param).map_err(into_backoff_error)
        })?;
    }
    if index_exists && options.override_settings {
        retry(ctx.must_stop_processing, || {
            let mut request = ctx.agent.patch(&url);
            if let Some(bearer) = &bearer {
                request = request.set("Authorization", bearer);
            }
            let index_param = json!({ "primaryKey": primary_key });
            request.send_json(&index_param).map_err(into_backoff_error)
        })?;
    }
    if !index_exists || options.override_settings {
        let mut settings =
            settings::settings(ctx.index, ctx.index_rtxn, SecretPolicy::RevealSecrets)
                .map_err(err)?;
        // Remove the experimental chat setting if not enabled
        if self.features().check_chat_completions("exporting chat settings").is_err() {
            settings.chat = Setting::NotSet;
        }
        // Retry logic for sending settings
        let url = format!(
            "{base_url}/indexes/{index_uid}/settings",
            base_url = target.base_url,
            index_uid = options.index_uid
        );

        let _ = handle_response(retry(ctx.must_stop_processing, || {
            let mut request = ctx.agent.patch(&url);

            if let Some((import_data, origin, metadata)) = &task_network {
                request = set_network_ureq_headers(request, import_data, origin, metadata);
            }

            if let Some(bearer) = bearer.as_ref() {
                request = request.set("Authorization", bearer);
            }
            request.send_json(settings.clone()).map_err(into_backoff_error)
        }))?;
    }

    let fields_ids_map = ctx.index.fields_ids_map(ctx.index_rtxn)?;
    let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
    let total_documents = ctx.universe.len() as u32;
    let (step, progress_step) = AtomicDocumentStep::new(total_documents);
    ctx.progress.update_progress(progress_step);

    let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
    let documents_url = format!(
        "{base_url}/indexes/{index_uid}/documents",
        base_url = target.base_url,
        index_uid = options.index_uid
    );

    // no document to send, but we must still send a task when performing network balancing
    if ctx.universe.is_empty() {
        if let Some((import_data, network_change_origin, metadata)) = task_network {
            let mut compressed_buffer = Vec::new();
            // ignore control flow, we're returning anyway
            let _ = send_buffer(
                b" ", // needs something otherwise meili complains about missing payload
                &mut compressed_buffer,
                ctx.must_stop_processing,
                ctx.agent,
                &documents_url,
                bearer.as_deref(),
                Some(&(import_data, network_change_origin.clone(), metadata)),
                &err,
            )?;
        }
        return Ok(0);
    }

    let results = request_threads()
        .broadcast(|broadcast| {
            let mut task_network = options.task_network(total_index_documents);

            let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;

            let mut buffer = Vec::new();
            let mut tmp_buffer = Vec::new();
            let mut compressed_buffer = Vec::new();
            for (i, docid) in ctx.universe.iter().enumerate() {
                if i % broadcast.num_threads() != broadcast.index() {
                    continue;
                }
                if let Some((import_data, _, metadata)) = &mut task_network {
                    import_data.document_count += 1;
                    metadata.task_key = Some(docid);
                }

                let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;

                let mut document =
                    obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;

                // TODO definitely factorize this code
                'inject_vectors: {
                    let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;

                    if embeddings.is_empty() {
                        break 'inject_vectors;
                    }

                    let vectors = document
                        .entry(RESERVED_VECTORS_FIELD_NAME)
                        .or_insert(serde_json::Value::Object(Default::default()));

                    let serde_json::Value::Object(vectors) = vectors else {
                        return Err(err(milli::Error::UserError(
                            milli::UserError::InvalidVectorsMapType {
                                document_id: {
                                    if let Ok(Some(Ok(index))) = ctx
                                        .index
                                        .external_id_of(&index_rtxn, std::iter::once(docid))
                                        .map(|it| it.into_iter().next())
                                    {
                                        index
                                    } else {
                                        format!("internal docid={docid}")
                                    }
                                },
                                value: vectors.clone(),
                            },
                        )));
                    };

                    for (
                        embedder_name,
                        EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
                    ) in embeddings
                    {
                        let embeddings = ExplicitVectors {
                            embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
                                embeddings,
                            )),
                            regenerate: regenerate &&
                                // Meilisearch does not handle well dumps with fragments, because as the fragments
                                // are marked as user-provided,
                                // all embeddings would be regenerated on any settings change or document update.
                                // To prevent this, we mark embeddings has non regenerate in this case.
                                !has_fragments,
                        };
                        vectors
                            .insert(embedder_name, serde_json::to_value(embeddings).unwrap());
                    }
                }

                tmp_buffer.clear();
                serde_json::to_writer(&mut tmp_buffer, &document)
                    .map_err(milli::InternalError::from)
                    .map_err(milli::Error::from)
                    .map_err(err)?;

                // Make sure we put at least one document in the buffer even
                // though we might go above the buffer limit before sending
                if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
                    let control_flow = send_buffer(
                        &buffer,
                        &mut compressed_buffer,
                        ctx.must_stop_processing,
                        ctx.agent,
                        &documents_url,
                        bearer.as_deref(),
                        task_network.as_ref(),
                        &err,
                    )?;
                    buffer.clear();
                    compressed_buffer.clear();
                    if let Some((import_data, _, metadata)) = &mut task_network {
                        import_data.document_count = 0;
                        metadata.task_key = None;
                    }
                    if control_flow.is_break() {
                        return Ok(());
                    }
                }
                buffer.extend_from_slice(&tmp_buffer);

                if i > 0 && i % 100 == 0 {
                    step.fetch_add(100, atomic::Ordering::Relaxed);
                }
            }

            // send the last buffered documents if any
            if !buffer.is_empty() {
                // ignore control flow here
                let _ = send_buffer(
                    &buffer,
                    &mut compressed_buffer,
                    ctx.must_stop_processing,
                    ctx.agent,
                    &documents_url,
                    bearer.as_deref(),
                    task_network.as_ref(),
                    &err,
                )?;
            }

            Ok(())
        })
        .map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
    for result in results {
        result?;
    }
    step.store(total_documents, atomic::Ordering::Relaxed);
    Ok(total_documents as u64)
}
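// A minimal sketch of the payload batching rule used by `export_one_index`
// above: serialize each document to one NDJSON line, flush the accumulated
// buffer once adding the next line would exceed the payload limit, and always
// keep at least one document per payload. `flush` is a hypothetical stand-in
// for the gzip-and-POST step that `send_buffer` performs; the 64-byte limit is
// only for demonstration (the real default is 20 MiB).
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let limit = 64;
    let docs = vec![
        json!({"id": 1, "title": "first"}),
        json!({"id": 2, "title": "second"}),
        json!({"id": 3, "title": "third"}),
    ];

    let flush = |chunk: &[u8]| println!("sending {} bytes", chunk.len());

    let mut buffer: Vec<u8> = Vec::new();
    let mut tmp: Vec<u8> = Vec::new();
    for doc in &docs {
        tmp.clear();
        serde_json::to_writer(&mut tmp, doc)?;
        tmp.push(b'\n');

        // Never flush an empty buffer: a single oversized document still goes out alone.
        if !buffer.is_empty() && buffer.len() + tmp.len() > limit {
            flush(&buffer);
            buffer.clear();
        }
        buffer.extend_from_slice(&tmp);
    }
    // Send whatever is left, mirroring the "last buffered documents" step above.
    if !buffer.is_empty() {
        flush(&buffer);
    }
    Ok(())
}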
pub(super) fn export_no_index(
    &self,
    target: TargetInstance<'_>,
    export_old_remote_name: &str,
    network_change_origin: &Origin,
    agent: &ureq::Agent,
    must_stop_processing: &MustStopProcessing,
) -> Result<(), Error> {
    let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
    let url = format!("{base_url}/network", base_url = target.base_url,);

    {
        let _ = handle_response(retry(must_stop_processing, || {
            let request = agent.patch(&url);
            let mut request = set_network_ureq_headers(
                request,
                &ImportData {
                    remote_name: export_old_remote_name.to_string(),
                    index_name: None,
                    document_count: 0,
                },
                network_change_origin,
                &ImportMetadata { index_count: 0, task_key: None, total_index_documents: 0 },
            );
            request = request.set("Content-Type", "application/json");
            if let Some(bearer) = &bearer {
                request = request.set("Authorization", bearer);
            }
            request
                .send_json(
                    // empty payload that will be disregarded
                    serde_json::Value::Object(Default::default()),
                )
                .map_err(into_backoff_error)
        }))?;
    }

    Ok(())
}
}
fn set_network_ureq_headers(
    request: ureq::Request,
    import_data: &ImportData,
    origin: &Origin,
    metadata: &ImportMetadata,
) -> ureq::Request {
    let request = request
        .set(headers::PROXY_ORIGIN_REMOTE_HEADER, &origin.remote_name)
        .set(headers::PROXY_ORIGIN_TASK_UID_HEADER, &origin.task_uid.to_string())
        .set(headers::PROXY_ORIGIN_NETWORK_VERSION_HEADER, &origin.network_version.to_string())
        .set(headers::PROXY_IMPORT_REMOTE_HEADER, &import_data.remote_name)
        .set(headers::PROXY_IMPORT_DOCS_HEADER, &import_data.document_count.to_string())
        .set(headers::PROXY_IMPORT_INDEX_COUNT_HEADER, &metadata.index_count.to_string())
        .set(
            headers::PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
            &metadata.total_index_documents.to_string(),
        );
    let request = if let Some(index_name) = import_data.index_name.as_deref() {
        request.set(headers::PROXY_IMPORT_INDEX_HEADER, index_name)
    } else {
        request
    };
    if let Some(task_key) = metadata.task_key {
        request.set(headers::PROXY_IMPORT_TASK_KEY_HEADER, &task_key.to_string())
    } else {
        request
    }
}
#[allow(clippy::too_many_arguments)]
fn send_buffer<'a>(
    buffer: &'a [u8],
    mut compressed_buffer: &'a mut Vec<u8>,
    must_stop_processing: &MustStopProcessing,
    agent: &ureq::Agent,
    documents_url: &'a str,
    bearer: Option<&'a str>,
    task_network: Option<&(ImportData, Origin, ImportMetadata)>,
    err: &'a impl Fn(milli::Error) -> crate::Error,
) -> Result<ControlFlow<(), ()>> {
    // We compress the documents before sending them
    let mut encoder: GzEncoder<&mut &mut Vec<u8>> =
        GzEncoder::new(&mut compressed_buffer, Compression::default());
    encoder.write_all(buffer).map_err(milli::Error::from).map_err(err)?;
    encoder.finish().map_err(milli::Error::from).map_err(err)?;

    let res = retry(must_stop_processing, || {
        let mut request = agent.post(documents_url);
        request = request.set("Content-Type", "application/x-ndjson");
        request = request.set("Content-Encoding", "gzip");
        if let Some(bearer) = bearer {
            request = request.set("Authorization", bearer);
        }
        if let Some((import_data, origin, metadata)) = task_network {
            request = set_network_ureq_headers(request, import_data, origin, metadata);
        }
        request.send_bytes(compressed_buffer).map_err(into_backoff_error)
    });

    handle_response(res)
}
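// A self-contained sketch of the compression step inside `send_buffer` above:
// gzip the NDJSON payload into a reusable `Vec<u8>` with flate2, then decode it
// again here to check the round trip instead of POSTing it with ureq.
use std::io::{Read, Write};

use flate2::read::GzDecoder;
use flate2::write::GzEncoder;
use flate2::Compression;

fn main() -> std::io::Result<()> {
    let payload = b"{\"id\":1}\n{\"id\":2}\n";
    let mut compressed = Vec::new();

    // Same calls as above: write the raw bytes, then `finish` to flush the gzip trailer.
    let mut encoder = GzEncoder::new(&mut compressed, Compression::default());
    encoder.write_all(payload)?;
    encoder.finish()?;

    let mut decoded = Vec::new();
    GzDecoder::new(compressed.as_slice()).read_to_end(&mut decoded)?;
    assert_eq!(decoded.as_slice(), payload.as_slice());

    // `compressed` is what would be sent with `Content-Type: application/x-ndjson`
    // and `Content-Encoding: gzip`; clearing it afterwards lets the buffer be reused.
    Ok(())
}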
fn handle_response(res: Result<Response>) -> Result<ControlFlow<()>> {
    match res {
        Ok(_response) => Ok(ControlFlow::Continue(())),
        Err(Error::FromRemoteWhenExporting { code, .. })
            if code == Code::ImportTaskAlreadyReceived.name() =>
        {
            Ok(ControlFlow::Continue(()))
        }
        Err(Error::FromRemoteWhenExporting { code, message, .. })
            if code == Code::ImportTaskUnknownRemote.name() =>
        {
            tracing::warn!("remote answered with: {message}");
            Ok(ControlFlow::Break(()))
        }
        // note: there has already been many attempts to get this due to exponential backoff
        Err(Error::FromRemoteWhenExporting { code, message, .. })
            if code == Code::ImportTaskWithoutNetworkTask.name() =>
        {
            tracing::warn!("remote answered with: {message}");
            Ok(ControlFlow::Break(()))
        }
        Err(e) => {
            tracing::warn!("error while exporting: {e}");
            Err(e)
        }
    }
}
}

fn retry<F>(must_stop_processing: &MustStopProcessing, send_request: F) -> Result<ureq::Response>
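// Sketch of how the `ControlFlow` returned by `handle_response` above is meant
// to be consumed: `Continue` keeps streaming payloads, while `Break` stops the
// export of this index without turning the situation into a hard error (for
// instance when the remote reports the task as already received). `send_chunk`
// is a hypothetical stand-in for `send_buffer`.
use std::ops::ControlFlow;

fn send_chunk(i: usize) -> Result<ControlFlow<()>, String> {
    // Pretend the remote asks us to stop after the second chunk.
    Ok(if i < 2 { ControlFlow::Continue(()) } else { ControlFlow::Break(()) })
}

fn main() -> Result<(), String> {
    for i in 0..5 {
        match send_chunk(i)? {
            ControlFlow::Continue(()) => println!("chunk {i} accepted"),
            ControlFlow::Break(()) => {
                println!("remote asked us to stop at chunk {i}");
                break;
            }
        }
    }
    Ok(())
}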
@@ -546,63 +374,4 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
    }
}

// export_one_index arguments
pub(super) struct TargetInstance<'a> {
    pub(super) base_url: &'a str,
    pub(super) api_key: Option<&'a str>,
}

pub(super) struct ExportOptions<'a> {
    pub(super) index_uid: &'a str,
    pub(super) payload_size: Option<&'a Byte>,
    pub(super) override_settings: bool,
    pub(super) export_mode: ExportMode<'a>,
}

impl ExportOptions<'_> {
    fn task_network(
        &self,
        total_index_documents: u64,
    ) -> Option<(ImportData, Origin, ImportMetadata)> {
        if let ExportMode::NetworkBalancing {
            index_count,
            export_old_remote_name,
            network_change_origin,
        } = self.export_mode
        {
            Some((
                ImportData {
                    remote_name: export_old_remote_name.to_string(),
                    index_name: Some(self.index_uid.to_string()),
                    document_count: 0,
                },
                network_change_origin.clone(),
                ImportMetadata { index_count, task_key: None, total_index_documents },
            ))
        } else {
            None
        }
    }
}

pub(super) struct ExportContext<'a> {
    pub(super) index: &'a meilisearch_types::milli::Index,
    pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
    pub(super) universe: &'a RoaringBitmap,
    pub(super) progress: &'a Progress,
    pub(super) agent: &'a ureq::Agent,
    pub(super) must_stop_processing: &'a MustStopProcessing,
}

pub(super) enum ExportMode<'a> {
    ExportRoute,
    NetworkBalancing {
        index_count: u64,

        export_old_remote_name: &'a str,
        network_change_origin: &'a Origin,
    },
}

// progress related
enum ExportIndex {}
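// Simplified stand-ins for the `ExportMode` / `task_network` decision defined
// above: only the network-balancing mode carries import metadata for the
// outgoing payloads, while the plain export route sends documents without any
// network headers. The types here are hypothetical reductions of the real
// `ExportOptions`, `ImportData` and `ImportMetadata`.
#[allow(dead_code)]
enum ExportMode<'a> {
    ExportRoute,
    NetworkBalancing { index_count: u64, export_old_remote_name: &'a str },
}

struct ImportMetadata {
    index_count: u64,
    total_index_documents: u64,
}

fn task_network(mode: &ExportMode<'_>, total_index_documents: u64) -> Option<ImportMetadata> {
    match mode {
        ExportMode::NetworkBalancing { index_count, .. } => {
            Some(ImportMetadata { index_count: *index_count, total_index_documents })
        }
        ExportMode::ExportRoute => None,
    }
}

fn main() {
    assert!(task_network(&ExportMode::ExportRoute, 42).is_none());

    let mode = ExportMode::NetworkBalancing { index_count: 3, export_old_remote_name: "ms-old" };
    let meta = task_network(&mode, 42).unwrap();
    assert_eq!(meta.index_count, 3);
    assert_eq!(meta.total_index_documents, 42);
}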
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 27, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.27.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 27, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
----------------------------------------------------------------------
### Status:
enqueued [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 27, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 27, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.27.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 27, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.27.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 27, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.27.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
@@ -747,7 +747,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -783,7 +782,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -826,7 +824,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -870,7 +867,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -51,6 +51,7 @@ pub fn upgrade_index_scheduler(
(1, 25, _) => 0,
(1, 26, _) => 0,
(1, 27, _) => 0,
(1, 28, _) => 0,
(major, minor, patch) => {
    if major > current_major
        || (major == current_major && minor > current_minor)
@@ -4,11 +4,9 @@ use std::collections::{BTreeSet, HashSet};
use std::ops::Bound;
use std::sync::Arc;

use convert_case::{Case, Casing as _};
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchId, BatchStats};
use meilisearch_types::heed::{Database, RoTxn, RwTxn};
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::CboRoaringBitmapCodec;
use meilisearch_types::milli::{CboRoaringBitmapCodec, ChannelCongestion};
use meilisearch_types::task_view::DetailsView;
use meilisearch_types::tasks::{
    BatchStopReason, Details, IndexSwap, Kind, KindWithContent, Status,
@@ -121,8 +119,17 @@ impl ProcessingBatch {
        self.stats.total_nb_tasks = 0;
    }

    /// Update batch task from a processed task
    /// Update the timestamp of the tasks and the inner structure of this structure.
    pub fn update_from_task(&mut self, task: &Task) {
    pub fn update(&mut self, task: &mut Task) {
        // We must re-set this value in case we're dealing with a task that has been added between
        // the `processing` and `finished` state
        // We must re-set this value in case we're dealing with a task that has been added between
        // the `processing` and `finished` state or that failed.
        task.batch_uid = Some(self.uid);
        // Same
        task.started_at = Some(self.started_at);
        task.finished_at = self.finished_at;

        self.statuses.insert(task.status);

        // Craft an aggregation of the details of all the tasks encountered in this batch.
@@ -137,63 +144,6 @@ impl ProcessingBatch {
        }
    }

    /// Update the timestamp of the tasks after they're done
    pub fn finish_task(&self, task: &mut Task) {
        // We must re-set this value in case we're dealing with a task that has been added between
        // the `processing` and `finished` state or that failed.
        task.batch_uid = Some(self.uid);
        // Same
        task.started_at = Some(self.started_at);
        task.finished_at = self.finished_at;
    }

    pub fn write_stats(
        &mut self,
        progress: &Progress,
        congestion: Option<ChannelCongestion>,
        pre_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
        post_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
    ) {
        self.stats.progress_trace =
            progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
        self.stats.write_channel_congestion = congestion.map(|congestion| {
            let mut congestion_info = serde_json::Map::new();
            congestion_info.insert("attempts".into(), congestion.attempts.into());
            congestion_info.insert("blocking_attempts".into(), congestion.blocking_attempts.into());
            congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
            congestion_info
        });
        self.stats.internal_database_sizes = pre_commit_dabases_sizes
            .iter()
            .flat_map(|(dbname, pre_size)| {
                post_commit_dabases_sizes
                    .get(dbname)
                    .map(|post_size| {
                        use std::cmp::Ordering::{Equal, Greater, Less};

                        use byte_unit::Byte;
                        use byte_unit::UnitType::Binary;

                        let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
                        let diff_size = post_size.abs_diff(*pre_size) as u64;
                        let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
                        let sign = match post_size.cmp(pre_size) {
                            Equal => return None,
                            Greater => "+",
                            Less => "-",
                        };

                        Some((
                            dbname.to_case(Case::Camel),
                            format!("{post:#.2} ({sign}{diff:#.2})").into(),
                        ))
                    })
                    .into_iter()
                    .flatten()
            })
            .collect();
    }

    pub fn to_batch(&self) -> Batch {
        Batch {
            uid: self.uid,
@@ -336,7 +286,6 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
        | K::DumpCreation { .. }
        | K::Export { .. }
        | K::UpgradeDatabase { .. }
        | K::NetworkTopologyChange(_)
        | K::SnapshotCreation => (),
    };
    if let Some(Details::IndexSwap { swaps }) = &mut task.details {
@@ -678,9 +627,6 @@ impl crate::IndexScheduler {
            } => {
                assert_eq!(kind.as_kind(), Kind::IndexCompaction);
            }
            Details::NetworkTopologyChange { moved_documents: _, message: _ } => {
                assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
            }
        }
    }
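// A standalone version of the database-size reporting that `write_stats`
// (removed above) produced: format the post-commit size of a database plus the
// signed delta against its pre-commit size, using the same `byte_unit` calls as
// the original code.
use byte_unit::Byte;
use byte_unit::UnitType::Binary;

fn size_delta(pre_size: usize, post_size: usize) -> Option<String> {
    use std::cmp::Ordering::{Equal, Greater, Less};

    let post = Byte::from_u64(post_size as u64).get_appropriate_unit(Binary);
    let diff = Byte::from_u64(post_size.abs_diff(pre_size) as u64).get_appropriate_unit(Binary);
    let sign = match post_size.cmp(&pre_size) {
        Equal => return None,
        Greater => "+",
        Less => "-",
    };
    Some(format!("{post:#.2} ({sign}{diff:#.2})"))
}

fn main() {
    // Unchanged databases are skipped entirely.
    assert!(size_delta(1024, 1024).is_none());
    // Prints something like "1.25 MiB (+256.00 KiB)".
    println!("{}", size_delta(1_048_576, 1_310_720).unwrap());
}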
@@ -15,7 +15,7 @@ license.workspace = true
serde_json = "1.0"

[dev-dependencies]
criterion = "0.6.0"
criterion = "0.7.0"

[[bench]]
name = "depth"
@@ -13,7 +13,7 @@ license.workspace = true
[dependencies]
# fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["json", "redactions"] }
md5 = "0.7.0"
md5 = "0.8.0"
once_cell = "1.21"
regex-lite = "0.1.6"
regex-lite = "0.1.8"
uuid = { version = "1.17.0", features = ["v4"] }
uuid = { version = "1.18.1", features = ["v4"] }
@@ -12,15 +12,15 @@ license.workspace = true

[dependencies]
base64 = "0.22.1"
enum-iterator = "2.1.0"
enum-iterator = "2.3.0"
hmac = "0.12.1"
maplit = "1.0.2"
meilisearch-types = { path = "../meilisearch-types" }
rand = "0.8.5"
roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
sha2 = "0.10.9"
thiserror = "2.0.12"
thiserror = "2.0.17"
time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] }
@@ -11,39 +11,38 @@ edition.workspace = true
license.workspace = true

[dependencies]
actix-web = { version = "4.11.0", default-features = false }
actix-web = { version = "4.12.0", default-features = false }
anyhow = "1.0.98"
anyhow = "1.0.100"
bumpalo = "3.18.1"
bumpalo = "3.19.0"
bumparaw-collections = "0.1.4"
byte-unit = { version = "5.1.6", features = ["serde"] }
convert_case = "0.8.0"
convert_case = "0.9.0"
csv = "1.3.1"
csv = "1.4.0"
deserr = { version = "0.6.3", features = ["actix-web"] }
deserr = { version = "0.6.4", features = ["actix-web"] }
either = { version = "1.15.0", features = ["serde"] }
enum-iterator = "2.1.0"
enum-iterator = "2.3.0"
file-store = { path = "../file-store" }
flate2 = "1.1.2"
flate2 = "1.1.5"
fst = "0.4.7"
itertools = "0.14.0"
memmap2 = "0.9.9"
memmap2 = "0.9.7"
milli = { path = "../milli" }
roaring = { version = "0.10.12", features = ["serde"] }
rustc-hash = "2.1.1"
serde = { version = "1.0.219", features = ["derive"] }
serde = { version = "1.0.228", features = ["derive"] }
serde-cs = "0.2.4"
serde_json = { version = "1.0.140", features = ["preserve_order"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
tar = "0.4.44"
tempfile = "3.20.0"
tempfile = "3.23.0"
thiserror = "2.0.12"
thiserror = "2.0.17"
time = { version = "0.3.41", features = [
time = { version = "0.3.44", features = [
    "serde-well-known",
    "formatting",
    "parsing",
    "macros",
] }
tokio = "1.45"
tokio = "1.48"
utoipa = { version = "5.4.0", features = ["macros"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] }

[dev-dependencies]
# fixed version due to format breakages in v1.40
@@ -57,6 +56,9 @@ all-tokenizations = ["milli/all-tokenizations"]
# chinese specialized tokenization
chinese = ["milli/chinese"]
chinese-pinyin = ["milli/chinese-pinyin"]

enterprise = ["milli/enterprise"]

# hebrew specialized tokenization
hebrew = ["milli/hebrew"]
# japanese specialized tokenization
16  crates/meilisearch-types/src/community_edition.rs  Normal file
@@ -0,0 +1,16 @@
pub mod network {
    use milli::update::new::indexer::current_edition::sharding::Shards;

    use crate::network::Network;

    impl Network {
        pub fn shards(&self) -> Option<Shards> {
            None
        }

        pub fn sharding(&self) -> bool {
            // always false in CE
            false
        }
    }
}
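// A hedged sketch of what the two editions of `Network::shards()` amount to:
// the community implementation above always opts out of sharding, while the
// enterprise variant in the next hunk puts the local remote name in `own` and
// every other remote name in `others`. `Shards` is re-declared here as a
// stand-in so the snippet stays self-contained.
use std::collections::BTreeSet;

struct Shards {
    own: Vec<String>,
    others: Vec<String>,
}

fn shards(sharding: bool, local: Option<&str>, remotes: &BTreeSet<String>) -> Option<Shards> {
    if !sharding {
        // Community edition behaviour: never shard.
        return None;
    }
    let this = local.expect("Inconsistent `sharding` and `self`");
    let others = remotes.iter().filter(|name| name.as_str() != this).cloned().collect();
    Some(Shards { own: vec![this.to_owned()], others })
}

fn main() {
    let remotes: BTreeSet<String> =
        ["ms-0", "ms-1", "ms-2"].into_iter().map(str::to_owned).collect();

    let sharded = shards(true, Some("ms-0"), &remotes).unwrap();
    assert_eq!(sharded.own, vec!["ms-0".to_owned()]);
    assert_eq!(sharded.others, vec!["ms-1".to_owned(), "ms-2".to_owned()]);

    assert!(shards(false, Some("ms-0"), &remotes).is_none());
}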
@@ -3,44 +3,27 @@
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

use std::collections::BTreeMap;

use milli::update::new::indexer::enterprise_edition::sharding::Shards;
use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
use crate::network::Network;
#[serde(rename_all = "camelCase")]
pub struct Network {
    #[serde(default, rename = "self")]
    pub local: Option<String>,
    #[serde(default)]
    pub remotes: BTreeMap<String, Remote>,
    #[serde(default)]
    pub leader: Option<String>,
    #[serde(default)]
    pub version: Uuid,
}

impl Network {
    pub fn shards(&self) -> Option<Shards> {
        if self.leader.is_some() {
        if self.sharding {
            Some(Shards::from_remotes_local(
            let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
                self.remotes.keys().map(String::as_str),
            let others = self
                self.local.as_deref(),
                .remotes
            ))
                .keys()
                .filter(|name| name.as_str() != this)
                .map(|name| name.to_owned())
                .collect();
            Some(Shards { own: vec![this.to_owned()], others })
        } else {
            None
        }
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
    pub fn sharding(&self) -> bool {
#[serde(rename_all = "camelCase")]
        self.sharding
pub struct Remote {
    }
    pub url: String,

    #[serde(default)]
    pub search_api_key: Option<String>,
    #[serde(default)]
    pub write_api_key: Option<String>,
}
@@ -156,7 +156,7 @@ macro_rules! make_error_codes {
}

/// return error name, used as error code
pub fn name(&self) -> String {
fn name(&self) -> String {
    match self {
        $(
            Code::$code_ident => stringify!($code_ident).to_case(convert_case::Case::Snake)
@@ -214,9 +214,6 @@ ImmutableApiKeyUid , InvalidRequest , BAD_REQU
ImmutableApiKeyUpdatedAt            , InvalidRequest      , BAD_REQUEST;
ImmutableIndexCreatedAt             , InvalidRequest      , BAD_REQUEST;
ImmutableIndexUpdatedAt             , InvalidRequest      , BAD_REQUEST;
ImportTaskAlreadyReceived           , InvalidRequest      , PRECONDITION_FAILED;
ImportTaskUnknownRemote             , InvalidRequest      , PRECONDITION_FAILED;
ImportTaskWithoutNetworkTask        , InvalidRequest      , SERVICE_UNAVAILABLE;
IndexAlreadyExists                  , InvalidRequest      , CONFLICT ;
IndexCreationFailed                 , Internal            , INTERNAL_SERVER_ERROR;
IndexNotFound                       , InvalidRequest      , NOT_FOUND;
@@ -273,9 +270,9 @@ InvalidMultiSearchQueryRankingRules , InvalidRequest , BAD_REQU
InvalidMultiSearchQueryPosition     , InvalidRequest      , BAD_REQUEST ;
InvalidMultiSearchRemote            , InvalidRequest      , BAD_REQUEST ;
InvalidMultiSearchWeight            , InvalidRequest      , BAD_REQUEST ;
InvalidNetworkLeader                , InvalidRequest      , BAD_REQUEST ;
InvalidNetworkRemotes               , InvalidRequest      , BAD_REQUEST ;
InvalidNetworkSelf                  , InvalidRequest      , BAD_REQUEST ;
InvalidNetworkSharding              , InvalidRequest      , BAD_REQUEST ;
InvalidNetworkSearchApiKey          , InvalidRequest      , BAD_REQUEST ;
InvalidNetworkWriteApiKey           , InvalidRequest      , BAD_REQUEST ;
InvalidNetworkUrl                   , InvalidRequest      , BAD_REQUEST ;
@@ -380,9 +377,7 @@ MissingPayload , InvalidRequest , BAD_REQU
MissingSearchHybrid                 , InvalidRequest      , BAD_REQUEST ;
MissingSwapIndexes                  , InvalidRequest      , BAD_REQUEST ;
MissingTaskFilters                  , InvalidRequest      , BAD_REQUEST ;
NetworkVersionMismatch              , InvalidRequest      , PRECONDITION_FAILED ;
NoSpaceLeftOnDevice                 , System              , UNPROCESSABLE_ENTITY;
NotLeader                           , InvalidRequest      , BAD_REQUEST ;
PayloadTooLarge                     , InvalidRequest      , PAYLOAD_TOO_LARGE ;
RemoteBadResponse                   , System              , BAD_GATEWAY ;
RemoteBadRequest                    , InvalidRequest      , BAD_REQUEST ;
@@ -396,9 +391,6 @@ TaskFileNotFound , InvalidRequest , NOT_FOUN
BatchNotFound                       , InvalidRequest      , NOT_FOUND ;
TooManyOpenFiles                    , System              , UNPROCESSABLE_ENTITY ;
TooManyVectors                      , InvalidRequest      , BAD_REQUEST ;
UnexpectedNetworkPreviousRemotes    , InvalidRequest      , BAD_REQUEST ;
NetworkVersionTooOld                , InvalidRequest      , BAD_REQUEST ;
UnprocessedNetworkTask              , InvalidRequest      , BAD_REQUEST ;
UnretrievableDocument               , Internal            , BAD_REQUEST ;
UnretrievableErrorCode              , InvalidRequest      , BAD_REQUEST ;
UnsupportedMediaType                , InvalidRequest      , UNSUPPORTED_MEDIA_TYPE ;
@@ -441,6 +433,7 @@ InvalidChatCompletionSearchQueryParamPrompt , InvalidRequest , BAD_REQU
InvalidChatCompletionSearchFilterParamPrompt   , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionPreQueryPrompt            , InvalidRequest , BAD_REQUEST ;
RequiresEnterpriseEdition                      , InvalidRequest , UNAVAILABLE_FOR_LEGAL_REASONS ;
// Webhooks
InvalidWebhooks                                , InvalidRequest , BAD_REQUEST ;
InvalidWebhookUrl                              , InvalidRequest , BAD_REQUEST ;
@@ -2,10 +2,17 @@

 pub mod batch_view;
 pub mod batches;
+#[cfg(not(feature = "enterprise"))]
+pub mod community_edition;
 pub mod compression;
 pub mod deserr;
 pub mod document_formats;
+#[cfg(feature = "enterprise")]
 pub mod enterprise_edition;
+#[cfg(not(feature = "enterprise"))]
+pub use community_edition as current_edition;
+#[cfg(feature = "enterprise")]
+pub use enterprise_edition as current_edition;
 pub mod error;
 pub mod facet_values_sort;
 pub mod features;
@@ -13,6 +20,7 @@ pub mod index_uid;
 pub mod index_uid_pattern;
 pub mod keys;
 pub mod locales;
+pub mod network;
 pub mod settings;
 pub mod star_or;
 pub mod task_view;
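The cfg-gated `current_edition` re-export above is what lets the rest of the codebase stay edition-agnostic. Below is a minimal, self-contained sketch of that pattern; the `edition_name` function is hypothetical and exists only to make the example runnable, it is not part of the Meilisearch code.

```rust
// Sketch of the cfg-gated edition alias shown in the diff above.
// `edition_name` is a made-up function for illustration only.
pub mod community_edition {
    pub fn edition_name() -> &'static str {
        "community"
    }
}

pub mod enterprise_edition {
    pub fn edition_name() -> &'static str {
        "enterprise"
    }
}

#[cfg(not(feature = "enterprise"))]
pub use community_edition as current_edition;
#[cfg(feature = "enterprise")]
pub use enterprise_edition as current_edition;

fn main() {
    // Callers never name a concrete edition; the alias is resolved at compile
    // time by the presence or absence of the `enterprise` cargo feature.
    println!("built for the {} edition", current_edition::edition_name());
}
```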
crates/meilisearch-types/src/network.rs (new file, +23)
@@ -0,0 +1,23 @@
+use serde::{Deserialize, Serialize};
+use std::collections::BTreeMap;
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct Network {
+    #[serde(default, rename = "self")]
+    pub local: Option<String>,
+    #[serde(default)]
+    pub remotes: BTreeMap<String, Remote>,
+    #[serde(default)]
+    pub sharding: bool,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+pub struct Remote {
+    pub url: String,
+    #[serde(default)]
+    pub search_api_key: Option<String>,
+    #[serde(default)]
+    pub write_api_key: Option<String>,
+}
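To make the wire format of these derives concrete, here is a small sketch that serializes a `Network` value; it assumes `serde` and `serde_json` are on hand, and the remote name, URL, and key are invented for the example.

```rust
use std::collections::BTreeMap;

use serde::{Deserialize, Serialize};

// Same shape as the structs introduced in the new file above.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
    #[serde(default, rename = "self")]
    pub local: Option<String>,
    #[serde(default)]
    pub remotes: BTreeMap<String, Remote>,
    #[serde(default)]
    pub sharding: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Remote {
    pub url: String,
    #[serde(default)]
    pub search_api_key: Option<String>,
    #[serde(default)]
    pub write_api_key: Option<String>,
}

fn main() {
    let mut remotes = BTreeMap::new();
    remotes.insert(
        "ms-01".to_string(),
        Remote {
            url: "http://ms-01:7700".to_string(),
            search_api_key: Some("search-key".to_string()),
            write_api_key: None,
        },
    );
    let network = Network { local: Some("ms-00".to_string()), remotes, sharding: false };
    // `local` serializes under the `self` key, the other fields in camelCase:
    // {"self":"ms-00","remotes":{"ms-01":{"url":"http://ms-01:7700",
    //  "searchApiKey":"search-key","writeApiKey":null}},"sharding":false}
    println!("{}", serde_json::to_string(&network).unwrap());
}
```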
@@ -9,12 +9,12 @@ use utoipa::ToSchema;
 use crate::batches::BatchId;
 use crate::error::ResponseError;
 use crate::settings::{Settings, Unchecked};
-use crate::tasks::enterprise_edition::network::DbTaskNetwork;
 use crate::tasks::{
     serialize_duration, Details, DetailsExportIndexSettings, IndexSwap, Kind, Status, Task, TaskId,
+    TaskNetwork,
 };

-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)]
+#[derive(Debug, Clone, PartialEq, Serialize, ToSchema)]
 #[serde(rename_all = "camelCase")]
 #[schema(rename_all = "camelCase")]
 pub struct TaskView {
@@ -54,7 +54,7 @@ pub struct TaskView {
     pub finished_at: Option<OffsetDateTime>,

     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub network: Option<DbTaskNetwork>,
+    pub network: Option<TaskNetwork>,

     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub custom_metadata: Option<String>,
@@ -151,11 +151,6 @@ pub struct DetailsView {
     pub pre_compaction_size: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub post_compaction_size: Option<String>,
-    // network topology change
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub moved_documents: Option<u64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub message: Option<String>,
 }

 impl DetailsView {
@@ -166,17 +161,6 @@ impl DetailsView {
                 (None, Some(doc)) | (Some(doc), None) => Some(doc),
                 (Some(left), Some(right)) => Some(left + right),
             },
-            moved_documents: match (self.moved_documents, other.moved_documents) {
-                (None, None) => None,
-                (None, Some(doc)) | (Some(doc), None) => Some(doc),
-                (Some(left), Some(right)) => Some(left + right),
-            },
-            message: match (&mut self.message, &other.message) {
-                (None, None) => None,
-                (None, Some(message)) => Some(message.clone()),
-                (Some(message), None) => Some(std::mem::take(message)),
-                (Some(message), Some(_)) => Some(std::mem::take(message)),
-            },
             indexed_documents: match (self.indexed_documents, other.indexed_documents) {
                 (None, None) => None,
                 (None, Some(None)) | (Some(None), None) | (Some(None), Some(None)) => Some(None),
@@ -467,11 +451,6 @@ impl From<Details> for DetailsView {
                     ..Default::default()
                 }
             }
-            Details::NetworkTopologyChange { moved_documents, message } => DetailsView {
-                moved_documents: Some(moved_documents),
-                message: Some(message),
-                ..Default::default()
-            },
         }
     }
 }
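The surviving arms of `accumulate` above all rely on the same small pattern for merging two optional counters: keep whichever side is present and add them when both are. A standalone sketch of just that pattern:

```rust
// Merge two optional counters the way the retained DetailsView arms do.
fn merge_counts(left: Option<u64>, right: Option<u64>) -> Option<u64> {
    match (left, right) {
        (None, None) => None,
        (None, Some(count)) | (Some(count), None) => Some(count),
        (Some(l), Some(r)) => Some(l + r),
    }
}

fn main() {
    assert_eq!(merge_counts(None, None), None);
    assert_eq!(merge_counts(Some(3), None), Some(3));
    assert_eq!(merge_counts(Some(3), Some(4)), Some(7));
    println!("merge_counts behaves as expected");
}
```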
@@ -23,8 +23,6 @@ use crate::{versioning, InstanceUid};

 pub type TaskId = u32;

-pub mod enterprise_edition;
-
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Task {
@@ -46,7 +44,7 @@ pub struct Task {
     pub kind: KindWithContent,

     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub network: Option<enterprise_edition::network::DbTaskNetwork>,
+    pub network: Option<TaskNetwork>,

     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub custom_metadata: Option<String>,
@@ -63,7 +61,6 @@ impl Task {
             | TaskDeletion { .. }
             | Export { .. }
             | UpgradeDatabase { .. }
-            | NetworkTopologyChange { .. }
             | IndexSwap { .. } => None,
             DocumentAdditionOrUpdate { index_uid, .. }
             | DocumentEdition { index_uid, .. }
@@ -102,7 +99,6 @@ impl Task {
             | KindWithContent::SnapshotCreation
             | KindWithContent::Export { .. }
             | KindWithContent::UpgradeDatabase { .. }
-            | KindWithContent::NetworkTopologyChange { .. }
             | KindWithContent::IndexCompaction { .. } => None,
         }
     }
@@ -182,7 +178,6 @@ pub enum KindWithContent {
     IndexCompaction {
         index_uid: String,
     },
-    NetworkTopologyChange(enterprise_edition::network::NetworkTopologyChange),
 }

 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
@@ -220,7 +215,6 @@ impl KindWithContent {
             KindWithContent::Export { .. } => Kind::Export,
             KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
             KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
-            KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
         }
     }

@@ -233,7 +227,6 @@ impl KindWithContent {
             | TaskCancelation { .. }
             | TaskDeletion { .. }
             | Export { .. }
-            | NetworkTopologyChange { .. }
             | UpgradeDatabase { .. } => vec![],
             DocumentAdditionOrUpdate { index_uid, .. }
             | DocumentEdition { index_uid, .. }
@@ -347,10 +340,6 @@ impl KindWithContent {
                 pre_compaction_size: None,
                 post_compaction_size: None,
             }),
-            KindWithContent::NetworkTopologyChange { .. } => Some(Details::NetworkTopologyChange {
-                moved_documents: 0,
-                message: "processing tasks for previous network versions".into(),
-            }),
         }
     }

@@ -403,7 +392,7 @@ impl KindWithContent {
                 })
             }
             KindWithContent::IndexSwap { .. } => {
-                unimplemented!("do not call `default_finished_details` for `IndexSwap` tasks")
+                todo!()
             }
             KindWithContent::TaskCancelation { query, tasks } => Some(Details::TaskCancelation {
                 matched_tasks: tasks.len(),
@@ -438,9 +427,6 @@ impl KindWithContent {
                 pre_compaction_size: None,
                 post_compaction_size: None,
             }),
-            KindWithContent::NetworkTopologyChange(network_topology_change) => {
-                Some(network_topology_change.to_details())
-            }
         }
     }
 }
@@ -508,9 +494,6 @@ impl From<&KindWithContent> for Option<Details> {
                 pre_compaction_size: None,
                 post_compaction_size: None,
             }),
-            KindWithContent::NetworkTopologyChange(network_topology_change) => {
-                Some(network_topology_change.to_details())
-            }
         }
     }
 }
@@ -622,7 +605,6 @@ pub enum Kind {
     Export,
     UpgradeDatabase,
     IndexCompaction,
-    NetworkTopologyChange,
 }

 impl Kind {
@@ -642,7 +624,6 @@ impl Kind {
             | Kind::DumpCreation
             | Kind::Export
             | Kind::UpgradeDatabase
-            | Kind::NetworkTopologyChange
             | Kind::SnapshotCreation => false,
         }
     }
@@ -665,7 +646,6 @@ impl Display for Kind {
             Kind::Export => write!(f, "export"),
             Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
             Kind::IndexCompaction => write!(f, "indexCompaction"),
-            Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
         }
     }
 }
@@ -703,8 +683,6 @@ impl FromStr for Kind {
             Ok(Kind::UpgradeDatabase)
         } else if kind.eq_ignore_ascii_case("indexCompaction") {
             Ok(Kind::IndexCompaction)
-        } else if kind.eq_ignore_ascii_case("networkTopologyChange") {
-            Ok(Kind::NetworkTopologyChange)
         } else {
             Err(ParseTaskKindError(kind.to_owned()))
         }
@@ -795,10 +773,36 @@ pub enum Details {
         pre_compaction_size: Option<Byte>,
         post_compaction_size: Option<Byte>,
     },
-    NetworkTopologyChange {
-        moved_documents: u64,
-        message: String,
-    },
+}
+
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(untagged, rename_all = "camelCase")]
+pub enum TaskNetwork {
+    Origin { origin: Origin },
+    Remotes { remote_tasks: BTreeMap<String, RemoteTask> },
+}
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct Origin {
+    pub remote_name: String,
+    pub task_uid: usize,
+}
+
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct RemoteTask {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    task_uid: Option<TaskId>,
+    error: Option<ResponseError>,
+}
+
+impl From<Result<TaskId, ResponseError>> for RemoteTask {
+    fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
+        match res {
+            Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
+            Err(err) => RemoteTask { task_uid: None, error: Some(err) },
+        }
+    }
 }

 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
@@ -841,9 +845,6 @@ impl Details {
             | Self::Export { .. }
             | Self::UpgradeDatabase { .. }
             | Self::IndexSwap { .. } => (),
-            Self::NetworkTopologyChange { moved_documents: _, message } => {
-                *message = format!("Failed. Previous status: {}", message);
-            }
         }

         details
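For reference, here is a sketch of how the untagged `TaskNetwork` enum introduced above can appear in a serialized task. It is a standalone illustration, not the crate's code: `RemoteTask` is simplified (the real type also carries an optional `ResponseError`), and the remote names and uids are invented.

```rust
use std::collections::BTreeMap;

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Origin {
    remote_name: String,
    task_uid: usize,
}

// Simplified stand-in for the crate's `RemoteTask`.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct RemoteTask {
    #[serde(skip_serializing_if = "Option::is_none")]
    task_uid: Option<u32>,
}

// `untagged` means the variant name never appears in the JSON,
// only the fields of whichever variant is active.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum TaskNetwork {
    Origin { origin: Origin },
    Remotes { remote_tasks: BTreeMap<String, RemoteTask> },
}

fn main() {
    let received = TaskNetwork::Origin {
        origin: Origin { remote_name: "ms-00".into(), task_uid: 42 },
    };
    // Prints: {"origin":{"remoteName":"ms-00","taskUid":42}}
    println!("{}", serde_json::to_string(&received).unwrap());

    let mut remote_tasks = BTreeMap::new();
    remote_tasks.insert("ms-01".to_string(), RemoteTask { task_uid: Some(7) });
    let sent = TaskNetwork::Remotes { remote_tasks };
    // Prints: {"remote_tasks":{"ms-01":{"taskUid":7}}}
    println!("{}", serde_json::to_string(&sent).unwrap());
}
```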
@@ -1,6 +0,0 @@
-// Copyright © 2025 Meilisearch Some Rights Reserved
-// This file is part of Meilisearch Enterprise Edition (EE).
-// Use of this source code is governed by the Business Source License 1.1,
-// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
-
-pub mod network;
@@ -1,681 +0,0 @@
|
|||||||
// Copyright © 2025 Meilisearch Some Rights Reserved
|
|
||||||
// This file is part of Meilisearch Enterprise Edition (EE).
|
|
||||||
// Use of this source code is governed by the Business Source License 1.1,
|
|
||||||
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
|
|
||||||
|
|
||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
|
|
||||||
use itertools::{EitherOrBoth, Itertools as _};
|
|
||||||
use milli::DocumentId;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use utoipa::ToSchema;
|
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
use crate::enterprise_edition::network::{Network, Remote};
|
|
||||||
use crate::error::ResponseError;
|
|
||||||
use crate::tasks::{Details, TaskId};
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
|
||||||
#[serde(untagged, rename_all = "camelCase")]
|
|
||||||
// This type is used in the database, care should be taken when modifying it.
|
|
||||||
pub enum DbTaskNetwork {
|
|
||||||
/// Tasks that were duplicated from `origin`
|
|
||||||
Origin { origin: Origin },
|
|
||||||
/// Tasks that were duplicated as `remote_tasks`
|
|
||||||
Remotes {
|
|
||||||
remote_tasks: BTreeMap<String, RemoteTask>,
|
|
||||||
#[serde(default)]
|
|
||||||
network_version: Uuid,
|
|
||||||
},
|
|
||||||
/// Document import tasks sent in the context of `network_change`
|
|
||||||
Import { import_from: ImportData, network_change: Origin },
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DbTaskNetwork {
|
|
||||||
pub fn network_version(&self) -> Uuid {
|
|
||||||
match self {
|
|
||||||
DbTaskNetwork::Origin { origin } => origin.network_version,
|
|
||||||
DbTaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
|
|
||||||
DbTaskNetwork::Import { import_from: _, network_change } => {
|
|
||||||
network_change.network_version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn import_data(&self) -> Option<&ImportData> {
|
|
||||||
match self {
|
|
||||||
DbTaskNetwork::Origin { .. } | DbTaskNetwork::Remotes { .. } => None,
|
|
||||||
DbTaskNetwork::Import { import_from, .. } => Some(import_from),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn origin(&self) -> Option<&Origin> {
|
|
||||||
match self {
|
|
||||||
DbTaskNetwork::Origin { origin } => Some(origin),
|
|
||||||
DbTaskNetwork::Remotes { .. } => None,
|
|
||||||
DbTaskNetwork::Import { network_change, .. } => Some(network_change),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum TaskNetwork {
|
|
||||||
/// Tasks that were duplicated from `origin`
|
|
||||||
Origin { origin: Origin },
|
|
||||||
/// Tasks that were duplicated as `remote_tasks`
|
|
||||||
Remotes { remote_tasks: BTreeMap<String, RemoteTask>, network_version: Uuid },
|
|
||||||
/// Document import tasks sent in the context of `network_change`
|
|
||||||
Import { import_from: ImportData, network_change: Origin, metadata: ImportMetadata },
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TaskNetwork {
|
|
||||||
pub fn network_version(&self) -> Uuid {
|
|
||||||
match self {
|
|
||||||
TaskNetwork::Origin { origin } => origin.network_version,
|
|
||||||
TaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
|
|
||||||
TaskNetwork::Import { import_from: _, network_change, metadata: _ } => {
|
|
||||||
network_change.network_version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<TaskNetwork> for DbTaskNetwork {
|
|
||||||
fn from(value: TaskNetwork) -> Self {
|
|
||||||
match value {
|
|
||||||
TaskNetwork::Origin { origin } => DbTaskNetwork::Origin { origin },
|
|
||||||
TaskNetwork::Remotes { remote_tasks, network_version } => {
|
|
||||||
DbTaskNetwork::Remotes { remote_tasks, network_version }
|
|
||||||
}
|
|
||||||
TaskNetwork::Import { import_from, network_change, metadata: _ } => {
|
|
||||||
DbTaskNetwork::Import { import_from, network_change }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct Origin {
|
|
||||||
pub remote_name: String,
|
|
||||||
pub task_uid: u32,
|
|
||||||
pub network_version: Uuid,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Import data stored in a task
|
|
||||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct ImportData {
|
|
||||||
/// Remote that this task is imported from
|
|
||||||
pub remote_name: String,
|
|
||||||
/// Index relevant to this task
|
|
||||||
pub index_name: Option<String>,
|
|
||||||
/// Number of documents in this task
|
|
||||||
pub document_count: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Import metadata associated with a task but not stored in the task
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct ImportMetadata {
|
|
||||||
/// Total number of indexes to import from this host
|
|
||||||
pub index_count: u64,
|
|
||||||
/// Key unique to this (network_change, index, host, key).
|
|
||||||
///
|
|
||||||
/// In practice, an internal document id of one of the documents to import.
|
|
||||||
pub task_key: Option<DocumentId>,
|
|
||||||
/// Total number of documents to import for this index from this host.
|
|
||||||
pub total_index_documents: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct RemoteTask {
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
task_uid: Option<TaskId>,
|
|
||||||
error: Option<ResponseError>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<Result<TaskId, ResponseError>> for RemoteTask {
|
|
||||||
fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
|
|
||||||
match res {
|
|
||||||
Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
|
|
||||||
Err(err) => RemoteTask { task_uid: None, error: Some(err) },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Contains the full state of a network topology change.
|
|
||||||
///
|
|
||||||
/// A network topology change task is unique in that it can be processed in multiple different batches, as its resolution
|
|
||||||
/// depends on various document additions tasks being processed.
|
|
||||||
///
|
|
||||||
/// A network topology task has 4 states:
|
|
||||||
///
|
|
||||||
/// 1. Processing any task that was meant for an earlier version of the network. This is necessary to know that we have the right version of
|
|
||||||
/// documents.
|
|
||||||
/// 2. Sending all documents that must be moved to other remotes.
|
|
||||||
/// 3. Processing any task coming from the remotes.
|
|
||||||
/// 4. Finished.
|
|
||||||
///
|
|
||||||
/// Furthermore, it maintains some stats
|
|
||||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct NetworkTopologyChange {
|
|
||||||
state: NetworkTopologyState,
|
|
||||||
// in name, `None` if the node is no longer part of the network
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
in_name: Option<String>,
|
|
||||||
// out name, `None` if the node is new to the network
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
out_name: Option<String>,
|
|
||||||
out_remotes: BTreeMap<String, Remote>,
|
|
||||||
in_remotes: BTreeMap<String, InRemote>,
|
|
||||||
stats: NetworkTopologyStats,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NetworkTopologyChange {
|
|
||||||
pub fn new(old_network: Network, new_network: Network) -> Self {
|
|
||||||
// we use our old name as export name
|
|
||||||
let out_name = old_network.local;
|
|
||||||
// we use our new name as import name
|
|
||||||
let in_name = new_network.local;
|
|
||||||
// we export to the new network
|
|
||||||
let mut out_remotes = new_network.remotes;
|
|
||||||
// don't export to ourselves
|
|
||||||
if let Some(in_name) = &in_name {
|
|
||||||
out_remotes.remove(in_name);
|
|
||||||
}
|
|
||||||
let in_remotes = old_network
|
|
||||||
.remotes
|
|
||||||
.into_keys()
|
|
||||||
// don't await imports from ourselves
|
|
||||||
.filter(|name| Some(name.as_str()) != out_name.as_deref())
|
|
||||||
.map(|name| (name, InRemote::new()))
|
|
||||||
.collect();
|
|
||||||
Self {
|
|
||||||
state: NetworkTopologyState::WaitingForOlderTasks,
|
|
||||||
in_name,
|
|
||||||
out_name,
|
|
||||||
out_remotes,
|
|
||||||
in_remotes,
|
|
||||||
stats: NetworkTopologyStats { moved_documents: 0 },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn state(&self) -> NetworkTopologyState {
|
|
||||||
self.state
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn out_name(&self) -> Option<&str> {
|
|
||||||
// unwrap: one of out name or in_name must be defined
|
|
||||||
self.out_name.as_deref()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn in_name(&self) -> Option<&str> {
|
|
||||||
self.in_name.as_deref()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn export_to_process(&self) -> Option<(&BTreeMap<String, Remote>, &str)> {
|
|
||||||
if self.state != NetworkTopologyState::ExportingDocuments {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.out_remotes.is_empty() {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let out_name = self.out_name()?;
|
|
||||||
Some((&self.out_remotes, out_name))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn set_moved(&mut self, moved_documents: u64) {
|
|
||||||
self.stats.moved_documents = moved_documents;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compute the next state from the current state of the task.
|
|
||||||
pub fn update_state(&mut self) {
|
|
||||||
self.state = match self.state {
|
|
||||||
NetworkTopologyState::WaitingForOlderTasks => {
|
|
||||||
// no more older tasks, so finished waiting
|
|
||||||
NetworkTopologyState::ExportingDocuments
|
|
||||||
}
|
|
||||||
NetworkTopologyState::ExportingDocuments => {
|
|
||||||
// processed all exported documents
|
|
||||||
if self.is_import_finished() {
|
|
||||||
NetworkTopologyState::Finished
|
|
||||||
} else {
|
|
||||||
NetworkTopologyState::ImportingDocuments
|
|
||||||
}
|
|
||||||
}
|
|
||||||
NetworkTopologyState::ImportingDocuments => {
|
|
||||||
if self.is_import_finished() {
|
|
||||||
NetworkTopologyState::Finished
|
|
||||||
} else {
|
|
||||||
NetworkTopologyState::ImportingDocuments
|
|
||||||
}
|
|
||||||
}
|
|
||||||
NetworkTopologyState::Finished => NetworkTopologyState::Finished,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn receive_remote_task(
|
|
||||||
&mut self,
|
|
||||||
remote_name: &str,
|
|
||||||
index_name: Option<&str>,
|
|
||||||
task_key: Option<DocumentId>,
|
|
||||||
document_count: u64,
|
|
||||||
total_indexes: u64,
|
|
||||||
total_index_documents: u64,
|
|
||||||
) -> Result<(), ReceiveTaskError> {
|
|
||||||
let remote = self
|
|
||||||
.in_remotes
|
|
||||||
.get_mut(remote_name)
|
|
||||||
.ok_or_else(|| ReceiveTaskError::UnknownRemote(remote_name.to_string()))?;
|
|
||||||
remote.import_state = match std::mem::take(&mut remote.import_state) {
|
|
||||||
ImportState::WaitingForInitialTask => {
|
|
||||||
if total_indexes == 0 {
|
|
||||||
ImportState::Finished { total_indexes, total_documents: 0 }
|
|
||||||
} else {
|
|
||||||
let mut task_keys = BTreeSet::new();
|
|
||||||
if let Some(index_name) = index_name {
|
|
||||||
if let Some(task_key) = task_key {
|
|
||||||
task_keys.insert(task_key);
|
|
||||||
}
|
|
||||||
let mut import_index_state = BTreeMap::new();
|
|
||||||
import_index_state.insert(
|
|
||||||
index_name.to_owned(),
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents: total_index_documents,
|
|
||||||
received_documents: document_count,
|
|
||||||
task_keys,
|
|
||||||
processed_documents: 0,
|
|
||||||
},
|
|
||||||
);
|
|
||||||
ImportState::Ongoing { import_index_state, total_indexes }
|
|
||||||
} else {
|
|
||||||
ImportState::WaitingForInitialTask
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ImportState::Ongoing { mut import_index_state, total_indexes } => {
|
|
||||||
if let Some(index_name) = index_name {
|
|
||||||
if let Some((index_name, mut index_state)) =
|
|
||||||
import_index_state.remove_entry(index_name)
|
|
||||||
{
|
|
||||||
index_state = match index_state {
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents,
|
|
||||||
received_documents: previously_received,
|
|
||||||
processed_documents,
|
|
||||||
mut task_keys,
|
|
||||||
} => {
|
|
||||||
if let Some(task_key) = task_key {
|
|
||||||
if !task_keys.insert(task_key) {
|
|
||||||
return Err(ReceiveTaskError::DuplicateTask(task_key));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents,
|
|
||||||
received_documents: previously_received + document_count,
|
|
||||||
processed_documents,
|
|
||||||
task_keys,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ImportIndexState::Finished { total_documents } => {
|
|
||||||
ImportIndexState::Finished { total_documents }
|
|
||||||
}
|
|
||||||
};
|
|
||||||
import_index_state.insert(index_name, index_state);
|
|
||||||
} else {
|
|
||||||
let mut task_keys = BTreeSet::new();
|
|
||||||
if let Some(task_key) = task_key {
|
|
||||||
task_keys.insert(task_key);
|
|
||||||
}
|
|
||||||
let state = ImportIndexState::Ongoing {
|
|
||||||
total_documents: total_index_documents,
|
|
||||||
received_documents: document_count,
|
|
||||||
processed_documents: 0,
|
|
||||||
task_keys,
|
|
||||||
};
|
|
||||||
import_index_state.insert(index_name.to_string(), state);
|
|
||||||
}
|
|
||||||
ImportState::Ongoing { import_index_state, total_indexes }
|
|
||||||
} else {
|
|
||||||
ImportState::Ongoing { import_index_state, total_indexes }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ImportState::Finished { total_indexes, total_documents } => {
|
|
||||||
ImportState::Finished { total_indexes, total_documents }
|
|
||||||
}
|
|
||||||
};
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn process_remote_tasks(
|
|
||||||
&mut self,
|
|
||||||
remote_name: &str,
|
|
||||||
index_name: &str,
|
|
||||||
document_count: u64,
|
|
||||||
) {
|
|
||||||
let remote = self.in_remotes.get_mut(remote_name).unwrap();
|
|
||||||
remote.import_state = match std::mem::take(&mut remote.import_state) {
|
|
||||||
ImportState::WaitingForInitialTask => panic!("no task received yet one processed"),
|
|
||||||
ImportState::Ongoing { mut import_index_state, total_indexes } => {
|
|
||||||
let (index_name, mut index_state) =
|
|
||||||
import_index_state.remove_entry(index_name).unwrap();
|
|
||||||
index_state = match index_state {
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents,
|
|
||||||
received_documents,
|
|
||||||
processed_documents: previously_processed,
|
|
||||||
task_keys,
|
|
||||||
} => {
|
|
||||||
let newly_processed_documents = previously_processed + document_count;
|
|
||||||
if newly_processed_documents >= total_documents {
|
|
||||||
ImportIndexState::Finished { total_documents }
|
|
||||||
} else {
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents,
|
|
||||||
received_documents,
|
|
||||||
processed_documents: newly_processed_documents,
|
|
||||||
task_keys,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ImportIndexState::Finished { total_documents } => {
|
|
||||||
ImportIndexState::Finished { total_documents }
|
|
||||||
}
|
|
||||||
};
|
|
||||||
import_index_state.insert(index_name, index_state);
|
|
||||||
if import_index_state.len() as u64 == total_indexes
|
|
||||||
&& import_index_state.values().all(|index| index.is_finished())
|
|
||||||
{
|
|
||||||
let total_documents =
|
|
||||||
import_index_state.values().map(|index| index.total_documents()).sum();
|
|
||||||
ImportState::Finished { total_indexes, total_documents }
|
|
||||||
} else {
|
|
||||||
ImportState::Ongoing { import_index_state, total_indexes }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ImportState::Finished { total_indexes, total_documents } => {
|
|
||||||
ImportState::Finished { total_indexes, total_documents }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn to_details(&self) -> Details {
|
|
||||||
let message = match self.state {
|
|
||||||
NetworkTopologyState::WaitingForOlderTasks => {
|
|
||||||
"Waiting for tasks enqueued before the network change to finish processing".into()
|
|
||||||
}
|
|
||||||
NetworkTopologyState::ExportingDocuments => "Exporting documents".into(),
|
|
||||||
NetworkTopologyState::ImportingDocuments => {
|
|
||||||
let mut finished_count = 0;
|
|
||||||
let mut first_ongoing = None;
|
|
||||||
let mut ongoing_total_indexes = 0;
|
|
||||||
let mut ongoing_processed_documents = 0;
|
|
||||||
let mut ongoing_missing_documents = 0;
|
|
||||||
let mut ongoing_total_documents = 0;
|
|
||||||
let mut other_ongoing_count = 0;
|
|
||||||
let mut first_waiting = None;
|
|
||||||
let mut other_waiting_count = 0;
|
|
||||||
for (remote_name, in_remote) in &self.in_remotes {
|
|
||||||
match &in_remote.import_state {
|
|
||||||
ImportState::WaitingForInitialTask => {
|
|
||||||
first_waiting = match first_waiting {
|
|
||||||
None => Some(remote_name),
|
|
||||||
first_waiting => {
|
|
||||||
other_waiting_count += 1;
|
|
||||||
first_waiting
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
ImportState::Ongoing { import_index_state, total_indexes } => {
|
|
||||||
first_ongoing = match first_ongoing {
|
|
||||||
None => {
|
|
||||||
ongoing_total_indexes = *total_indexes;
|
|
||||||
Some(remote_name)
|
|
||||||
}
|
|
||||||
first_ongoing => {
|
|
||||||
other_ongoing_count += 1;
|
|
||||||
first_ongoing
|
|
||||||
}
|
|
||||||
};
|
|
||||||
for import_state in import_index_state.values() {
|
|
||||||
match import_state {
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents,
|
|
||||||
processed_documents,
|
|
||||||
received_documents,
|
|
||||||
task_keys: _,
|
|
||||||
} => {
|
|
||||||
ongoing_total_documents += total_documents;
|
|
||||||
ongoing_processed_documents += processed_documents;
|
|
||||||
ongoing_missing_documents +=
|
|
||||||
total_documents.saturating_sub(*received_documents);
|
|
||||||
}
|
|
||||||
ImportIndexState::Finished { total_documents } => {
|
|
||||||
ongoing_total_documents += total_documents;
|
|
||||||
ongoing_processed_documents += total_documents;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ImportState::Finished { total_indexes, total_documents } => {
|
|
||||||
finished_count += 1;
|
|
||||||
ongoing_total_indexes = *total_indexes;
|
|
||||||
ongoing_total_documents += *total_documents;
|
|
||||||
ongoing_processed_documents += *total_documents;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
format!(
|
|
||||||
"Importing documents from {total} remotes{waiting}{ongoing}{finished}",
|
|
||||||
total = self.in_remotes.len(),
|
|
||||||
waiting = if let Some(first_waiting) = first_waiting {
|
|
||||||
&format!(
|
|
||||||
", waiting on first task from `{}`{others}",
|
|
||||||
first_waiting,
|
|
||||||
others = if other_waiting_count > 0 {
|
|
||||||
&format!(" and {other_waiting_count} other remotes")
|
|
||||||
} else {
|
|
||||||
""
|
|
||||||
}
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
""
|
|
||||||
},
|
|
||||||
ongoing = if let Some(first_ongoing) = first_ongoing {
|
|
||||||
&format!(", awaiting {ongoing_missing_documents} and processed {ongoing_processed_documents} out of {ongoing_total_documents} documents in {ongoing_total_indexes} indexes from `{first_ongoing}`{others}",
|
|
||||||
others=if other_ongoing_count > 0 {&format!(" and {other_ongoing_count} other remotes")} else {""})
|
|
||||||
} else {
|
|
||||||
""
|
|
||||||
},
|
|
||||||
finished = if finished_count >= 0 {
|
|
||||||
&format!(", {finished_count} remotes finished processing")
|
|
||||||
} else {
|
|
||||||
""
|
|
||||||
}
|
|
||||||
)
|
|
||||||
}
|
|
||||||
NetworkTopologyState::Finished => "Finished".into(),
|
|
||||||
};
|
|
||||||
Details::NetworkTopologyChange { moved_documents: self.stats.moved_documents, message }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn is_import_finished(&self) -> bool {
|
|
||||||
self.in_remotes.values().all(|remote| remote.is_finished())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn merge(&mut self, other: NetworkTopologyChange) {
|
|
||||||
// The topology change has a guarantee of forward progress, so for each field we're going to keep the "most advanced" values.
|
|
||||||
let Self { state, in_name: _, out_name: _, out_remotes: _, in_remotes, stats } = self;
|
|
||||||
|
|
||||||
*state = Ord::max(*state, other.state);
|
|
||||||
*stats = Ord::max(*stats, other.stats);
|
|
||||||
|
|
||||||
for (old_value, new_value) in other.in_remotes.into_values().zip(in_remotes.values_mut()) {
|
|
||||||
new_value.import_state = match (old_value.import_state, std::mem::take(&mut new_value.import_state)) {
|
|
||||||
// waiting for initial task is always older
|
|
||||||
(ImportState::WaitingForInitialTask, newer)
|
|
||||||
| (newer, ImportState::WaitingForInitialTask)
|
|
||||||
|
|
||||||
// finished is always newer
|
|
||||||
| (_, newer @ ImportState::Finished { .. })
|
|
||||||
| (newer @ ImportState::Finished { .. }, _) => newer,
|
|
||||||
(
|
|
||||||
ImportState::Ongoing { import_index_state: left_import, total_indexes: left_total_indexes },
|
|
||||||
ImportState::Ongoing { import_index_state: right_import, total_indexes: right_total_indexes },
|
|
||||||
) => {
|
|
||||||
let import_index_state = left_import.into_iter().merge_join_by(right_import.into_iter(), |(k,_), (x, _)|k.cmp(x)).map(|eob|
|
|
||||||
match eob {
|
|
||||||
EitherOrBoth::Both((name, left), (_, right)) => {
|
|
||||||
let newer = merge_import_index_state(left, right);
|
|
||||||
(name, newer)
|
|
||||||
},
|
|
||||||
EitherOrBoth::Left(import) |
|
|
||||||
EitherOrBoth::Right(import) => import,
|
|
||||||
}
|
|
||||||
).collect();
|
|
||||||
|
|
||||||
ImportState::Ongoing{ import_index_state, total_indexes : u64::max(left_total_indexes, right_total_indexes) }
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn merge_import_index_state(left: ImportIndexState, right: ImportIndexState) -> ImportIndexState {
|
|
||||||
match (left, right) {
|
|
||||||
(_, newer @ ImportIndexState::Finished { .. }) => newer,
|
|
||||||
(newer @ ImportIndexState::Finished { .. }, _) => newer,
|
|
||||||
(
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents: left_total_documents,
|
|
||||||
received_documents: left_received_documents,
|
|
||||||
processed_documents: left_processed_documents,
|
|
||||||
task_keys: mut left_task_keys,
|
|
||||||
},
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents: right_total_documents,
|
|
||||||
received_documents: right_received_documents,
|
|
||||||
processed_documents: right_processed_documents,
|
|
||||||
task_keys: mut right_task_keys,
|
|
||||||
},
|
|
||||||
) => {
|
|
||||||
let total_documents = u64::max(left_total_documents, right_total_documents);
|
|
||||||
let received_documents = u64::max(left_received_documents, right_received_documents);
|
|
||||||
let processed_documents = u64::max(left_processed_documents, right_processed_documents);
|
|
||||||
left_task_keys.append(&mut right_task_keys);
|
|
||||||
let task_keys = left_task_keys;
|
|
||||||
|
|
||||||
ImportIndexState::Ongoing {
|
|
||||||
total_documents,
|
|
||||||
received_documents,
|
|
||||||
processed_documents,
|
|
||||||
task_keys,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub enum ReceiveTaskError {
|
|
||||||
UnknownRemote(String),
|
|
||||||
DuplicateTask(DocumentId),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, PartialOrd, Ord)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub enum NetworkTopologyState {
|
|
||||||
WaitingForOlderTasks,
|
|
||||||
ExportingDocuments,
|
|
||||||
ImportingDocuments,
|
|
||||||
Finished,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, PartialOrd, Ord)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct NetworkTopologyStats {
|
|
||||||
#[serde(default)]
|
|
||||||
pub moved_documents: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct InRemote {
|
|
||||||
import_state: ImportState,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl InRemote {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self { import_state: ImportState::WaitingForInitialTask }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn is_finished(&self) -> bool {
|
|
||||||
matches!(self.import_state, ImportState::Finished { .. })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for InRemote {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
enum ImportState {
|
|
||||||
/// Initially Meilisearch doesn't know how many documents it should expect from a remote.
|
|
||||||
/// The first task for each remote contains the information of how many indexes will be imported,
|
|
||||||
/// and the first task for each index contains the number of documents to import for that index.
|
|
||||||
#[default]
|
|
||||||
WaitingForInitialTask,
|
|
||||||
Ongoing {
|
|
||||||
import_index_state: BTreeMap<String, ImportIndexState>,
|
|
||||||
total_indexes: u64,
|
|
||||||
},
|
|
||||||
Finished {
|
|
||||||
total_indexes: u64,
|
|
||||||
total_documents: u64,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
enum ImportIndexState {
|
|
||||||
Ongoing {
|
|
||||||
total_documents: u64,
|
|
||||||
received_documents: u64,
|
|
||||||
processed_documents: u64,
|
|
||||||
task_keys: BTreeSet<DocumentId>,
|
|
||||||
},
|
|
||||||
Finished {
|
|
||||||
total_documents: u64,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ImportIndexState {
|
|
||||||
pub fn is_finished(&self) -> bool {
|
|
||||||
matches!(self, ImportIndexState::Finished { .. })
|
|
||||||
}
|
|
||||||
|
|
||||||
fn total_documents(&self) -> u64 {
|
|
||||||
match *self {
|
|
||||||
ImportIndexState::Ongoing { total_documents, .. }
|
|
||||||
| ImportIndexState::Finished { total_documents } => total_documents,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub mod headers {
|
|
||||||
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
|
|
||||||
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
|
|
||||||
pub const PROXY_ORIGIN_NETWORK_VERSION_HEADER: &str = "Meili-Proxy-Origin-Network-Version";
|
|
||||||
pub const PROXY_IMPORT_REMOTE_HEADER: &str = "Meili-Proxy-Import-Remote";
|
|
||||||
pub const PROXY_IMPORT_INDEX_COUNT_HEADER: &str = "Meili-Proxy-Import-Index-Count";
|
|
||||||
pub const PROXY_IMPORT_INDEX_HEADER: &str = "Meili-Proxy-Import-Index";
|
|
||||||
pub const PROXY_IMPORT_TASK_KEY_HEADER: &str = "Meili-Proxy-Import-Task-Key";
|
|
||||||
pub const PROXY_IMPORT_DOCS_HEADER: &str = "Meili-Proxy-Import-Docs";
|
|
||||||
pub const PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER: &str = "Meili-Proxy-Import-Total-Index-Docs";
|
|
||||||
}
|
|
||||||
@@ -14,91 +14,91 @@ default-run = "meilisearch"

 [dependencies]
 actix-cors = "0.7.1"
-actix-http = { version = "3.11.0", default-features = false, features = [
+actix-http = { version = "3.11.2", default-features = false, features = [
     "compress-brotli",
     "compress-gzip",
     "rustls-0_23",
 ] }
 actix-utils = "3.0.1"
-actix-web = { version = "4.11.0", default-features = false, features = [
+actix-web = { version = "4.12.0", default-features = false, features = [
     "macros",
     "compress-brotli",
     "compress-gzip",
     "cookies",
     "rustls-0_23",
 ] }
-anyhow = { version = "1.0.98", features = ["backtrace"] }
+anyhow = { version = "1.0.100", features = ["backtrace"] }
-bstr = "1.12.0"
+bstr = "1.12.1"
 byte-unit = { version = "5.1.6", features = ["serde"] }
-bytes = "1.10.1"
+bytes = "1.11.0"
-bumpalo = "3.18.1"
+bumpalo = "3.19.0"
-clap = { version = "4.5.40", features = ["derive", "env"] }
+clap = { version = "4.5.52", features = ["derive", "env"] }
 crossbeam-channel = "0.5.15"
-deserr = { version = "0.6.3", features = ["actix-web"] }
+deserr = { version = "0.6.4", features = ["actix-web"] }
 dump = { path = "../dump" }
 either = "1.15.0"
 file-store = { path = "../file-store" }
-flate2 = "1.1.2"
+flate2 = "1.1.5"
 fst = "0.4.7"
 futures = "0.3.31"
 futures-util = "0.3.31"
 index-scheduler = { path = "../index-scheduler" }
-indexmap = { version = "2.9.0", features = ["serde"] }
+indexmap = { version = "2.12.0", features = ["serde"] }
-is-terminal = "0.4.16"
+is-terminal = "0.4.17"
 itertools = "0.14.0"
 jsonwebtoken = "9.3.1"
 lazy_static = "1.5.0"
 meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
-memmap2 = "0.9.7"
+memmap2 = "0.9.9"
-mimalloc = { version = "0.1.47", default-features = false }
+mimalloc = { version = "0.1.48", default-features = false }
 mime = "0.3.17"
 num_cpus = "1.17.0"
 obkv = "0.3.0"
 once_cell = "1.21.3"
-ordered-float = "5.0.0"
+ordered-float = "5.1.0"
-parking_lot = "0.12.4"
+parking_lot = "0.12.5"
 permissive-json-pointer = { path = "../permissive-json-pointer" }
 pin-project-lite = "0.2.16"
 platform-dirs = "0.3.0"
 prometheus = { version = "0.14.0", features = ["process"] }
 rand = "0.8.5"
-rayon = "1.10.0"
+rayon = "1.11.0"
-regex = "1.11.1"
+regex = "1.12.2"
-reqwest = { version = "0.12.20", features = [
+reqwest = { version = "0.12.24", features = [
     "rustls-tls",
     "json",
 ], default-features = false }
-rustls = { version = "0.23.28", features = ["ring"], default-features = false }
+rustls = { version = "0.23.35", features = ["ring"], default-features = false }
-rustls-pki-types = { version = "1.12.0", features = ["alloc"] }
+rustls-pki-types = { version = "1.13.0", features = ["alloc"] }
 rustls-pemfile = "2.2.0"
 segment = { version = "0.2.6" }
-serde = { version = "1.0.219", features = ["derive"] }
+serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.140", features = ["preserve_order"] }
+serde_json = { version = "1.0.145", features = ["preserve_order"] }
 sha2 = "0.10.9"
 siphasher = "1.0.1"
 slice-group-by = "0.3.1"
-static-files = { version = "0.2.5", optional = true }
+static-files = { version = "0.3.1", optional = true }
-sysinfo = "0.35.2"
+sysinfo = "0.37.2"
 tar = "0.4.44"
-tempfile = "3.20.0"
+tempfile = "3.23.0"
-thiserror = "2.0.12"
+thiserror = "2.0.17"
-time = { version = "0.3.41", features = [
+time = { version = "0.3.44", features = [
     "serde-well-known",
     "formatting",
     "parsing",
     "macros",
 ] }
-tokio = { version = "1.45.1", features = ["full"] }
+tokio = { version = "1.48.0", features = ["full"] }
-toml = "0.8.23"
+toml = "0.9.8"
-uuid = { version = "1.18.0", features = ["serde", "v4", "v7"] }
+uuid = { version = "1.18.1", features = ["serde", "v4", "v7"] }
 serde_urlencoded = "0.7.1"
 termcolor = "1.4.1"
-url = { version = "2.5.4", features = ["serde"] }
+url = { version = "2.5.7", features = ["serde"] }
 tracing = "0.1.41"
 tracing-subscriber = { version = "0.3.20", features = ["json"] }
 tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
-tracing-actix-web = "0.7.18"
+tracing-actix-web = "0.7.19"
 build-info = { version = "1.7.0", path = "../build-info" }
 roaring = "0.10.12"
 mopa-maintained = "0.2.3"
@@ -114,35 +114,35 @@ utoipa = { version = "5.4.0", features = [
 utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }
 async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" }
 secrecy = "0.10.3"
-actix-web-lab = { version = "0.24.1", default-features = false }
+actix-web-lab = { version = "0.24.3", default-features = false }
 urlencoding = "2.1.3"
 backoff = { version = "0.4.0", features = ["tokio"] }
+humantime = { version = "2.3.0", default-features = false }

 [dev-dependencies]
-actix-rt = "2.10.0"
+actix-rt = "2.11.0"
-brotli = "8.0.1"
+brotli = "8.0.2"
 # fixed version due to format breakages in v1.40
 insta = { version = "=1.39.0", features = ["redactions"] }
 manifest-dir-macros = "0.1.18"
 maplit = "1.0.2"
 meili-snap = { path = "../meili-snap" }
 temp-env = "0.3.6"
-wiremock = "0.6.3"
+wiremock = "0.6.5"
 yaup = "0.3.1"

 [build-dependencies]
-anyhow = { version = "1.0.98", optional = true }
+anyhow = { version = "1.0.100", optional = true }
-cargo_toml = { version = "0.22.1", optional = true }
+cargo_toml = { version = "0.22.3", optional = true }
 hex = { version = "0.4.3", optional = true }
-reqwest = { version = "0.12.20", features = [
+reqwest = { version = "0.12.24", features = [
     "blocking",
     "rustls-tls",
 ], default-features = false, optional = true }
 sha-1 = { version = "0.10.1", optional = true }
-static-files = { version = "0.2.5", optional = true }
+static-files = { version = "0.3.1", optional = true }
-tempfile = { version = "3.20.0", optional = true }
+tempfile = { version = "3.23.0", optional = true }
-zip = { version = "4.1.0", optional = true }
+zip = { version = "6.0.0", optional = true }

 [features]
 default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]
@@ -160,6 +160,7 @@ mini-dashboard = [
 ]
 chinese = ["meilisearch-types/chinese"]
 chinese-pinyin = ["meilisearch-types/chinese-pinyin"]
+enterprise = ["meilisearch-types/enterprise"]
 hebrew = ["meilisearch-types/hebrew"]
 japanese = ["meilisearch-types/japanese"]
 korean = ["meilisearch-types/korean"]
@@ -6,13 +6,8 @@ use meilisearch_types::error::{Code, ErrorCode, ResponseError};
 use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
 use meilisearch_types::milli;
 use meilisearch_types::milli::OrderBy;
-use meilisearch_types::tasks::enterprise_edition::network::headers::{
-    PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
-    PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
-};
 use serde_json::Value;
 use tokio::task::JoinError;
-use uuid::Uuid;

 use crate::routes::indexes::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};

@@ -98,58 +93,8 @@ pub enum MeilisearchHttpError {
         } else { PROXY_ORIGIN_TASK_UID_HEADER }
     )]
     InconsistentOriginHeaders { is_remote_missing: bool },
-    #[error("Inconsistent `Import` headers: {remote}: {remote_status}, {index}: {index_status}, {docs}: {docs_status}.\n - Hint: either all three headers should be provided, or none of them",
-        remote = PROXY_IMPORT_REMOTE_HEADER,
-        remote_status = if *is_remote_missing { "missing" } else{ "provided" },
-        index = PROXY_IMPORT_INDEX_HEADER,
-        index_status = if *is_index_missing { "missing" } else { "provided" },
-        docs = PROXY_IMPORT_DOCS_HEADER,
-        docs_status = if *is_docs_missing { "missing" } else { "provided" }
-    )]
-    InconsistentImportHeaders {
-        is_remote_missing: bool,
-        is_index_missing: bool,
-        is_docs_missing: bool,
-    },
-    #[error("Inconsistent `Import-Metadata` headers: {index_count}: {index_count_status}, {task_key}: {task_key_status}, {total_index_documents}: {total_index_documents_status}.\n - Hint: either all three headers should be provided, or none of them",
-        index_count = PROXY_IMPORT_INDEX_COUNT_HEADER,
-        index_count_status = if *is_index_count_missing { "missing" } else { "provided"},
-        task_key = PROXY_IMPORT_TASK_KEY_HEADER,
-        task_key_status = if *is_task_key_missing { "missing" } else { "provided"},
-        total_index_documents = PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
-        total_index_documents_status = if *is_total_index_documents_missing { "missing" } else { "provided"},
-    )]
-    InconsistentImportMetadataHeaders {
-        is_index_count_missing: bool,
-        is_task_key_missing: bool,
-        is_total_index_documents_missing: bool,
-    },
-
-    #[error(
-        "Inconsistent task network headers: origin headers: {origin_status}, import headers: {import_status}, import metadata: {import_metadata_status}",
-        origin_status = if *is_missing_origin { "missing"} else { "present" },
-        import_status = if *is_missing_import { "missing"} else { "present" },
-        import_metadata_status = if *is_missing_import_metadata { "missing"} else { "present" })]
-    InconsistentTaskNetworkHeaders {
-        is_missing_origin: bool,
-        is_missing_import: bool,
-        is_missing_import_metadata: bool,
-    },
     #[error("Invalid value for header {header_name}: {msg}")]
     InvalidHeaderValue { header_name: &'static str, msg: String },
-    #[error("This remote is not the leader of the network.\n - Note: only the leader `{leader}` can receive new tasks.")]
-    NotLeader { leader: String },
-    #[error("Unexpected `previousRemotes` in network call.\n - Note: `previousRemote` is reserved for internal use.")]
-    UnexpectedNetworkPreviousRemotes,
-    #[error("The network version in request is too old.\n - Received: {received}\n - Expected at least: {expected_at_least}")]
-    NetworkVersionTooOld { received: Uuid, expected_at_least: Uuid },
-    #[error("Remote `{remote}` encountered an error: {error}")]
-    RemoteIndexScheduler { remote: String, error: index_scheduler::Error },
-    #[error("{if_remote}Already has a pending network task with uid {task_uid}.\n - Note: No network task can be registered while any previous network task is not done processing.\n - Hint: Wait for task {task_uid} to complete or cancel it.",
-        if_remote=if let Some(remote) = remote {
-            format!("Remote `{remote}` encountered an error: ")
-        } else {"".into()} )]
-    UnprocessedNetworkTask { remote: Option<String>, task_uid: meilisearch_types::tasks::TaskId },
 }

 impl MeilisearchHttpError {
@@ -177,7 +122,6 @@ impl ErrorCode for MeilisearchHttpError {
             MeilisearchHttpError::SerdeJson(_) => Code::Internal,
             MeilisearchHttpError::HeedError(_) => Code::Internal,
             MeilisearchHttpError::IndexScheduler(e) => e.error_code(),
-            MeilisearchHttpError::RemoteIndexScheduler { error, .. } => error.error_code(),
             MeilisearchHttpError::Milli { error, .. } => error.error_code(),
             MeilisearchHttpError::Payload(e) => e.error_code(),
             MeilisearchHttpError::FileStore(_) => Code::Internal,
@@ -198,19 +142,10 @@ impl ErrorCode for MeilisearchHttpError {
             MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
                 Code::InvalidMultiSearchQueryPersonalization
             }
-            MeilisearchHttpError::InconsistentOriginHeaders { .. }
-            | MeilisearchHttpError::InconsistentImportHeaders { .. }
-            | MeilisearchHttpError::InconsistentImportMetadataHeaders { .. }
-            | MeilisearchHttpError::InconsistentTaskNetworkHeaders { .. } => {
+            MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
                 Code::InconsistentDocumentChangeHeaders
             }
             MeilisearchHttpError::InvalidHeaderValue { .. } => Code::InvalidHeaderValue,
-            MeilisearchHttpError::NotLeader { .. } => Code::NotLeader,
-            MeilisearchHttpError::UnexpectedNetworkPreviousRemotes => {
|
|
||||||
Code::UnexpectedNetworkPreviousRemotes
|
|
||||||
}
|
|
||||||
MeilisearchHttpError::NetworkVersionTooOld { .. } => Code::NetworkVersionTooOld,
|
|
||||||
MeilisearchHttpError::UnprocessedNetworkTask { .. } => Code::UnprocessedNetworkTask,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
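As background for the hunk above: the `#[error(...)]` attributes come from `thiserror`, and the format string may reference the variant's own fields, including computed named arguments. A minimal standalone sketch of that pattern (variant and field names are hypothetical, not part of the diff):

use thiserror::Error;

// Minimal sketch of the `#[error(...)]` pattern used in the enum above:
// named format arguments can be computed from the variant's fields.
#[derive(Debug, Error)]
pub enum HeaderCheckError {
    #[error("Inconsistent headers: remote is {remote_status}, task uid is {uid_status}.\n - Hint: provide both headers or neither",
        remote_status = if *is_remote_missing { "missing" } else { "provided" },
        uid_status = if *is_uid_missing { "missing" } else { "provided" })]
    Inconsistent { is_remote_missing: bool, is_uid_missing: bool },
}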
@@ -1,7 +1,8 @@
 use lazy_static::lazy_static;
 use prometheus::{
-    opts, register_gauge, register_histogram_vec, register_int_counter_vec, register_int_gauge,
-    register_int_gauge_vec, Gauge, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
+    opts, register_gauge, register_gauge_vec, register_histogram_vec, register_int_counter_vec,
+    register_int_gauge, register_int_gauge_vec, Gauge, GaugeVec, HistogramVec, IntCounterVec,
+    IntGauge, IntGaugeVec,
 };

 lazy_static! {
@@ -73,6 +74,20 @@ lazy_static! {
         &["kind", "value"]
     )
     .expect("Can't create a metric");
+    pub static ref MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE: GaugeVec = register_gauge_vec!(
+        opts!("meilisearch_batch_running_progress_trace", "The currently running progress trace"),
+        &["batch_uid", "step_name"]
+    )
+    .expect("Can't create a metric");
+    pub static ref MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS: IntGaugeVec =
+        register_int_gauge_vec!(
+            opts!(
+                "meilisearch_last_finished_batches_progress_trace_ms",
+                "The last few batches progress trace in milliseconds"
+            ),
+            &["batch_uid", "step_name"]
+        )
+        .expect("Can't create a metric");
     pub static ref MEILISEARCH_LAST_UPDATE: IntGauge =
         register_int_gauge!(opts!("meilisearch_last_update", "Meilisearch Last Update"))
             .expect("Can't create a metric");
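A usage sketch for the two gauge vectors added above (the reporting helper and its call site are hypothetical; `with_label_values` and `set` are the standard prometheus accessors):

// Hypothetical reporting helper, assumed to live next to the lazy_static! block above:
// records the progress of the running batch and the duration of a finished step,
// keyed by batch uid and step name.
fn report_progress(batch_uid: u32, step_name: &str, fraction_done: f64, elapsed_ms: i64) {
    let batch_uid = batch_uid.to_string();
    MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE
        .with_label_values(&[batch_uid.as_str(), step_name])
        .set(fraction_done);
    MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS
        .with_label_values(&[batch_uid.as_str(), step_name])
        .set(elapsed_ms);
}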
crates/meilisearch/src/routes/indexes/community_edition.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
+pub mod proxy {
+
+    use std::fs::File;
+
+    use actix_web::HttpRequest;
+    use index_scheduler::IndexScheduler;
+
+    use crate::error::MeilisearchHttpError;
+
+    pub enum Body<T: serde::Serialize> {
+        NdJsonPayload,
+        Inline(T),
+        None,
+    }
+
+    impl Body<()> {
+        pub fn with_ndjson_payload(_file: File) -> Self {
+            Self::NdJsonPayload
+        }
+
+        pub fn none() -> Self {
+            Self::None
+        }
+    }
+
+    pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
+    pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
+
+    pub async fn proxy<T: serde::Serialize>(
+        _index_scheduler: &IndexScheduler,
+        _index_uid: &str,
+        _req: &HttpRequest,
+        _network: meilisearch_types::network::Network,
+        _body: Body<T>,
+        _task: &meilisearch_types::tasks::Task,
+    ) -> Result<(), MeilisearchHttpError> {
+        Ok(())
+    }
+}
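In the community edition this `proxy` is deliberately a no-op, which lets route handlers call it unconditionally. A hedged sketch of a caller (the index uid and the surrounding glue are hypothetical; the import path mirrors the `current_edition` alias used by the handlers below):

use crate::routes::indexes::current_edition::proxy::{proxy, Body};

// Hypothetical caller: in the community edition the call below always returns Ok(()).
async fn notify_remotes(
    index_scheduler: &index_scheduler::IndexScheduler,
    req: &actix_web::HttpRequest,
    network: meilisearch_types::network::Network,
    task: &meilisearch_types::tasks::Task,
) -> Result<(), crate::error::MeilisearchHttpError> {
    proxy(index_scheduler, "movies", req, network, Body::none(), task).await
}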
@@ -45,9 +45,7 @@ use crate::extractors::authentication::policies::*;
 use crate::extractors::authentication::GuardedData;
 use crate::extractors::payload::Payload;
 use crate::extractors::sequential_extractor::SeqHandler;
-use crate::routes::indexes::enterprise_edition::proxy::{
-    proxy, task_network_and_check_leader_and_version, Body,
-};
+use crate::routes::indexes::current_edition::proxy::{proxy, Body};
 use crate::routes::indexes::search::fix_sort_query_parameters;
 use crate::routes::{
     get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
@@ -344,7 +342,6 @@ pub async fn delete_document(
     let DocumentParam { index_uid, document_id } = path.into_inner();
     let index_uid = IndexUid::try_from(index_uid)?;
     let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -362,23 +359,16 @@ pub async fn delete_document(
     };
     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
-    let mut task = {
+    let task = {
         let index_scheduler = index_scheduler.clone();
         tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(
-                task,
-                uid,
-                custom_metadata,
-                dry_run,
-                task_network,
-            )
+            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
         })
         .await??
     };

-    if let Some(task_network) = task.network.take() {
-        proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
-            .await?;
+    if network.sharding() && !dry_run {
+        proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -977,7 +967,6 @@ async fn document_addition(
 ) -> Result<SummarizedTaskView, MeilisearchHttpError> {
     let mime_type = extract_mime_type(req)?;
     let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(req, &network)?;

     let format = match (
         mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),
@@ -1096,16 +1085,9 @@ async fn document_addition(
         index_uid: index_uid.to_string(),
     };

-    // FIXME: not new to #6000, but _any_ error here will cause the payload to unduly persist
     let scheduler = index_scheduler.clone();
-    let mut task = match tokio::task::spawn_blocking(move || {
-        scheduler.register_with_custom_metadata(
-            task,
-            task_id,
-            custom_metadata,
-            dry_run,
-            task_network,
-        )
+    let task = match tokio::task::spawn_blocking(move || {
+        scheduler.register_with_custom_metadata(task, task_id, custom_metadata, dry_run)
     })
     .await?
     {
@@ -1116,13 +1098,12 @@ async fn document_addition(
         }
     };

-    if let Some(task_network) = task.network.take() {
+    if network.sharding() {
         if let Some(file) = file {
             proxy(
                 &index_scheduler,
-                Some(&index_uid),
+                &index_uid,
                 req,
-                task_network,
                 network,
                 Body::with_ndjson_payload(file),
                 &task,
@@ -1213,7 +1194,6 @@ pub async fn delete_documents_batch(

     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -1234,31 +1214,16 @@ pub async fn delete_documents_batch(
         KindWithContent::DocumentDeletion { index_uid: index_uid.to_string(), documents_ids: ids };
     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
-    let mut task = {
+    let task = {
         let index_scheduler = index_scheduler.clone();
         tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(
-                task,
-                uid,
-                custom_metadata,
-                dry_run,
-                task_network,
-            )
+            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
         })
         .await??
     };

-    if let Some(task_network) = task.network.take() {
-        proxy(
-            &index_scheduler,
-            Some(&index_uid),
-            &req,
-            task_network,
-            network,
-            Body::inline(body),
-            &task,
-        )
-        .await?;
+    if network.sharding() && !dry_run {
+        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -1321,7 +1286,6 @@ pub async fn delete_documents_by_filter(
     let index_uid = index_uid.into_inner();
     let filter = body.into_inner();
     let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -1348,31 +1312,16 @@ pub async fn delete_documents_by_filter(

     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
-    let mut task = {
+    let task = {
         let index_scheduler = index_scheduler.clone();
         tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(
-                task,
-                uid,
-                custom_metadata,
-                dry_run,
-                task_network,
-            )
+            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
        })
        .await??
    };

-    if let Some(task_network) = task.network.take() {
-        proxy(
-            &index_scheduler,
-            Some(&index_uid),
-            &req,
-            task_network,
-            network,
-            Body::inline(filter),
-            &task,
-        )
-        .await?;
+    if network.sharding() && !dry_run {
+        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -1472,7 +1421,6 @@ pub async fn edit_documents_by_function(
         .check_edit_documents_by_function("Using the documents edit route")?;

     let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let index_uid = index_uid.into_inner();
@@ -1519,31 +1467,16 @@ pub async fn edit_documents_by_function(

     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
-    let mut task = {
+    let task = {
         let index_scheduler = index_scheduler.clone();
         tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(
-                task,
-                uid,
-                custom_metadata,
-                dry_run,
-                task_network,
-            )
+            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
        })
        .await??
    };

-    if let Some(task_network) = task.network.take() {
-        proxy(
-            &index_scheduler,
-            Some(&index_uid),
-            &req,
-            task_network,
-            network,
-            Body::inline(body),
-            &task,
-        )
-        .await?;
+    if network.sharding() && !dry_run {
+        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -1592,7 +1525,6 @@ pub async fn clear_all_documents(
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let network = index_scheduler.network();
     let CustomMetadataQuery { custom_metadata } = params.into_inner();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -1608,24 +1540,17 @@ pub async fn clear_all_documents(
     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;

-    let mut task = {
+    let task = {
         let index_scheduler = index_scheduler.clone();

         tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(
-                task,
-                uid,
-                custom_metadata,
-                dry_run,
-                task_network,
-            )
+            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
        })
        .await??
    };

-    if let Some(task_network) = task.network.take() {
-        proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
-            .await?;
+    if network.sharding() && !dry_run {
+        proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
     }

     let task: SummarizedTaskView = task.into();
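Every handler above now follows the same shape: register the task on a blocking thread, then fan the request out to the other remotes only when sharding is enabled and the request is not a dry run. A condensed sketch of that shape (the helper name and the parameter types are assumptions, not code from the diff):

use crate::routes::indexes::current_edition::proxy::{proxy, Body};

// Condensed sketch of the handler pattern above; parameter types are assumed.
async fn register_and_maybe_proxy<T: serde::Serialize>(
    index_scheduler: actix_web::web::Data<index_scheduler::IndexScheduler>,
    index_uid: &str,
    req: &actix_web::HttpRequest,
    kind: meilisearch_types::tasks::KindWithContent,
    uid: Option<u32>,
    custom_metadata: Option<String>,
    dry_run: bool,
    body: Body<T>,
) -> Result<crate::routes::SummarizedTaskView, meilisearch_types::error::ResponseError> {
    let network = index_scheduler.network();
    // The scheduler does synchronous I/O, so registration happens on a blocking thread.
    let task = {
        let scheduler = index_scheduler.clone();
        tokio::task::spawn_blocking(move || {
            scheduler.register_with_custom_metadata(kind, uid, custom_metadata, dry_run)
        })
        .await??
    };
    // Only proxy real (non-dry-run) writes, and only when sharding is enabled.
    if network.sharding() && !dry_run {
        proxy(&index_scheduler, index_uid, req, network, body, &task).await?;
    }
    Ok(task.into())
}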
@@ -3,7 +3,6 @@
 // Use of this source code is governed by the Business Source License 1.1,
 // as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

-use std::borrow::Cow;
 use std::collections::BTreeMap;
 use std::fs::File;

@@ -11,41 +10,25 @@ use actix_web::http::header::CONTENT_TYPE;
 use actix_web::HttpRequest;
 use bytes::Bytes;
 use index_scheduler::IndexScheduler;
-use meilisearch_types::enterprise_edition::network::Remote;
 use meilisearch_types::error::ResponseError;
-use meilisearch_types::milli::DocumentId;
-use meilisearch_types::tasks::enterprise_edition::network::headers::{
-    PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
-    PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
-    PROXY_ORIGIN_NETWORK_VERSION_HEADER, PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
-};
-use meilisearch_types::tasks::enterprise_edition::network::{
-    DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
-};
-use meilisearch_types::tasks::Task;
+use meilisearch_types::tasks::{Origin, RemoteTask, TaskNetwork};
 use reqwest::StatusCode;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
-use uuid::Uuid;

 use crate::error::MeilisearchHttpError;
-pub use crate::routes::indexes::enterprise_edition::proxy::error::{
-    ProxyError, ReqwestErrorWithoutUrl,
-};
+use crate::routes::indexes::enterprise_edition::proxy::error::{
+    ProxyDocumentChangeError, ReqwestErrorWithoutUrl,
+};
 use crate::routes::SummarizedTaskView;

-pub enum Body<T, F>
-where
-    T: serde::Serialize,
-    F: FnMut(&str, &Remote, &mut T),
-{
+pub enum Body<T: serde::Serialize> {
     NdJsonPayload(File),
     Inline(T),
-    Generated(T, F),
     None,
 }

-impl Body<(), fn(&str, &Remote, &mut ())> {
+impl Body<()> {
     pub fn with_ndjson_payload(file: File) -> Self {
         Self::NdJsonPayload(file)
     }
@@ -55,146 +38,7 @@ impl Body<(), fn(&str, &Remote, &mut ())> {
     }
 }

-impl<T> Body<T, fn(&str, &Remote, &mut T)>
-where
-    T: serde::Serialize,
-{
-    pub fn inline(payload: T) -> Self {
-        Self::Inline(payload)
-    }
-}
-
-impl<T, F> Body<T, F>
-where
-    T: serde::Serialize,
-    F: FnMut(&str, &Remote, &mut T),
-{
-    pub fn generated(initial: T, f: F) -> Self {
-        Self::Generated(initial, f)
-    }
-}
-
-impl<T, F> Body<T, F>
-where
-    T: serde::Serialize,
-    F: FnMut(&str, &Remote, &mut T),
-{
-    pub fn into_bytes_iter(
-        self,
-        remotes: impl IntoIterator<Item = (String, Remote)>,
-    ) -> Result<
-        impl Iterator<Item = (Option<Bytes>, (String, Remote))>,
-        meilisearch_types::milli::Error,
-    > {
-        let bytes = match self {
-            Body::NdJsonPayload(file) => {
-                Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
-            }
-
-            Body::Inline(payload) => {
-                Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
-            }
-
-            Body::None => None,
-
-            Body::Generated(mut initial, mut f) => {
-                return Ok(either::Right(remotes.into_iter().map(move |(name, remote)| {
-                    f(&name, &remote, &mut initial);
-                    let bytes =
-                        Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()));
-                    (bytes, (name, remote))
-                })));
-            }
-        };
-        Ok(either::Left(std::iter::repeat(bytes).zip(remotes)))
-    }
-
-    pub fn into_bytes(
-        self,
-        remote_name: &str,
-        remote: &Remote,
-    ) -> Result<Option<Bytes>, meilisearch_types::milli::Error> {
-        Ok(match self {
-            Body::NdJsonPayload(file) => {
-                Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
-            }
-
-            Body::Inline(payload) => {
-                Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
-            }
-
-            Body::None => None,
-
-            Body::Generated(mut initial, mut f) => {
-                f(remote_name, remote, &mut initial);
-                Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()))
-            }
-        })
-    }
-}
-
-/// Parses the header to determine if this task is a duplicate and originates with a remote.
-///
-/// If not, checks whether this remote is the leader and return `MeilisearchHttpError::NotLeader` if not.
-///
-/// If there is no leader, returns `Ok(None)`
-///
-/// # Errors
-///
-/// - `MeiliearchHttpError::NotLeader`: if the following are true simultaneously:
-///   1. The task originates with the current node
-///   2. There's a declared `leader`
-///   3. The declared leader is **not** the current node
-/// - `MeilisearchHttpError::InvalidHeaderValue`: if headers cannot be parsed as a task network.
-/// - `MeilisearchHttpError::InconsistentTaskNetwork`: if only some of the headers are present.
-pub fn task_network_and_check_leader_and_version(
-    req: &HttpRequest,
-    network: &meilisearch_types::enterprise_edition::network::Network,
-) -> Result<Option<TaskNetwork>, MeilisearchHttpError> {
-    let task_network =
-        match (origin_from_req(req)?, import_data_from_req(req)?, import_metadata_from_req(req)?) {
-            (Some(network_change), Some(import_from), Some(metadata)) => {
-                TaskNetwork::Import { import_from, network_change, metadata }
-            }
-            (Some(origin), None, None) => TaskNetwork::Origin { origin },
-            (None, None, None) => {
-                match (network.leader.as_deref(), network.local.as_deref()) {
-                    // 1. Always allowed if there is no leader
-                    (None, _) => return Ok(None),
-                    // 2. Allowed if the leader is self
-                    (Some(leader), Some(this)) if leader == this => (),
-                    // 3. Any other change is disallowed
-                    (Some(leader), _) => {
-                        return Err(MeilisearchHttpError::NotLeader { leader: leader.to_string() })
-                    }
-                }
-
-                TaskNetwork::Remotes {
-                    remote_tasks: Default::default(),
-                    network_version: network.version,
-                }
-            }
-            // all good cases were matched, so this is always an error
-            (origin, import_from, metadata) => {
-                return Err(MeilisearchHttpError::InconsistentTaskNetworkHeaders {
-                    is_missing_origin: origin.is_none(),
-                    is_missing_import: import_from.is_none(),
-                    is_missing_import_metadata: metadata.is_none(),
-                })
-            }
-        };
-
-    if task_network.network_version() < network.version {
-        return Err(MeilisearchHttpError::NetworkVersionTooOld {
-            received: task_network.network_version(),
-            expected_at_least: network.version,
-        });
-    }
-
-    Ok(Some(task_network))
-}
-
-/// Updates the task description and, if necessary, proxies the passed request to the network and update the task description.
+/// If necessary, proxies the passed request to the network and update the task description.
 ///
 /// This function reads the custom headers from the request to determine if must proxy the request or if the request
 /// has already been proxied.
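The removed `task_network_and_check_leader_and_version` helper above reduces, for locally originated tasks, to a three-way decision on `(network.leader, network.local)`; a minimal sketch of just that decision (types simplified to plain strings, not the real signature):

// Minimal sketch of the leader check performed by the removed helper
// (the real code also builds a TaskNetwork and validates the network version headers).
fn check_leader(leader: Option<&str>, local: Option<&str>) -> Result<(), String> {
    match (leader, local) {
        // 1. Always allowed if there is no leader.
        (None, _) => Ok(()),
        // 2. Allowed if the leader is this very node.
        (Some(leader), Some(this)) if leader == this => Ok(()),
        // 3. Otherwise only the leader may receive new tasks.
        (Some(leader), _) => Err(format!("not the leader; only `{leader}` can receive new tasks")),
    }
}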
@@ -204,254 +48,152 @@ pub fn task_network_and_check_leader_and_version(
 /// with the task ids from the task queues of the remotes.
 /// - when the request has already been proxied, the custom headers contains information about the remote that created the initial task.
 ///   This information is copied to the passed task.
-///
-/// # Returns
-///
-/// The updated task. The task is read back from the database to avoid erasing concurrent changes.
-pub async fn proxy<T, F>(
+pub async fn proxy<T: serde::Serialize>(
     index_scheduler: &IndexScheduler,
-    index_uid: Option<&str>,
+    index_uid: &str,
     req: &HttpRequest,
-    mut task_network: DbTaskNetwork,
-    network: meilisearch_types::enterprise_edition::network::Network,
-    body: Body<T, F>,
+    network: meilisearch_types::network::Network,
+    body: Body<T>,
     task: &meilisearch_types::tasks::Task,
-) -> Result<Task, MeilisearchHttpError>
-where
-    T: serde::Serialize,
-    F: FnMut(&str, &Remote, &mut T),
-{
-    if let DbTaskNetwork::Remotes { remote_tasks, network_version } = &mut task_network {
-        let network_version = *network_version;
-        let this = network
-            .local
-            .as_deref()
-            .expect("inconsistent `network.sharding` and `network.self`")
-            .to_owned();
-
-        let content_type = match &body {
-            // for file bodies, force x-ndjson
-            Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
-            // otherwise get content type from request
-            _ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
-        };
-
-        let mut in_flight_remote_queries = BTreeMap::new();
-        let client = reqwest::ClientBuilder::new()
-            .connect_timeout(std::time::Duration::from_secs(3))
-            .build()
-            .unwrap();
-
-        let method = from_old_http_method(req.method());
-
-        // send payload to all remotes
-        for (body, (node_name, node)) in body
-            .into_bytes_iter(network.remotes.into_iter().filter(|(name, _)| name.as_str() != this))
-            .map_err(|err| {
-                MeilisearchHttpError::from_milli(err, index_uid.map(ToOwned::to_owned))
-            })?
-        {
-            tracing::trace!(node_name, "proxying task to remote");
-
-            let client = client.clone();
-            let api_key = node.write_api_key;
-            let this = this.clone();
-            let method = method.clone();
-            let path_and_query = req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");
-
-            in_flight_remote_queries.insert(
-                node_name,
-                tokio::spawn({
-                    let url = format!("{}{}", node.url, path_and_query);
-
-                    let url_encoded_this = urlencoding::encode(&this).into_owned();
-                    let url_encoded_task_uid = task.uid.to_string(); // it's url encoded i promize
-
-                    let content_type = content_type.map(|b| b.to_owned());
-
-                    let backoff = backoff::ExponentialBackoffBuilder::new()
-                        .with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
-                        .build();
-
-                    backoff::future::retry(backoff, move || {
-                        let url = url.clone();
-                        let client = client.clone();
-                        let url_encoded_this = url_encoded_this.clone();
-                        let url_encoded_task_uid = url_encoded_task_uid.clone();
-                        let content_type = content_type.clone();
-
-                        let body = body.clone();
-                        let api_key = api_key.clone();
-                        let method = method.clone();
-
-                        async move {
-                            try_proxy(
-                                method,
-                                &url,
-                                content_type.as_deref(),
-                                network_version,
-                                api_key.as_deref(),
-                                &client,
-                                &url_encoded_this,
-                                &url_encoded_task_uid,
-                                body,
-                            )
-                            .await
-                        }
-                    })
-                }),
-            );
-        }
+) -> Result<(), MeilisearchHttpError> {
+    match origin_from_req(req)? {
+        Some(origin) => {
+            index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
         }
+        None => {
+            let this = network
+                .local
+                .as_deref()
+                .expect("inconsistent `network.sharding` and `network.self`")
+                .to_owned();

-        // wait for all in-flight queries to finish and collect their results
-        for (node_name, handle) in in_flight_remote_queries {
-            match handle.await {
-                Ok(Ok(res)) => {
-                    let task_uid = res.task_uid;
+            let content_type = match &body {
+                // for file bodies, force x-ndjson
+                Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
+                // otherwise get content type from request
+                _ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
+            };

-                    remote_tasks.insert(node_name, Ok(task_uid).into());
+            let body = match body {
+                Body::NdJsonPayload(file) => Some(Bytes::from_owner(unsafe {
+                    memmap2::Mmap::map(&file).map_err(|err| {
+                        MeilisearchHttpError::from_milli(err.into(), Some(index_uid.to_owned()))
+                    })?
+                })),
+
+                Body::Inline(payload) => {
+                    Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
                 }
-                Ok(Err(error)) => {
-                    remote_tasks.insert(node_name, Err(error.as_response_error()).into());
-                }
-                Err(panic) => match panic.try_into_panic() {
-                    Ok(panic) => {
-                        let msg = match panic.downcast_ref::<&'static str>() {
-                            Some(s) => *s,
-                            None => match panic.downcast_ref::<String>() {
-                                Some(s) => &s[..],
-                                None => "Box<dyn Any>",
-                            },
-                        };
-                        remote_tasks.insert(
-                            node_name,
-                            Err(ResponseError::from_msg(
-                                msg.to_string(),
-                                meilisearch_types::error::Code::Internal,
-                            ))
-                            .into(),
-                        );
-                    }
-                    Err(_) => {
-                        tracing::error!("proxy task was unexpectedly cancelled")
-                    }
-                },
+                Body::None => None,
+            };
+
+            let mut in_flight_remote_queries = BTreeMap::new();
+            let client = reqwest::ClientBuilder::new()
+                .connect_timeout(std::time::Duration::from_secs(3))
+                .build()
+                .unwrap();
+            let method = from_old_http_method(req.method());
+            // send payload to all remotes
+            for (node_name, node) in
+                network.remotes.into_iter().filter(|(name, _)| name.as_str() != this)
+            {
+                let body = body.clone();
+                let client = client.clone();
+                let api_key = node.write_api_key;
+                let this = this.clone();
+                let method = method.clone();
+                let path_and_query =
+                    req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");
+                in_flight_remote_queries.insert(
+                    node_name,
+                    tokio::spawn({
+                        let url = format!("{}{}", node.url, path_and_query);
+
+                        let url_encoded_this = urlencoding::encode(&this).into_owned();
+                        let url_encoded_task_uid = task.uid.to_string(); // it's url encoded i promize
+
+                        let content_type = content_type.map(|b| b.to_owned());
+
+                        let backoff = backoff::ExponentialBackoffBuilder::new()
+                            .with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
+                            .build();
+
+                        backoff::future::retry(backoff, move || {
+                            let url = url.clone();
+                            let client = client.clone();
+                            let url_encoded_this = url_encoded_this.clone();
+                            let url_encoded_task_uid = url_encoded_task_uid.clone();
+                            let content_type = content_type.clone();
+
+                            let body = body.clone();
+                            let api_key = api_key.clone();
+                            let method = method.clone();
+
+                            async move {
+                                try_proxy(
+                                    method,
+                                    &url,
+                                    content_type.as_deref(),
+                                    api_key.as_deref(),
+                                    &client,
+                                    &url_encoded_this,
+                                    &url_encoded_task_uid,
+                                    body,
+                                )
+                                .await
+                            }
+                        })
+                    }),
+                );
             }
+
+            // wait for all in-flight queries to finish and collect their results
+            let mut remote_tasks: BTreeMap<String, RemoteTask> = BTreeMap::new();
+            for (node_name, handle) in in_flight_remote_queries {
+                match handle.await {
+                    Ok(Ok(res)) => {
+                        let task_uid = res.task_uid;
+
+                        remote_tasks.insert(node_name, Ok(task_uid).into());
+                    }
+                    Ok(Err(error)) => {
+                        remote_tasks.insert(node_name, Err(error.as_response_error()).into());
+                    }
+                    Err(panic) => match panic.try_into_panic() {
+                        Ok(panic) => {
+                            let msg = match panic.downcast_ref::<&'static str>() {
+                                Some(s) => *s,
+                                None => match panic.downcast_ref::<String>() {
+                                    Some(s) => &s[..],
+                                    None => "Box<dyn Any>",
+                                },
+                            };
+                            remote_tasks.insert(
+                                node_name,
+                                Err(ResponseError::from_msg(
+                                    msg.to_string(),
+                                    meilisearch_types::error::Code::Internal,
+                                ))
+                                .into(),
+                            );
+                        }
+                        Err(_) => {
+                            tracing::error!("proxy task was unexpectedly cancelled")
+                        }
+                    },
+                }
+            }
+
+            // edit details to contain the return values from the remotes
+            index_scheduler.set_task_network(task.uid, TaskNetwork::Remotes { remote_tasks })?;
         }
     }

-    Ok(index_scheduler.set_task_network(task.uid, task_network)?)
-}
-
-pub async fn send_request<T, F, U>(
-    path_and_query: &str,
-    method: reqwest::Method,
-    content_type: Option<String>,
-    body: Body<T, F>,
-    remote_name: &str,
-    remote: &Remote,
-) -> Result<U, ProxyError>
-where
-    T: serde::Serialize,
-    F: FnMut(&str, &Remote, &mut T),
-    U: DeserializeOwned,
-{
-    let content_type = match &body {
-        // for file bodies, force x-ndjson
-        Body::NdJsonPayload(_) => Some("application/x-ndjson".into()),
-        // otherwise get content type from request
-        _ => content_type,
-    };
-
-    let body = body.into_bytes(remote_name, remote).map_err(Box::new)?;
-
-    let client = reqwest::ClientBuilder::new()
-        .connect_timeout(std::time::Duration::from_secs(3))
-        .build()
-        .unwrap();
-
-    let url = format!("{}{}", remote.url, path_and_query);
-
-    // send payload to remote
-    tracing::trace!(remote_name, "sending request to remote");
-    let api_key = remote.write_api_key.clone();
-
-    let backoff = backoff::ExponentialBackoffBuilder::new()
-        .with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
-        .build();
-
-    backoff::future::retry(backoff, move || {
-        let url = url.clone();
-        let client = client.clone();
-        let content_type = content_type.clone();
-
-        let body = body.clone();
-        let api_key = api_key.clone();
-        let method = method.clone();
-
-        async move {
-            let request = client.request(method, url).timeout(std::time::Duration::from_secs(30));
-            let request = if let Some(body) = body { request.body(body) } else { request };
-            let request =
-                if let Some(api_key) = api_key { request.bearer_auth(api_key) } else { request };
-            let request = if let Some(content_type) = content_type {
-                request.header(CONTENT_TYPE.as_str(), content_type)
-            } else {
-                request
-            };
-
-            let response = request.send().await;
-            let response = match response {
-                Ok(response) => response,
-                Err(error) if error.is_timeout() => {
-                    return Err(backoff::Error::transient(ProxyError::Timeout))
-                }
-                Err(error) => {
-                    return Err(backoff::Error::transient(ProxyError::CouldNotSendRequest(
-                        ReqwestErrorWithoutUrl::new(error),
-                    )))
-                }
-            };
-
-            match response.status() {
-                status_code if status_code.is_success() => (),
-                StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
-                    return Err(backoff::Error::Permanent(ProxyError::AuthenticationError))
-                }
-                status_code if status_code.is_client_error() => {
-                    let response = parse_error(response).await;
-                    return Err(backoff::Error::Permanent(ProxyError::BadRequest {
-                        status_code,
-                        response,
-                    }));
-                }
-                status_code if status_code.is_server_error() => {
-                    let response = parse_error(response).await;
-                    return Err(backoff::Error::transient(ProxyError::RemoteError {
-                        status_code,
-                        response,
-                    }));
-                }
-                status_code => {
-                    tracing::warn!(
-                        status_code = status_code.as_u16(),
-                        "remote replied with unexpected status code"
-                    );
-                }
-            }
-
-            let response: U = match parse_response(response).await {
-                Ok(response) => response,
-                Err(response) => {
-                    return Err(backoff::Error::transient(ProxyError::CouldNotParseResponse {
-                        response,
-                    }))
-                }
-            };
-            Ok(response)
-        }
-    })
-    .await
+    Ok(())
 }

 fn from_old_http_method(method: &actix_http::Method) -> reqwest::Method {
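Both the old and the new `proxy` wrap each remote call in the same retry envelope: a reqwest client with a 3-second connect timeout, a 30-second per-request timeout, and an exponential backoff capped at 25 seconds. A standalone sketch of that envelope (the URL is hypothetical; transient errors are retried, permanent ones would not be):

// Standalone sketch of the retry shape used around the proxied requests above.
async fn call_with_backoff() -> Result<String, reqwest::Error> {
    let client = reqwest::ClientBuilder::new()
        .connect_timeout(std::time::Duration::from_secs(3))
        .build()
        .unwrap();
    let backoff = backoff::ExponentialBackoffBuilder::new()
        .with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
        .build();
    backoff::future::retry(backoff, move || {
        let client = client.clone();
        async move {
            // Every attempt is bounded by a 30-second request timeout.
            let response = client
                .get("http://localhost:7700/health")
                .timeout(std::time::Duration::from_secs(30))
                .send()
                .await
                .map_err(backoff::Error::transient)?;
            response.text().await.map_err(backoff::Error::transient)
        }
    })
    .await
}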
@@ -474,18 +216,16 @@ async fn try_proxy(
     method: reqwest::Method,
     url: &str,
     content_type: Option<&[u8]>,
-    network_version: Uuid,
     api_key: Option<&str>,
     client: &reqwest::Client,
     url_encoded_this: &str,
     url_encoded_task_uid: &str,
     body: Option<Bytes>,
-) -> Result<SummarizedTaskView, backoff::Error<ProxyError>> {
+) -> Result<SummarizedTaskView, backoff::Error<ProxyDocumentChangeError>> {
     let request = client.request(method, url).timeout(std::time::Duration::from_secs(30));
     let request = if let Some(body) = body { request.body(body) } else { request };
     let request = if let Some(api_key) = api_key { request.bearer_auth(api_key) } else { request };
     let request = request.header(PROXY_ORIGIN_TASK_UID_HEADER, url_encoded_task_uid);
-    let request = request.header(PROXY_ORIGIN_NETWORK_VERSION_HEADER, &network_version.to_string());
     let request = request.header(PROXY_ORIGIN_REMOTE_HEADER, url_encoded_this);
     let request = if let Some(content_type) = content_type {
         request.header(CONTENT_TYPE.as_str(), content_type)
@@ -497,10 +237,10 @@ async fn try_proxy(
     let response = match response {
         Ok(response) => response,
         Err(error) if error.is_timeout() => {
-            return Err(backoff::Error::transient(ProxyError::Timeout))
+            return Err(backoff::Error::transient(ProxyDocumentChangeError::Timeout))
         }
         Err(error) => {
-            return Err(backoff::Error::transient(ProxyError::CouldNotSendRequest(
+            return Err(backoff::Error::transient(ProxyDocumentChangeError::CouldNotSendRequest(
                 ReqwestErrorWithoutUrl::new(error),
             )))
         }
@@ -509,18 +249,18 @@ async fn try_proxy(
     match response.status() {
         status_code if status_code.is_success() => (),
         StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
-            return Err(backoff::Error::Permanent(ProxyError::AuthenticationError))
+            return Err(backoff::Error::Permanent(ProxyDocumentChangeError::AuthenticationError))
         }
         status_code if status_code.is_client_error() => {
             let response = parse_error(response).await;
-            return Err(backoff::Error::Permanent(ProxyError::BadRequest {
+            return Err(backoff::Error::Permanent(ProxyDocumentChangeError::BadRequest {
                 status_code,
                 response,
             }));
         }
         status_code if status_code.is_server_error() => {
             let response = parse_error(response).await;
-            return Err(backoff::Error::transient(ProxyError::RemoteError {
+            return Err(backoff::Error::transient(ProxyDocumentChangeError::RemoteError {
                 status_code,
                 response,
             }));
@@ -536,7 +276,9 @@ async fn try_proxy(
     let response = match parse_response(response).await {
         Ok(response) => response,
         Err(response) => {
-            return Err(backoff::Error::transient(ProxyError::CouldNotParseResponse { response }))
+            return Err(backoff::Error::transient(
+                ProxyDocumentChangeError::CouldNotParseResponse { response },
+            ))
         }
     };

@@ -574,11 +316,11 @@ async fn parse_response<T: DeserializeOwned>(
 }

 mod error {
-    use meilisearch_types::error::{ErrorCode as _, ResponseError};
+    use meilisearch_types::error::ResponseError;
     use reqwest::StatusCode;

     #[derive(Debug, thiserror::Error)]
-    pub enum ProxyError {
+    pub enum ProxyDocumentChangeError {
         #[error("{0}")]
         CouldNotSendRequest(ReqwestErrorWithoutUrl),
         #[error("could not authenticate against the remote host\n - hint: check that the remote instance was registered with a valid API key having the `documents.add` action")]
@@ -594,25 +336,19 @@ mod error {
         Timeout,
         #[error("remote host responded with code {}{}", status_code.as_u16(), response_from_remote(response))]
         RemoteError { status_code: StatusCode, response: Result<String, ReqwestErrorWithoutUrl> },
-        #[error("error while preparing the request: {error}")]
-        Milli {
-            #[from]
-            error: Box<meilisearch_types::milli::Error>,
-        },
     }

-    impl ProxyError {
+    impl ProxyDocumentChangeError {
         pub fn as_response_error(&self) -> ResponseError {
             use meilisearch_types::error::Code;
             let message = self.to_string();
             let code = match self {
-                ProxyError::CouldNotSendRequest(_) => Code::RemoteCouldNotSendRequest,
-                ProxyError::AuthenticationError => Code::RemoteInvalidApiKey,
-                ProxyError::BadRequest { .. } => Code::RemoteBadRequest,
-                ProxyError::Timeout => Code::RemoteTimeout,
-                ProxyError::RemoteError { .. } => Code::RemoteRemoteError,
-                ProxyError::CouldNotParseResponse { .. } => Code::RemoteBadResponse,
-                ProxyError::Milli { error } => error.error_code(),
+                ProxyDocumentChangeError::CouldNotSendRequest(_) => Code::RemoteCouldNotSendRequest,
+                ProxyDocumentChangeError::AuthenticationError => Code::RemoteInvalidApiKey,
+                ProxyDocumentChangeError::BadRequest { .. } => Code::RemoteBadRequest,
+                ProxyDocumentChangeError::Timeout => Code::RemoteTimeout,
+                ProxyDocumentChangeError::RemoteError { .. } => Code::RemoteRemoteError,
+                ProxyDocumentChangeError::CouldNotParseResponse { .. } => Code::RemoteBadResponse,
             };
             ResponseError::from_msg(message, code)
         }
@@ -639,23 +375,25 @@ mod error {
     }
 }

+pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
+pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
+
 pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchHttpError> {
-    let (remote_name, task_uid, network_version) = match (
+    let (remote_name, task_uid) = match (
         req.headers().get(PROXY_ORIGIN_REMOTE_HEADER),
         req.headers().get(PROXY_ORIGIN_TASK_UID_HEADER),
-        req.headers().get(PROXY_ORIGIN_NETWORK_VERSION_HEADER),
     ) {
-        (None, None, _) => return Ok(None),
-        (None, Some(_), _) => {
+        (None, None) => return Ok(None),
+        (None, Some(_)) => {
             return Err(MeilisearchHttpError::InconsistentOriginHeaders { is_remote_missing: true })
         }
-        (Some(_), None, _) => {
+        (Some(_), None) => {
             return Err(MeilisearchHttpError::InconsistentOriginHeaders {
                 is_remote_missing: false,
             })
         }
-        (Some(remote_name), Some(task_uid), network_version) => {
-            let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
+        (Some(remote_name), Some(task_uid)) => (
+            urlencoding::decode(remote_name.to_str().map_err(|err| {
                 MeilisearchHttpError::InvalidHeaderValue {
                     header_name: PROXY_ORIGIN_REMOTE_HEADER,
                     msg: format!("while parsing remote name as UTF-8: {err}"),
@@ -664,8 +402,8 @@ pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchH
             .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                 header_name: PROXY_ORIGIN_REMOTE_HEADER,
                 msg: format!("while URL-decoding remote name: {err}"),
-            })?;
-            let task_uid = urlencoding::decode(task_uid.to_str().map_err(|err| {
+            })?,
+            urlencoding::decode(task_uid.to_str().map_err(|err| {
                 MeilisearchHttpError::InvalidHeaderValue {
                     header_name: PROXY_ORIGIN_TASK_UID_HEADER,
                     msg: format!("while parsing task UID as UTF-8: {err}"),
@@ -674,235 +412,15 @@ pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchH
             .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                 header_name: PROXY_ORIGIN_TASK_UID_HEADER,
                 msg: format!("while URL-decoding task UID: {err}"),
-            })?;
-            let network_version = match network_version {
-                Some(network_version) => {
-                    urlencoding::decode(network_version.to_str().map_err(|err| {
-                        MeilisearchHttpError::InvalidHeaderValue {
-                            header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
-                            msg: format!("while parsing network version as UTF-8: {err}"),
-                        }
-                    })?)
-                    .map_err(|err| {
-                        MeilisearchHttpError::InvalidHeaderValue {
-                            header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
-                            msg: format!("while URL-decoding network version: {err}"),
-                        }
-                    })?
-                }
-                None => Cow::Borrowed("0"),
-            };
-            (remote_name, task_uid, network_version)
-        }
+            })?,
+        ),
     };

-    let task_uid: u32 =
+    let task_uid: usize =
         task_uid.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
             header_name: PROXY_ORIGIN_TASK_UID_HEADER,
             msg: format!("while parsing the task UID as an integer: {err}"),
         })?;

-    let network_version: Uuid = Uuid::parse_str(&network_version).map_err(|err| {
-        MeilisearchHttpError::InvalidHeaderValue {
-            header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
-            msg: format!("while parsing the network version as an UUID: {err}"),
-        }
-    })?;
-
-    Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid, network_version }))
-}
+    Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid }))
-
-pub fn import_data_from_req(req: &HttpRequest) -> Result<Option<ImportData>, MeilisearchHttpError> {
-    let (remote_name, index_name, documents) = match (
-        req.headers().get(PROXY_IMPORT_REMOTE_HEADER),
-        req.headers().get(PROXY_IMPORT_INDEX_HEADER),
-        req.headers().get(PROXY_IMPORT_DOCS_HEADER),
-    ) {
-        (None, None, None) => return Ok(None),
-        (Some(remote_name), Some(index_name), Some(documents)) => {
-            let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
-                MeilisearchHttpError::InvalidHeaderValue {
-                    header_name: PROXY_IMPORT_REMOTE_HEADER,
-                    msg: format!("while parsing import remote name as UTF-8: {err}"),
-                }
-            })?)
-            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
-                header_name: PROXY_IMPORT_REMOTE_HEADER,
-                msg: format!("while URL-decoding import remote name: {err}"),
-            })?;
-
-            let index_name = urlencoding::decode(index_name.to_str().map_err(|err| {
-                MeilisearchHttpError::InvalidHeaderValue {
-                    header_name: PROXY_IMPORT_INDEX_HEADER,
-                    msg: format!("while parsing import index name as UTF-8: {err}"),
-                }
-            })?)
-            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
-                header_name: PROXY_IMPORT_INDEX_HEADER,
-                msg: format!("while URL-decoding import index name: {err}"),
-            })?;
-
-            let documents = urlencoding::decode(documents.to_str().map_err(|err| {
-                MeilisearchHttpError::InvalidHeaderValue {
-                    header_name: PROXY_IMPORT_DOCS_HEADER,
-                    msg: format!("while parsing documents as UTF-8: {err}"),
-                }
-            })?)
-            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
-                header_name: PROXY_IMPORT_DOCS_HEADER,
-                msg: format!("while URL-decoding documents: {err}"),
-            })?;
-            (remote_name, Some(index_name), documents)
-        }
-        (Some(remote_name), None, Some(documents)) => {
-            let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
-                MeilisearchHttpError::InvalidHeaderValue {
-                    header_name: PROXY_IMPORT_REMOTE_HEADER,
-                    msg: format!("while parsing import remote name as UTF-8: {err}"),
-                }
-            })?)
-            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
-                header_name: PROXY_IMPORT_REMOTE_HEADER,
-                msg: format!("while URL-decoding import remote name: {err}"),
-            })?;
-
-            let documents = urlencoding::decode(documents.to_str().map_err(|err| {
-                MeilisearchHttpError::InvalidHeaderValue {
-                    header_name: PROXY_IMPORT_DOCS_HEADER,
-                    msg: format!("while parsing documents as UTF-8: {err}"),
-                }
-            })?)
-            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
-                header_name: PROXY_IMPORT_DOCS_HEADER,
-                msg: format!("while URL-decoding documents: {err}"),
-            })?;
-            (remote_name, None, documents)
-        }
-        // catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some, Some, Some)
-        (remote_name, index_name, documents) => {
-            return Err(MeilisearchHttpError::InconsistentImportHeaders {
-                is_remote_missing: remote_name.is_none(),
-                is_index_missing: index_name.is_none(),
-                is_docs_missing: documents.is_none(),
-            })
-        }
-    };
-
-    let document_count: u64 =
-        documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
-            header_name: PROXY_IMPORT_DOCS_HEADER,
-            msg: format!("while parsing the documents as an integer: {err}"),
-        })?;
-
-    Ok(Some(ImportData {
-        remote_name: remote_name.to_string(),
-        index_name: index_name.map(|index_name| index_name.to_string()),
|
|
||||||
document_count,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn import_metadata_from_req(
|
|
||||||
req: &HttpRequest,
|
|
||||||
) -> Result<Option<ImportMetadata>, MeilisearchHttpError> {
|
|
||||||
let (index_count, task_key, total_index_documents) = match (
|
|
||||||
req.headers().get(PROXY_IMPORT_INDEX_COUNT_HEADER),
|
|
||||||
req.headers().get(PROXY_IMPORT_TASK_KEY_HEADER),
|
|
||||||
req.headers().get(PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER),
|
|
||||||
) {
|
|
||||||
(None, None, None) => return Ok(None),
|
|
||||||
(Some(index_count), Some(task_key), Some(total_index_documents)) => {
|
|
||||||
let index_count = urlencoding::decode(index_count.to_str().map_err(|err| {
|
|
||||||
MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_REMOTE_HEADER,
|
|
||||||
msg: format!("while parsing import index count as UTF-8: {err}"),
|
|
||||||
}
|
|
||||||
})?)
|
|
||||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
|
|
||||||
msg: format!("while URL-decoding import index count: {err}"),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let task_key = urlencoding::decode(task_key.to_str().map_err(|err| {
|
|
||||||
MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
|
|
||||||
msg: format!("while parsing import task key as UTF-8: {err}"),
|
|
||||||
}
|
|
||||||
})?)
|
|
||||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
|
|
||||||
msg: format!("while URL-decoding import task key: {err}"),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let total_index_documents =
|
|
||||||
urlencoding::decode(total_index_documents.to_str().map_err(|err| {
|
|
||||||
MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
|
||||||
msg: format!("while parsing total index documents as UTF-8: {err}"),
|
|
||||||
}
|
|
||||||
})?)
|
|
||||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
|
||||||
msg: format!("while URL-decoding total index documents: {err}"),
|
|
||||||
})?;
|
|
||||||
(index_count, Some(task_key), total_index_documents)
|
|
||||||
}
|
|
||||||
(Some(index_count), None, Some(total_index_documents)) => {
|
|
||||||
let index_count = urlencoding::decode(index_count.to_str().map_err(|err| {
|
|
||||||
MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_REMOTE_HEADER,
|
|
||||||
msg: format!("while parsing import index count as UTF-8: {err}"),
|
|
||||||
}
|
|
||||||
})?)
|
|
||||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
|
|
||||||
msg: format!("while URL-decoding import index count: {err}"),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let total_index_documents =
|
|
||||||
urlencoding::decode(total_index_documents.to_str().map_err(|err| {
|
|
||||||
MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
|
||||||
msg: format!("while parsing total index documents as UTF-8: {err}"),
|
|
||||||
}
|
|
||||||
})?)
|
|
||||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
|
||||||
msg: format!("while URL-decoding total index documents: {err}"),
|
|
||||||
})?;
|
|
||||||
(index_count, None, total_index_documents)
|
|
||||||
}
|
|
||||||
// catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some, Some, Some)
|
|
||||||
(index_count, task_key, total_index_documents) => {
|
|
||||||
return Err(MeilisearchHttpError::InconsistentImportMetadataHeaders {
|
|
||||||
is_index_count_missing: index_count.is_none(),
|
|
||||||
is_task_key_missing: task_key.is_none(),
|
|
||||||
is_total_index_documents_missing: total_index_documents.is_none(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let index_count: u64 =
|
|
||||||
index_count.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
|
|
||||||
msg: format!("while parsing the index count as an integer: {err}"),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let task_key = task_key
|
|
||||||
.map(|task_key| {
|
|
||||||
let task_key: Result<DocumentId, _> =
|
|
||||||
task_key.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
|
|
||||||
msg: format!("while parsing import task key as an integer: {err}"),
|
|
||||||
});
|
|
||||||
task_key
|
|
||||||
})
|
|
||||||
.transpose()?;
|
|
||||||
|
|
||||||
let total_index_documents: u64 =
|
|
||||||
total_index_documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
|
||||||
msg: format!("while parsing the total index documents as an integer: {err}"),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(Some(ImportMetadata { index_count, task_key, total_index_documents }))
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -30,7 +30,16 @@ use crate::Opt;
|
|||||||
|
|
||||||
pub mod compact;
|
pub mod compact;
|
||||||
pub mod documents;
|
pub mod documents;
|
||||||
pub mod enterprise_edition;
|
|
||||||
|
#[cfg(not(feature = "enterprise"))]
|
||||||
|
mod community_edition;
|
||||||
|
#[cfg(feature = "enterprise")]
|
||||||
|
mod enterprise_edition;
|
||||||
|
#[cfg(not(feature = "enterprise"))]
|
||||||
|
use community_edition as current_edition;
|
||||||
|
#[cfg(feature = "enterprise")]
|
||||||
|
use enterprise_edition as current_edition;
|
||||||
|
|
||||||
pub mod facet_search;
|
pub mod facet_search;
|
||||||
pub mod search;
|
pub mod search;
|
||||||
mod search_analytics;
|
mod search_analytics;
|
||||||
@@ -41,9 +50,7 @@ mod settings_analytics;
|
|||||||
pub mod similar;
|
pub mod similar;
|
||||||
mod similar_analytics;
|
mod similar_analytics;
|
||||||
|
|
||||||
pub use meilisearch_types::tasks::enterprise_edition::network::headers::{
|
pub use current_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
|
||||||
PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(OpenApi)]
|
#[derive(OpenApi)]
|
||||||
#[openapi(
|
#[openapi(
|
||||||
|
|||||||
@@ -17,9 +17,6 @@ use super::settings_analytics::*;
|
|||||||
use crate::analytics::Analytics;
|
use crate::analytics::Analytics;
|
||||||
use crate::extractors::authentication::policies::*;
|
use crate::extractors::authentication::policies::*;
|
||||||
use crate::extractors::authentication::GuardedData;
|
use crate::extractors::authentication::GuardedData;
|
||||||
use crate::routes::indexes::enterprise_edition::proxy::{
|
|
||||||
proxy, task_network_and_check_leader_and_version, Body,
|
|
||||||
};
|
|
||||||
use crate::routes::{get_task_id, is_dry_run, SummarizedTaskView};
|
use crate::routes::{get_task_id, is_dry_run, SummarizedTaskView};
|
||||||
use crate::Opt;
|
use crate::Opt;
|
||||||
|
|
||||||
@@ -79,13 +76,14 @@ macro_rules! make_setting_route {
|
|||||||
use meilisearch_types::index_uid::IndexUid;
|
use meilisearch_types::index_uid::IndexUid;
|
||||||
use meilisearch_types::milli::update::Setting;
|
use meilisearch_types::milli::update::Setting;
|
||||||
use meilisearch_types::settings::{settings, Settings};
|
use meilisearch_types::settings::{settings, Settings};
|
||||||
|
use meilisearch_types::tasks::KindWithContent;
|
||||||
use tracing::debug;
|
use tracing::debug;
|
||||||
use $crate::analytics::Analytics;
|
use $crate::analytics::Analytics;
|
||||||
use $crate::extractors::authentication::policies::*;
|
use $crate::extractors::authentication::policies::*;
|
||||||
use $crate::extractors::authentication::GuardedData;
|
use $crate::extractors::authentication::GuardedData;
|
||||||
use $crate::extractors::sequential_extractor::SeqHandler;
|
use $crate::extractors::sequential_extractor::SeqHandler;
|
||||||
use $crate::Opt;
|
use $crate::Opt;
|
||||||
use $crate::routes::SummarizedTaskView;
|
use $crate::routes::{is_dry_run, get_task_id, SummarizedTaskView};
|
||||||
#[allow(unused_imports)]
|
#[allow(unused_imports)]
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
@@ -132,7 +130,21 @@ macro_rules! make_setting_route {
|
|||||||
|
|
||||||
let new_settings = Settings { $attr: Setting::Reset.into(), ..Default::default() };
|
let new_settings = Settings { $attr: Setting::Reset.into(), ..Default::default() };
|
||||||
|
|
||||||
let task = register_new_settings(new_settings, true, index_scheduler, &req, index_uid, opt).await?;
|
let allow_index_creation =
|
||||||
|
index_scheduler.filters().allow_index_creation(&index_uid);
|
||||||
|
|
||||||
|
let task = KindWithContent::SettingsUpdate {
|
||||||
|
index_uid: index_uid.to_string(),
|
||||||
|
new_settings: Box::new(new_settings),
|
||||||
|
is_deletion: true,
|
||||||
|
allow_index_creation,
|
||||||
|
};
|
||||||
|
let uid = get_task_id(&req, &opt)?;
|
||||||
|
let dry_run = is_dry_run(&req, &opt)?;
|
||||||
|
let task: SummarizedTaskView =
|
||||||
|
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
|
||||||
|
.await??
|
||||||
|
.into();
|
||||||
|
|
||||||
debug!(returns = ?task, "Delete settings");
|
debug!(returns = ?task, "Delete settings");
|
||||||
Ok(HttpResponse::Accepted().json(task))
|
Ok(HttpResponse::Accepted().json(task))
|
||||||
@@ -204,7 +216,21 @@ macro_rules! make_setting_route {
|
|||||||
&index_scheduler,
|
&index_scheduler,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let task = register_new_settings(new_settings, false, index_scheduler, &req, index_uid, opt).await?;
|
let allow_index_creation =
|
||||||
|
index_scheduler.filters().allow_index_creation(&index_uid);
|
||||||
|
|
||||||
|
let task = KindWithContent::SettingsUpdate {
|
||||||
|
index_uid: index_uid.to_string(),
|
||||||
|
new_settings: Box::new(new_settings),
|
||||||
|
is_deletion: false,
|
||||||
|
allow_index_creation,
|
||||||
|
};
|
||||||
|
let uid = get_task_id(&req, &opt)?;
|
||||||
|
let dry_run = is_dry_run(&req, &opt)?;
|
||||||
|
let task: SummarizedTaskView =
|
||||||
|
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
|
||||||
|
.await??
|
||||||
|
.into();
|
||||||
|
|
||||||
debug!(returns = ?task, "Update settings");
|
debug!(returns = ?task, "Update settings");
|
||||||
Ok(HttpResponse::Accepted().json(task))
|
Ok(HttpResponse::Accepted().json(task))
|
||||||
@@ -545,12 +571,12 @@ pub async fn update_all(
|
|||||||
index_uid: web::Path<String>,
|
index_uid: web::Path<String>,
|
||||||
body: AwebJson<Settings<Unchecked>, DeserrJsonError>,
|
body: AwebJson<Settings<Unchecked>, DeserrJsonError>,
|
||||||
req: HttpRequest,
|
req: HttpRequest,
|
||||||
opt: Data<Opt>,
|
opt: web::Data<Opt>,
|
||||||
analytics: Data<Analytics>,
|
analytics: web::Data<Analytics>,
|
||||||
) -> Result<HttpResponse, ResponseError> {
|
) -> Result<HttpResponse, ResponseError> {
|
||||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||||
|
|
||||||
let new_settings: Settings<Unchecked> = body.into_inner();
|
let new_settings = body.into_inner();
|
||||||
debug!(parameters = ?new_settings, "Update all settings");
|
debug!(parameters = ?new_settings, "Update all settings");
|
||||||
let new_settings = validate_settings(new_settings, &index_scheduler)?;
|
let new_settings = validate_settings(new_settings, &index_scheduler)?;
|
||||||
|
|
||||||
@@ -600,55 +626,23 @@ pub async fn update_all(
|
|||||||
&req,
|
&req,
|
||||||
);
|
);
|
||||||
|
|
||||||
let task =
|
|
||||||
register_new_settings(new_settings, false, index_scheduler, &req, index_uid, opt).await?;
|
|
||||||
|
|
||||||
debug!(returns = ?task, "Update all settings");
|
|
||||||
Ok(HttpResponse::Accepted().json(task))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn register_new_settings(
|
|
||||||
new_settings: Settings<Unchecked>,
|
|
||||||
is_deletion: bool,
|
|
||||||
index_scheduler: GuardedData<ActionPolicy<{ actions::SETTINGS_UPDATE }>, Data<IndexScheduler>>,
|
|
||||||
req: &HttpRequest,
|
|
||||||
index_uid: IndexUid,
|
|
||||||
opt: Data<Opt>,
|
|
||||||
) -> Result<SummarizedTaskView, ResponseError> {
|
|
||||||
let network = index_scheduler.network();
|
|
||||||
let task_network = task_network_and_check_leader_and_version(req, &network)?;
|
|
||||||
|
|
||||||
let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
|
let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
|
||||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
|
let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
|
||||||
let task = KindWithContent::SettingsUpdate {
|
let task = KindWithContent::SettingsUpdate {
|
||||||
index_uid: index_uid.clone(),
|
index_uid,
|
||||||
new_settings: Box::new(new_settings.clone()),
|
new_settings: Box::new(new_settings),
|
||||||
is_deletion,
|
is_deletion: false,
|
||||||
allow_index_creation,
|
allow_index_creation,
|
||||||
};
|
};
|
||||||
let uid = get_task_id(req, &opt)?;
|
let uid = get_task_id(&req, &opt)?;
|
||||||
let dry_run = is_dry_run(req, &opt)?;
|
let dry_run = is_dry_run(&req, &opt)?;
|
||||||
|
let task: SummarizedTaskView =
|
||||||
|
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
|
||||||
|
.await??
|
||||||
|
.into();
|
||||||
|
|
||||||
let scheduler = index_scheduler.clone();
|
debug!(returns = ?task, "Update all settings");
|
||||||
let mut task = tokio::task::spawn_blocking(move || {
|
Ok(HttpResponse::Accepted().json(task))
|
||||||
scheduler.register_with_custom_metadata(task, uid, None, dry_run, task_network)
|
|
||||||
})
|
|
||||||
.await??;
|
|
||||||
|
|
||||||
if let Some(task_network) = task.network.take() {
|
|
||||||
proxy(
|
|
||||||
&index_scheduler,
|
|
||||||
Some(&index_uid),
|
|
||||||
req,
|
|
||||||
task_network,
|
|
||||||
network,
|
|
||||||
Body::inline(new_settings),
|
|
||||||
&task,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(task.into())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[utoipa::path(
|
#[utoipa::path(
|
||||||
@@ -737,8 +731,20 @@ pub async fn delete_all(
|
|||||||
|
|
||||||
let new_settings = Settings::cleared().into_unchecked();
|
let new_settings = Settings::cleared().into_unchecked();
|
||||||
|
|
||||||
let task =
|
let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
|
||||||
register_new_settings(new_settings, true, index_scheduler, &req, index_uid, opt).await?;
|
let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
|
||||||
|
let task = KindWithContent::SettingsUpdate {
|
||||||
|
index_uid,
|
||||||
|
new_settings: Box::new(new_settings),
|
||||||
|
is_deletion: true,
|
||||||
|
allow_index_creation,
|
||||||
|
};
|
||||||
|
let uid = get_task_id(&req, &opt)?;
|
||||||
|
let dry_run = is_dry_run(&req, &opt)?;
|
||||||
|
let task: SummarizedTaskView =
|
||||||
|
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
|
||||||
|
.await??
|
||||||
|
.into();
|
||||||
|
|
||||||
debug!(returns = ?task, "Delete all settings");
|
debug!(returns = ?task, "Delete all settings");
|
||||||
Ok(HttpResponse::Accepted().json(task))
|
Ok(HttpResponse::Accepted().json(task))
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ use index_scheduler::{IndexScheduler, Query};
|
|||||||
use meilisearch_auth::AuthController;
|
use meilisearch_auth::AuthController;
|
||||||
use meilisearch_types::error::ResponseError;
|
use meilisearch_types::error::ResponseError;
|
||||||
use meilisearch_types::keys::actions;
|
use meilisearch_types::keys::actions;
|
||||||
|
use meilisearch_types::milli::progress::ProgressStepView;
|
||||||
use meilisearch_types::tasks::Status;
|
use meilisearch_types::tasks::Status;
|
||||||
use prometheus::{Encoder, TextEncoder};
|
use prometheus::{Encoder, TextEncoder};
|
||||||
use time::OffsetDateTime;
|
use time::OffsetDateTime;
|
||||||
@@ -38,6 +39,12 @@ pub fn configure(config: &mut web::ServiceConfig) {
|
|||||||
# HELP meilisearch_db_size_bytes Meilisearch DB Size In Bytes
|
# HELP meilisearch_db_size_bytes Meilisearch DB Size In Bytes
|
||||||
# TYPE meilisearch_db_size_bytes gauge
|
# TYPE meilisearch_db_size_bytes gauge
|
||||||
meilisearch_db_size_bytes 1130496
|
meilisearch_db_size_bytes 1130496
|
||||||
|
# HELP meilisearch_batch_running_progress_trace The currently running progress trace
|
||||||
|
# TYPE meilisearch_batch_running_progress_trace gauge
|
||||||
|
meilisearch_batch_running_progress_trace{batch_uid="0",step_name="document"} 0.710618582519409
|
||||||
|
meilisearch_batch_running_progress_trace{batch_uid="0",step_name="extracting word proximity"} 0.2222222222222222
|
||||||
|
meilisearch_batch_running_progress_trace{batch_uid="0",step_name="indexing"} 0.6666666666666666
|
||||||
|
meilisearch_batch_running_progress_trace{batch_uid="0",step_name="processing tasks"} 0
|
||||||
# HELP meilisearch_http_requests_total Meilisearch HTTP requests total
|
# HELP meilisearch_http_requests_total Meilisearch HTTP requests total
|
||||||
# TYPE meilisearch_http_requests_total counter
|
# TYPE meilisearch_http_requests_total counter
|
||||||
meilisearch_http_requests_total{method="GET",path="/metrics",status="400"} 1
|
meilisearch_http_requests_total{method="GET",path="/metrics",status="400"} 1
|
||||||
@@ -61,6 +68,13 @@ meilisearch_http_response_time_seconds_bucket{method="GET",path="/metrics",le="1
|
|||||||
meilisearch_http_response_time_seconds_bucket{method="GET",path="/metrics",le="+Inf"} 0
|
meilisearch_http_response_time_seconds_bucket{method="GET",path="/metrics",le="+Inf"} 0
|
||||||
meilisearch_http_response_time_seconds_sum{method="GET",path="/metrics"} 0
|
meilisearch_http_response_time_seconds_sum{method="GET",path="/metrics"} 0
|
||||||
meilisearch_http_response_time_seconds_count{method="GET",path="/metrics"} 0
|
meilisearch_http_response_time_seconds_count{method="GET",path="/metrics"} 0
|
||||||
|
# HELP meilisearch_last_finished_batches_progress_trace_ms The last few batches progress trace in milliseconds
|
||||||
|
# TYPE meilisearch_last_finished_batches_progress_trace_ms gauge
|
||||||
|
meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks"} 19360
|
||||||
|
meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > computing document changes"} 368
|
||||||
|
meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > computing document changes > preparing payloads"} 367
|
||||||
|
meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > computing document changes > preparing payloads > payload"} 367
|
||||||
|
meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > indexing"} 18970
|
||||||
# HELP meilisearch_index_count Meilisearch Index Count
|
# HELP meilisearch_index_count Meilisearch Index Count
|
||||||
# TYPE meilisearch_index_count gauge
|
# TYPE meilisearch_index_count gauge
|
||||||
meilisearch_index_count 1
|
meilisearch_index_count 1
|
||||||
@@ -148,6 +162,50 @@ pub async fn get_metrics(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Fetch and expose the current progressing step
|
||||||
|
crate::metrics::MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE.reset();
|
||||||
|
let (batches, _total) = index_scheduler.get_batches_from_authorized_indexes(
|
||||||
|
&Query { statuses: Some(vec![Status::Processing]), ..Query::default() },
|
||||||
|
auth_filters,
|
||||||
|
)?;
|
||||||
|
if let Some(batch) = batches.into_iter().next() {
|
||||||
|
let batch_uid = batch.uid.to_string();
|
||||||
|
if let Some(progress) = batch.progress {
|
||||||
|
for ProgressStepView { current_step, finished, total } in progress.steps {
|
||||||
|
crate::metrics::MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE
|
||||||
|
.with_label_values(&[batch_uid.as_str(), current_step.as_ref()])
|
||||||
|
// We return the completion ratio of the current step
|
||||||
|
.set(finished as f64 / total as f64);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
crate::metrics::MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS.reset();
|
||||||
|
let (batches, _total) = index_scheduler.get_batches_from_authorized_indexes(
|
||||||
|
// Fetch the finished batches...
|
||||||
|
&Query {
|
||||||
|
statuses: Some(vec![Status::Succeeded, Status::Failed]),
|
||||||
|
limit: Some(1),
|
||||||
|
..Query::default()
|
||||||
|
},
|
||||||
|
auth_filters,
|
||||||
|
)?;
|
||||||
|
// ...and get the last batch only.
|
||||||
|
if let Some(batch) = batches.into_iter().next() {
|
||||||
|
let batch_uid = batch.uid.to_string();
|
||||||
|
for (step_name, duration_str) in batch.stats.progress_trace {
|
||||||
|
let Some(duration_str) = duration_str.as_str() else { continue };
|
||||||
|
match humantime::parse_duration(duration_str) {
|
||||||
|
Ok(duration) => {
|
||||||
|
crate::metrics::MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS
|
||||||
|
.with_label_values(&[&batch_uid, &step_name])
|
||||||
|
.set(duration.as_millis() as i64);
|
||||||
|
}
|
||||||
|
Err(e) => tracing::error!("Failed to parse duration: {e}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if let Some(last_update) = response.last_update {
|
if let Some(last_update) = response.last_update {
|
||||||
crate::metrics::MEILISEARCH_LAST_UPDATE.set(last_update.unix_timestamp());
|
crate::metrics::MEILISEARCH_LAST_UPDATE.set(last_update.unix_timestamp());
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,36 +4,25 @@ use actix_web::web::{self, Data};
|
|||||||
use actix_web::{HttpRequest, HttpResponse};
|
use actix_web::{HttpRequest, HttpResponse};
|
||||||
use deserr::actix_web::AwebJson;
|
use deserr::actix_web::AwebJson;
|
||||||
use deserr::Deserr;
|
use deserr::Deserr;
|
||||||
use futures::TryStreamExt;
|
use index_scheduler::IndexScheduler;
|
||||||
use index_scheduler::{IndexScheduler, Query, RoFeatures};
|
|
||||||
use itertools::{EitherOrBoth, Itertools};
|
use itertools::{EitherOrBoth, Itertools};
|
||||||
use meilisearch_auth::AuthFilter;
|
|
||||||
use meilisearch_types::deserr::DeserrJsonError;
|
use meilisearch_types::deserr::DeserrJsonError;
|
||||||
use meilisearch_types::enterprise_edition::network::{Network as DbNetwork, Remote as DbRemote};
|
|
||||||
use meilisearch_types::error::deserr_codes::{
|
use meilisearch_types::error::deserr_codes::{
|
||||||
InvalidNetworkLeader, InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf,
|
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
|
||||||
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
|
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
|
||||||
};
|
};
|
||||||
use meilisearch_types::error::{Code, ResponseError};
|
use meilisearch_types::error::ResponseError;
|
||||||
use meilisearch_types::features::RuntimeTogglableFeatures;
|
|
||||||
use meilisearch_types::keys::actions;
|
use meilisearch_types::keys::actions;
|
||||||
use meilisearch_types::milli::update::Setting;
|
use meilisearch_types::milli::update::Setting;
|
||||||
use meilisearch_types::tasks::enterprise_edition::network::{
|
use meilisearch_types::network::{Network as DbNetwork, Remote as DbRemote};
|
||||||
headers, NetworkTopologyChange, Origin, TaskNetwork,
|
|
||||||
};
|
|
||||||
use meilisearch_types::tasks::KindWithContent;
|
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use tracing::debug;
|
use tracing::debug;
|
||||||
use utoipa::{OpenApi, ToSchema};
|
use utoipa::{OpenApi, ToSchema};
|
||||||
|
|
||||||
use crate::analytics::{Aggregate, Analytics};
|
use crate::analytics::{Aggregate, Analytics};
|
||||||
use crate::error::MeilisearchHttpError;
|
|
||||||
use crate::extractors::authentication::policies::ActionPolicy;
|
use crate::extractors::authentication::policies::ActionPolicy;
|
||||||
use crate::extractors::authentication::GuardedData;
|
use crate::extractors::authentication::GuardedData;
|
||||||
use crate::extractors::sequential_extractor::SeqHandler;
|
use crate::extractors::sequential_extractor::SeqHandler;
|
||||||
use crate::routes::indexes::enterprise_edition::proxy::{self, proxy, Body, ProxyError};
|
|
||||||
use crate::routes::tasks::AllTasks;
|
|
||||||
use crate::routes::SummarizedTaskView;
|
|
||||||
|
|
||||||
#[derive(OpenApi)]
|
#[derive(OpenApi)]
|
||||||
#[openapi(
|
#[openapi(
|
||||||
@@ -94,7 +83,7 @@ async fn get_network(
|
|||||||
Ok(HttpResponse::Ok().json(network))
|
Ok(HttpResponse::Ok().json(network))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
|
#[derive(Debug, Deserr, ToSchema, Serialize)]
|
||||||
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
|
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
#[schema(rename_all = "camelCase")]
|
#[schema(rename_all = "camelCase")]
|
||||||
@@ -117,19 +106,12 @@ pub struct Remote {
|
|||||||
pub write_api_key: Setting<String>,
|
pub write_api_key: Setting<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
|
#[derive(Debug, Deserr, ToSchema, Serialize)]
|
||||||
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
|
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
#[schema(rename_all = "camelCase")]
|
#[schema(rename_all = "camelCase")]
|
||||||
pub struct Network {
|
pub struct Network {
|
||||||
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
|
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
|
||||||
"ms-00": {
|
|
||||||
"url": "http://localhost:7700"
|
|
||||||
},
|
|
||||||
"ms-01": {
|
|
||||||
"url": "http://localhost:7701"
|
|
||||||
}
|
|
||||||
}))]
|
|
||||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
|
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
|
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
|
||||||
@@ -137,21 +119,10 @@ pub struct Network {
|
|||||||
#[serde(default, rename = "self")]
|
#[serde(default, rename = "self")]
|
||||||
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
|
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
|
||||||
pub local: Setting<String>,
|
pub local: Setting<String>,
|
||||||
#[schema(value_type = Option<String>, example = json!("ms-00"))]
|
#[schema(value_type = Option<bool>, example = json!(true))]
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkLeader>)]
|
#[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
|
||||||
pub leader: Setting<String>,
|
pub sharding: Setting<bool>,
|
||||||
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
|
|
||||||
"ms-00": {
|
|
||||||
"url": "http://localhost:7700"
|
|
||||||
},
|
|
||||||
"ms-01": {
|
|
||||||
"url": "http://localhost:7701"
|
|
||||||
}
|
|
||||||
}))]
|
|
||||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
|
|
||||||
#[serde(default)]
|
|
||||||
pub previous_remotes: Setting<BTreeMap<String, Option<Remote>>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Remote {
|
impl Remote {
|
||||||
@@ -236,359 +207,39 @@ async fn patch_network(
|
|||||||
) -> Result<HttpResponse, ResponseError> {
|
) -> Result<HttpResponse, ResponseError> {
|
||||||
index_scheduler.features().check_network("Using the /network route")?;
|
index_scheduler.features().check_network("Using the /network route")?;
|
||||||
|
|
||||||
match (
|
|
||||||
proxy::origin_from_req(&req)?,
|
|
||||||
proxy::import_data_from_req(&req)?,
|
|
||||||
proxy::import_metadata_from_req(&req)?,
|
|
||||||
) {
|
|
||||||
(Some(origin), None, None) => {
|
|
||||||
patch_network_with_origin(index_scheduler, new_network, req, origin, analytics).await
|
|
||||||
}
|
|
||||||
(None, None, None) => {
|
|
||||||
patch_network_without_origin(index_scheduler, new_network, req, analytics).await
|
|
||||||
}
|
|
||||||
(Some(origin), Some(import_data), Some(metadata)) => {
|
|
||||||
if metadata.index_count == 0 {
|
|
||||||
tokio::task::spawn_blocking(move || {
|
|
||||||
index_scheduler.network_no_index_for_remote(import_data.remote_name, origin)
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;
|
|
||||||
Ok(HttpResponse::Ok().finish())
|
|
||||||
} else {
|
|
||||||
Err(MeilisearchHttpError::InvalidHeaderValue {
|
|
||||||
header_name: headers::PROXY_IMPORT_INDEX_COUNT_HEADER,
|
|
||||||
msg: format!("Expected 0 indexes, got `{}`", metadata.index_count),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
(origin, import_data, metadata) => {
|
|
||||||
Err(MeilisearchHttpError::InconsistentTaskNetworkHeaders {
|
|
||||||
is_missing_origin: origin.is_none(),
|
|
||||||
is_missing_import: import_data.is_none(),
|
|
||||||
is_missing_import_metadata: metadata.is_none(),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn patch_network_without_origin(
|
|
||||||
index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
|
|
||||||
new_network: AwebJson<Network, DeserrJsonError>,
|
|
||||||
req: HttpRequest,
|
|
||||||
analytics: Data<Analytics>,
|
|
||||||
) -> Result<HttpResponse, ResponseError> {
|
|
||||||
let new_network = new_network.0;
|
let new_network = new_network.0;
|
||||||
let old_network = index_scheduler.network();
|
let old_network = index_scheduler.network();
|
||||||
debug!(parameters = ?new_network, "Patch network");
|
debug!(parameters = ?new_network, "Patch network");
|
||||||
|
|
||||||
if !matches!(new_network.previous_remotes, Setting::NotSet) {
|
#[cfg(not(feature = "enterprise"))]
|
||||||
return Err(MeilisearchHttpError::UnexpectedNetworkPreviousRemotes.into());
|
if new_network.sharding.set().is_some() {
|
||||||
|
use meilisearch_types::error::Code;
|
||||||
|
|
||||||
|
return Err(ResponseError::from_msg(
|
||||||
|
"Meilisearch Enterprise Edition is required to set `network.sharding`".into(),
|
||||||
|
Code::RequiresEnterpriseEdition,
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let merged_network = merge_networks(old_network.clone(), new_network)?;
|
|
||||||
|
|
||||||
// When a network task must be created, perform some sanity checks against common errors:
|
|
||||||
// - missing experimental feature on an host from the network
|
|
||||||
// - a network task is already enqueued
|
|
||||||
//
|
|
||||||
// These checks are by no mean perfect (they are not atomic since the network is involved), but they should
|
|
||||||
// help preventing a bad situation.
|
|
||||||
if merged_network.leader.is_some() {
|
|
||||||
let query = Query {
|
|
||||||
statuses: Some(vec![
|
|
||||||
meilisearch_types::tasks::Status::Enqueued,
|
|
||||||
meilisearch_types::tasks::Status::Processing,
|
|
||||||
]),
|
|
||||||
types: Some(vec![meilisearch_types::tasks::Kind::NetworkTopologyChange]),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
|
|
||||||
let filters = AuthFilter::default();
|
|
||||||
let (tasks, _) = index_scheduler.get_task_ids_from_authorized_indexes(&query, &filters)?;
|
|
||||||
|
|
||||||
if let Some(first) = tasks.min() {
|
|
||||||
return Err(MeilisearchHttpError::UnprocessedNetworkTask {
|
|
||||||
remote: None,
|
|
||||||
task_uid: first,
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
futures::stream::iter(
|
|
||||||
old_network
|
|
||||||
.remotes
|
|
||||||
.iter()
|
|
||||||
.merge_join_by(merged_network.remotes.iter(), |(left, _), (right, _)| {
|
|
||||||
left.cmp(right)
|
|
||||||
})
|
|
||||||
.map(|eob| -> Result<_, ResponseError> {
|
|
||||||
Ok(async move {
|
|
||||||
let (remote_name, remote, allow_unreachable) = match eob {
|
|
||||||
EitherOrBoth::Both(_, (remote_name, remote))
|
|
||||||
| EitherOrBoth::Right((remote_name, remote)) => {
|
|
||||||
(remote_name, remote, false)
|
|
||||||
}
|
|
||||||
EitherOrBoth::Left((remote_name, remote)) => {
|
|
||||||
(remote_name, remote, true)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
{
|
|
||||||
// 1. check that the experimental feature is enabled
|
|
||||||
let remote_features: RuntimeTogglableFeatures = match proxy::send_request(
|
|
||||||
"/experimental-features",
|
|
||||||
reqwest::Method::GET,
|
|
||||||
None,
|
|
||||||
Body::none(),
|
|
||||||
remote_name,
|
|
||||||
remote,
|
|
||||||
)
|
|
||||||
.await {
|
|
||||||
Ok(remote_features) => remote_features,
|
|
||||||
Err(ProxyError::Timeout | ProxyError::CouldNotSendRequest(_)) if allow_unreachable => {
|
|
||||||
return Ok(())
|
|
||||||
},
|
|
||||||
Err(err) => return Err(err.as_response_error()),
|
|
||||||
};
|
|
||||||
let remote_features =
|
|
||||||
RoFeatures::from_runtime_features(remote_features);
|
|
||||||
remote_features
|
|
||||||
.check_network("receiving a proxied network task")
|
|
||||||
.map_err(|error| MeilisearchHttpError::RemoteIndexScheduler {
|
|
||||||
remote: remote_name.to_owned(),
|
|
||||||
error,
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// 2. check whether there are any unfinished network task
|
|
||||||
let network_tasks: AllTasks = match proxy::send_request(
|
|
||||||
"/tasks?types=networkTopologyChange&statuses=enqueued,processing&limit=1",
|
|
||||||
reqwest::Method::GET,
|
|
||||||
None,
|
|
||||||
Body::none(),
|
|
||||||
remote_name,
|
|
||||||
remote).await {
|
|
||||||
Ok(network_tasks) => network_tasks,
|
|
||||||
Err(ProxyError::Timeout | ProxyError::CouldNotSendRequest(_)) if allow_unreachable => {
|
|
||||||
return Ok(())
|
|
||||||
},
|
|
||||||
Err(err) => return Err(err.as_response_error()),
|
|
||||||
};
|
|
||||||
|
|
||||||
if let [first, ..] = network_tasks.results.as_slice() {
|
|
||||||
return Err(ResponseError::from(
|
|
||||||
MeilisearchHttpError::UnprocessedNetworkTask {
|
|
||||||
remote: Some(remote_name.to_owned()),
|
|
||||||
task_uid: first.uid,
|
|
||||||
},
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
.try_buffer_unordered(40)
|
|
||||||
.try_collect::<()>()
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
index_scheduler.put_network(merged_network.clone())?;
|
|
||||||
|
|
||||||
analytics.publish(
|
|
||||||
PatchNetworkAnalytics {
|
|
||||||
network_size: merged_network.remotes.len(),
|
|
||||||
network_has_self: merged_network.local.is_some(),
|
|
||||||
},
|
|
||||||
&req,
|
|
||||||
);
|
|
||||||
|
|
||||||
if merged_network.leader.is_some() {
|
|
||||||
let network_topology_change =
|
|
||||||
NetworkTopologyChange::new(old_network.clone(), merged_network.clone());
|
|
||||||
let task = KindWithContent::NetworkTopologyChange(network_topology_change);
|
|
||||||
let mut task = {
|
|
||||||
let index_scheduler = index_scheduler.clone();
|
|
||||||
tokio::task::spawn_blocking(move || {
|
|
||||||
index_scheduler.register_with_custom_metadata(
|
|
||||||
task,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
Some(TaskNetwork::Remotes {
|
|
||||||
remote_tasks: Default::default(),
|
|
||||||
network_version: merged_network.version,
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.await??
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut proxied_network = Network {
|
|
||||||
remotes: Setting::Set(to_settings_remotes(&merged_network.remotes)),
|
|
||||||
local: Setting::NotSet,
|
|
||||||
leader: Setting::some_or_not_set(merged_network.leader.clone()),
|
|
||||||
previous_remotes: Setting::Set(to_settings_remotes(&old_network.remotes)),
|
|
||||||
};
|
|
||||||
let mut deleted_network = old_network;
|
|
||||||
|
|
||||||
let deleted_remotes = &mut deleted_network.remotes;
|
|
||||||
deleted_remotes.retain(|node, _| !merged_network.remotes.contains_key(node));
|
|
||||||
|
|
||||||
// proxy network change to the remaining remotes.
|
|
||||||
let updated_task = proxy(
|
|
||||||
&index_scheduler,
|
|
||||||
None,
|
|
||||||
&req,
|
|
||||||
task.network.take().unwrap(), // set in register
|
|
||||||
merged_network,
|
|
||||||
Body::generated(proxied_network.clone(), |name, _remote, network| {
|
|
||||||
network.local = Setting::Set(name.to_string());
|
|
||||||
}),
|
|
||||||
&task,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
// unwrap: network was set by `proxy`
|
|
||||||
let task_network = updated_task.network.unwrap();
|
|
||||||
|
|
||||||
proxied_network.previous_remotes = Setting::NotSet;
|
|
||||||
|
|
||||||
if deleted_network.leader.is_some() {
|
|
||||||
// proxy network change to the deleted remotes
|
|
||||||
proxy(
|
|
||||||
&index_scheduler,
|
|
||||||
None,
|
|
||||||
&req,
|
|
||||||
task_network,
|
|
||||||
deleted_network,
|
|
||||||
Body::generated(proxied_network.clone(), |_name, _remote, network| {
|
|
||||||
network.local = Setting::Reset;
|
|
||||||
}),
|
|
||||||
&task,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let task: SummarizedTaskView = task.into();
|
|
||||||
debug!("returns: {:?}", task);
|
|
||||||
Ok(HttpResponse::Accepted().json(task))
|
|
||||||
} else {
|
|
||||||
Ok(HttpResponse::Ok().json(merged_network))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn patch_network_with_origin(
|
|
||||||
index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
|
|
||||||
merged_network: AwebJson<Network, DeserrJsonError>,
|
|
||||||
req: HttpRequest,
|
|
||||||
origin: Origin,
|
|
||||||
analytics: Data<Analytics>,
|
|
||||||
) -> Result<HttpResponse, ResponseError> {
|
|
||||||
let merged_network = merged_network.into_inner();
|
|
||||||
debug!(parameters = ?merged_network, ?origin, "Patch network");
|
|
||||||
let mut remotes = BTreeMap::new();
|
|
||||||
let mut old_network = index_scheduler.network();
|
|
||||||
|
|
||||||
for (name, remote) in merged_network.remotes.set().into_iter().flat_map(|x| x.into_iter()) {
|
|
||||||
let Some(remote) = remote else { continue };
|
|
||||||
let remote = remote.try_into_db_node(&name)?;
|
|
||||||
remotes.insert(name, remote);
|
|
||||||
}
|
|
||||||
let mut previous_remotes = BTreeMap::new();
|
|
||||||
for (name, remote) in
|
|
||||||
merged_network.previous_remotes.set().into_iter().flat_map(|x| x.into_iter())
|
|
||||||
{
|
|
||||||
let Some(remote) = remote else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let remote = remote.try_into_db_node(&name)?;
|
|
||||||
previous_remotes.insert(name, remote);
|
|
||||||
}
|
|
||||||
|
|
||||||
old_network.remotes = previous_remotes;
|
|
||||||
|
|
||||||
let new_network = DbNetwork {
|
|
||||||
local: merged_network.local.set(),
|
|
||||||
remotes,
|
|
||||||
leader: merged_network.leader.set(),
|
|
||||||
version: origin.network_version,
|
|
||||||
};
|
|
||||||
index_scheduler.put_network(new_network.clone())?;
|
|
||||||
|
|
||||||
analytics.publish(
|
|
||||||
PatchNetworkAnalytics {
|
|
||||||
network_size: new_network.remotes.len(),
|
|
||||||
network_has_self: new_network.local.is_some(),
|
|
||||||
},
|
|
||||||
&req,
|
|
||||||
);
|
|
||||||
|
|
||||||
let network_topology_change = NetworkTopologyChange::new(old_network, new_network);
|
|
||||||
let task = KindWithContent::NetworkTopologyChange(network_topology_change);
|
|
||||||
let task = {
|
|
||||||
let index_scheduler = index_scheduler.clone();
|
|
||||||
tokio::task::spawn_blocking(move || {
|
|
||||||
index_scheduler.register_with_custom_metadata(
|
|
||||||
task,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
Some(TaskNetwork::Origin { origin }),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.await??
|
|
||||||
};
|
|
||||||
|
|
||||||
let task: SummarizedTaskView = task.into();
|
|
||||||
debug!("returns: {:?}", task);
|
|
||||||
Ok(HttpResponse::Accepted().json(task))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn to_settings_remotes(
|
|
||||||
db_remotes: &BTreeMap<String, DbRemote>,
|
|
||||||
) -> BTreeMap<String, Option<Remote>> {
|
|
||||||
db_remotes
|
|
||||||
.iter()
|
|
||||||
.map(|(name, remote)| {
|
|
||||||
(
|
|
||||||
name.clone(),
|
|
||||||
Some(Remote {
|
|
||||||
url: Setting::Set(remote.url.clone()),
|
|
||||||
search_api_key: Setting::some_or_not_set(remote.search_api_key.clone()),
|
|
||||||
write_api_key: Setting::some_or_not_set(remote.write_api_key.clone()),
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn merge_networks(
|
|
||||||
old_network: DbNetwork,
|
|
||||||
new_network: Network,
|
|
||||||
) -> Result<DbNetwork, ResponseError> {
|
|
||||||
let merged_self = match new_network.local {
|
let merged_self = match new_network.local {
|
||||||
Setting::Set(new_self) => Some(new_self),
|
Setting::Set(new_self) => Some(new_self),
|
||||||
Setting::Reset => None,
|
Setting::Reset => None,
|
||||||
Setting::NotSet => old_network.local,
|
Setting::NotSet => old_network.local,
|
||||||
};
|
};
|
||||||
let merged_leader = match new_network.leader {
|
|
||||||
Setting::Set(new_leader) => Some(new_leader),
|
let merged_sharding = match new_network.sharding {
|
||||||
Setting::Reset => None,
|
Setting::Set(new_sharding) => new_sharding,
|
||||||
Setting::NotSet => old_network.leader,
|
Setting::Reset => false,
|
||||||
|
Setting::NotSet => old_network.sharding,
|
||||||
};
|
};
|
||||||
match (merged_leader.as_deref(), merged_self.as_deref()) {
|
|
||||||
// 1. Always allowed if there is no leader
|
if merged_sharding && merged_self.is_none() {
|
||||||
(None, _) => (),
|
return Err(ResponseError::from_msg(
|
||||||
// 2. Allowed if the leader is self
|
"`.sharding`: enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.".into(),
|
||||||
(Some(leader), Some(this)) if leader == this => (),
|
meilisearch_types::error::Code::InvalidNetworkSharding,
|
||||||
// 3. Any other change is disallowed
|
));
|
||||||
(Some(leader), _) => {
|
|
||||||
return Err(MeilisearchHttpError::NotLeader { leader: leader.to_string() }.into())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
let new_version = uuid::Uuid::now_v7();
|
|
||||||
let merged_remotes = match new_network.remotes {
|
let merged_remotes = match new_network.remotes {
|
||||||
Setting::Set(new_remotes) => {
|
Setting::Set(new_remotes) => {
|
||||||
let mut merged_remotes = BTreeMap::new();
|
let mut merged_remotes = BTreeMap::new();
|
||||||
@@ -660,11 +311,19 @@ fn merge_networks(
|
|||||||
Setting::Reset => BTreeMap::new(),
|
Setting::Reset => BTreeMap::new(),
|
||||||
Setting::NotSet => old_network.remotes,
|
Setting::NotSet => old_network.remotes,
|
||||||
};
|
};
|
||||||
let merged_network = DbNetwork {
|
|
||||||
local: merged_self,
|
analytics.publish(
|
||||||
remotes: merged_remotes,
|
PatchNetworkAnalytics {
|
||||||
leader: merged_leader,
|
network_size: merged_remotes.len(),
|
||||||
version: new_version,
|
network_has_self: merged_self.is_some(),
|
||||||
};
|
},
|
||||||
Ok(merged_network)
|
&req,
|
||||||
|
);
|
||||||
|
|
||||||
|
let merged_network =
|
||||||
|
DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
|
||||||
|
|
||||||
|
index_scheduler.put_network(merged_network.clone())?;
|
||||||
|
debug!(returns = ?merged_network, "Patch network");
|
||||||
|
Ok(HttpResponse::Ok().json(merged_network))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ use meilisearch_types::index_uid::IndexUid;
|
|||||||
use meilisearch_types::star_or::{OptionStarOr, OptionStarOrList};
|
use meilisearch_types::star_or::{OptionStarOr, OptionStarOrList};
|
||||||
use meilisearch_types::task_view::TaskView;
|
use meilisearch_types::task_view::TaskView;
|
||||||
use meilisearch_types::tasks::{Kind, KindWithContent, Status};
|
use meilisearch_types::tasks::{Kind, KindWithContent, Status};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::Serialize;
|
||||||
use time::format_description::well_known::Rfc3339;
|
use time::format_description::well_known::Rfc3339;
|
||||||
use time::macros::format_description;
|
use time::macros::format_description;
|
||||||
use time::{Date, Duration, OffsetDateTime, Time};
|
use time::{Date, Duration, OffsetDateTime, Time};
|
||||||
@@ -488,18 +488,18 @@ async fn delete_tasks(
|
|||||||
Ok(HttpResponse::Ok().json(task))
|
Ok(HttpResponse::Ok().json(task))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
#[derive(Debug, Serialize, ToSchema)]
|
||||||
pub struct AllTasks {
|
pub struct AllTasks {
|
||||||
/// The list of tasks that matched the filter.
|
/// The list of tasks that matched the filter.
|
||||||
pub results: Vec<TaskView>,
|
results: Vec<TaskView>,
|
||||||
/// Total number of browsable results using offset/limit parameters for the given resource.
|
/// Total number of browsable results using offset/limit parameters for the given resource.
|
||||||
pub total: u64,
|
total: u64,
|
||||||
/// Limit given for the query. If limit is not provided as a query parameter, this parameter displays the default limit value.
|
/// Limit given for the query. If limit is not provided as a query parameter, this parameter displays the default limit value.
|
||||||
pub limit: u32,
|
limit: u32,
|
||||||
/// The first task uid returned.
|
/// The first task uid returned.
|
||||||
pub from: Option<u32>,
|
from: Option<u32>,
|
||||||
/// Represents the value to send in from to fetch the next slice of the results. The first item for the next slice starts at this exact number. When the returned value is null, it means that all the data have been browsed in the given order.
|
/// Represents the value to send in from to fetch the next slice of the results. The first item for the next slice starts at this exact number. When the returned value is null, it means that all the data have been browsed in the given order.
|
||||||
pub next: Option<u32>,
|
next: Option<u32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get all tasks
|
/// Get all tasks
|
||||||
|
|||||||
@@ -228,7 +228,7 @@ mod tests {
|
|||||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||||
snapshot!(meili_snap::json_string!(err), @r###"
|
snapshot!(meili_snap::json_string!(err), @r###"
|
||||||
{
|
{
|
||||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
|
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
|
||||||
"code": "invalid_task_types",
|
"code": "invalid_task_types",
|
||||||
"type": "invalid_request",
|
"type": "invalid_request",
|
||||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||||
|
|||||||
@@ -9,12 +9,12 @@ use std::vec::{IntoIter, Vec};
|
|||||||
use actix_http::StatusCode;
|
use actix_http::StatusCode;
|
||||||
use index_scheduler::{IndexScheduler, RoFeatures};
|
use index_scheduler::{IndexScheduler, RoFeatures};
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
use meilisearch_types::enterprise_edition::network::{Network, Remote};
|
|
||||||
use meilisearch_types::error::ResponseError;
|
use meilisearch_types::error::ResponseError;
|
||||||
use meilisearch_types::milli::order_by_map::OrderByMap;
|
use meilisearch_types::milli::order_by_map::OrderByMap;
|
||||||
use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue};
|
use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue};
|
||||||
use meilisearch_types::milli::vector::Embedding;
|
use meilisearch_types::milli::vector::Embedding;
|
||||||
use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET};
|
use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET};
|
||||||
|
use meilisearch_types::network::{Network, Remote};
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
use tokio::task::JoinHandle;
|
use tokio::task::JoinHandle;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
pub use error::ProxySearchError;
|
pub use error::ProxySearchError;
|
||||||
use error::ReqwestErrorWithoutUrl;
|
use error::ReqwestErrorWithoutUrl;
|
||||||
use meilisearch_types::enterprise_edition::network::Remote;
|
use meilisearch_types::network::Remote;
|
||||||
use rand::Rng as _;
|
use rand::Rng as _;
|
||||||
use reqwest::{Client, Response, StatusCode};
|
use reqwest::{Client, Response, StatusCode};
|
||||||
use serde::de::DeserializeOwned;
|
use serde::de::DeserializeOwned;
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ async fn batch_bad_types() {
|
|||||||
snapshot!(code, @"400 Bad Request");
|
snapshot!(code, @"400 Bad Request");
|
||||||
snapshot!(json_string!(response), @r###"
|
snapshot!(json_string!(response), @r###"
|
||||||
{
|
{
|
||||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
|
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
|
||||||
"code": "invalid_task_types",
|
"code": "invalid_task_types",
|
||||||
"type": "invalid_request",
|
"type": "invalid_request",
|
||||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||||
|
|||||||
@@ -143,8 +143,6 @@ impl Display for Value {
|
|||||||
".processingTimeMs" => "[duration]",
|
".processingTimeMs" => "[duration]",
|
||||||
".details.embedders.*.url" => "[url]",
|
".details.embedders.*.url" => "[url]",
|
||||||
".details.dumpUid" => "[dump_uid]",
|
".details.dumpUid" => "[dump_uid]",
|
||||||
".network.network_version" => "[version]",
|
|
||||||
".network.origin.networkVersion" => "[version]",
|
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -93,20 +93,6 @@ impl Service {
|
|||||||
self.request(req).await
|
self.request(req).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn patch_str(
|
|
||||||
&self,
|
|
||||||
url: impl AsRef<str>,
|
|
||||||
body: impl AsRef<str>,
|
|
||||||
headers: Vec<(&str, &str)>,
|
|
||||||
) -> (Value, StatusCode) {
|
|
||||||
let mut req =
|
|
||||||
test::TestRequest::patch().uri(url.as_ref()).set_payload(body.as_ref().to_string());
|
|
||||||
for header in headers {
|
|
||||||
req = req.insert_header(header);
|
|
||||||
}
|
|
||||||
self.request(req).await
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn patch(&self, url: impl AsRef<str>, body: Value) -> (Value, StatusCode) {
|
pub async fn patch(&self, url: impl AsRef<str>, body: Value) -> (Value, StatusCode) {
|
||||||
self.patch_encoded(url, body, Encoder::Plain).await
|
self.patch_encoded(url, body, Encoder::Plain).await
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1453,3 +1453,152 @@ async fn test_fetch_documents_pagination_with_sorting() {
 ]
 "###);
 }
+
+// <https://github.com/meilisearch/meilisearch/issues/5998>
+#[actix_rt::test]
+async fn get_document_sort_field_not_in_any_document() {
+let server = Server::new_shared();
+let index = server.unique_index();
+let (task, _code) = index.create(None).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let (task, _code) = index.update_settings_sortable_attributes(json!(["created_at"])).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let documents = json!([
+{ "id": 1, "name": "Document 1" },
+{ "id": 2, "name": "Document 2" }
+]);
+let (task, _code) = index.add_documents(documents, None).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let (response, code) = index
+.fetch_documents(json!({
+"sort": ["created_at:asc"]
+}))
+.await;
+
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"results": [
+{
+"id": 1,
+"name": "Document 1"
+},
+{
+"id": 2,
+"name": "Document 2"
+}
+],
+"offset": 0,
+"limit": 20,
+"total": 2
+}
+"###);
+}
+
+#[actix_rt::test]
+async fn get_document_sort_includes_docs_without_field() {
+let server = Server::new_shared();
+let index = server.unique_index();
+let (task, _code) = index.create(None).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let (task, _code) = index.update_settings_sortable_attributes(json!(["created_at"])).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let documents = json!([
+{ "id": 1, "name": "Doc without created_at" },
+{ "id": 2, "name": "Doc with created_at", "created_at": "2025-01-15" },
+{ "id": 3, "name": "Another doc without created_at" },
+{ "id": 4, "name": "Another doc with created_at", "created_at": "2025-01-10" }
+]);
+let (task, _code) = index.add_documents(documents, None).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let (response, code) = index
+.fetch_documents(json!({
+"sort": ["created_at:asc"]
+}))
+.await;
+
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"results": [
+{
+"id": 4,
+"name": "Another doc with created_at",
+"created_at": "2025-01-10"
+},
+{
+"id": 2,
+"name": "Doc with created_at",
+"created_at": "2025-01-15"
+},
+{
+"id": 1,
+"name": "Doc without created_at"
+},
+{
+"id": 3,
+"name": "Another doc without created_at"
+}
+],
+"offset": 0,
+"limit": 20,
+"total": 4
+}
+"###);
+}
+
+#[actix_rt::test]
+async fn get_document_sort_desc_includes_docs_without_field() {
+let server = Server::new_shared();
+let index = server.unique_index();
+let (task, _code) = index.create(None).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let (task, _code) = index.update_settings_sortable_attributes(json!(["priority"])).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let documents = json!([
+{ "id": 1, "name": "Low priority", "priority": 1 },
+{ "id": 2, "name": "No priority" },
+{ "id": 3, "name": "High priority", "priority": 10 }
+]);
+let (task, _code) = index.add_documents(documents, None).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let (response, code) = index
+.fetch_documents(json!({
+"sort": ["priority:desc"]
+}))
+.await;
+
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"results": [
+{
+"id": 3,
+"name": "High priority",
+"priority": 10
+},
+{
+"id": 1,
+"name": "Low priority",
+"priority": 1
+},
+{
+"id": 2,
+"name": "No priority"
+}
+],
+"offset": 0,
+"limit": 20,
+"total": 3
+}
+"###);
+}
@@ -46,7 +46,7 @@ async fn errors_on_param() {
 meili_snap::snapshot!(code, @"400 Bad Request");
 meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
-"message": "Unknown field `selfie`: expected one of `remotes`, `self`, `leader`, `previousRemotes`",
+"message": "Unknown field `selfie`: expected one of `remotes`, `self`, `sharding`",
 "code": "bad_request",
 "type": "invalid_request",
 "link": "https://docs.meilisearch.com/errors#bad_request"
@@ -186,7 +186,7 @@ async fn errors_on_param() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": null,
 "remotes": {
@@ -196,8 +196,7 @@ async fn errors_on_param() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = server
@@ -266,24 +265,22 @@ async fn auth() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "master",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

 let (response, code) = server.get_network().await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "master",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -292,12 +289,11 @@ async fn auth() {
 let (response, code) = server.get_network().await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "master",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -311,12 +307,11 @@ async fn auth() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "api_key",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
@@ -395,20 +390,18 @@ async fn get_and_set_network() {
 {
 "self": null,
 "remotes": {},
-"leader": null,
-"version": "00000000-0000-0000-0000-000000000000"
+"sharding": false
 }
 "###);

 // adding self
 let (response, code) = server.set_network(json!({"self": "myself"})).await;
 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "myself",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -426,7 +419,7 @@ async fn get_and_set_network() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "myself",
 "remotes": {
@@ -441,8 +434,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -456,7 +448,7 @@ async fn get_and_set_network() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "myself",
 "remotes": {
@@ -471,8 +463,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -487,7 +478,7 @@ async fn get_and_set_network() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "myself",
 "remotes": {
@@ -507,8 +498,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -520,7 +510,7 @@ async fn get_and_set_network() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "myself",
 "remotes": {
@@ -535,8 +525,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -544,7 +533,7 @@ async fn get_and_set_network() {
 let (response, code) = server.set_network(json!({"self": Null})).await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": null,
 "remotes": {
@@ -559,8 +548,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -568,7 +556,7 @@ async fn get_and_set_network() {
 let (response, code) = server.set_network(json!({"self": "thy"})).await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "thy",
 "remotes": {
@@ -583,8 +571,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -592,7 +579,7 @@ async fn get_and_set_network() {
 let (response, code) = server.set_network(json!({})).await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "thy",
 "remotes": {
@@ -607,8 +594,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -616,7 +602,7 @@ async fn get_and_set_network() {
 let (response, code) = server.set_network(json!({"remotes": {}})).await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "thy",
 "remotes": {
@@ -631,8 +617,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -640,7 +625,7 @@ async fn get_and_set_network() {
 let (response, code) = server.get_network().await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "thy",
 "remotes": {
@@ -655,8 +640,7 @@ async fn get_and_set_network() {
 "writeApiKey": null
 }
 },
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -668,12 +652,11 @@ async fn get_and_set_network() {
 .await;

 meili_snap::snapshot!(code, @"200 OK");
-meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
 {
 "self": "thy",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 }
@@ -128,32 +128,29 @@ async fn remote_sharding() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms2",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -192,6 +189,8 @@ async fn remote_sharding() {
 }
 }});

+println!("{}", serde_json::to_string_pretty(&network).unwrap());
+
 let (_response, status_code) = ms0.set_network(network.clone()).await;
 snapshot!(status_code, @"200 OK");
 let (_response, status_code) = ms1.set_network(network.clone()).await;
@@ -447,32 +446,29 @@ async fn remote_sharding_retrieve_vectors() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms2",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
@@ -948,22 +944,20 @@ async fn error_unregistered_remote() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -1070,22 +1064,20 @@ async fn error_no_weighted_score() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -1208,22 +1200,20 @@ async fn error_bad_response() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -1350,22 +1340,20 @@ async fn error_bad_request() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -1485,22 +1473,20 @@ async fn error_bad_request_facets_by_index() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -1631,22 +1617,20 @@ async fn error_bad_request_facets_by_index_facet() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
@@ -1786,7 +1770,7 @@ async fn error_remote_does_not_answer() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
@@ -1795,7 +1779,7 @@ async fn error_remote_does_not_answer() {
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
@@ -1989,22 +1973,20 @@ async fn error_remote_404() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -2190,22 +2172,20 @@ async fn error_remote_sharding_auth() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -2355,22 +2335,20 @@ async fn remote_sharding_auth() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -2515,22 +2493,20 @@ async fn error_remote_500() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -2700,22 +2676,20 @@ async fn error_remote_500_once() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
-"leader": null,
-"version": "[version]"
+"sharding": false
 }
 "###);

@@ -2889,7 +2863,7 @@ async fn error_remote_timeout() {

 let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms0",
 "remotes": {},
@@ -2898,7 +2872,7 @@ async fn error_remote_timeout() {
 "###);
 let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+snapshot!(json_string!(response), @r###"
 {
 "self": "ms1",
 "remotes": {},
@@ -3108,8 +3082,8 @@ impl LocalMeili {
 let (value, code) = rt.block_on(async {
 match req.method.as_str() {
 "POST" => server.service.post_str(&req.url, body, headers.clone()).await,
-"PUT" => server.service.put_str(&req.url, body, headers.clone()).await,
-"PATCH" => server.service.patch_str(&req.url, body, headers).await,
+"PUT" => server.service.put_str(&req.url, body, headers).await,
+"PATCH" => server.service.patch(&req.url, req.body_json().unwrap()).await,
 "GET" => server.service.get(&req.url).await,
 "DELETE" => server.service.delete(&req.url).await,
 _ => unimplemented!(),
@@ -3168,6 +3142,7 @@ fn fail(override_response_body: Option<&str>) -> ResponseTemplate {
 }
 }

+#[cfg(feature = "enterprise")]
 #[actix_rt::test]
 async fn remote_auto_sharding() {
 let ms0 = Server::new().await;
@@ -3186,6 +3161,35 @@ async fn remote_auto_sharding() {
 snapshot!(code, @"200 OK");
 snapshot!(json_string!(response["network"]), @"true");

+// set self & sharding
+let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"self": "ms0",
+"remotes": {},
+"sharding": true
+}
+"###);
+let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"self": "ms1",
+"remotes": {},
+"sharding": true
+}
+"###);
+let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"self": "ms2",
+"remotes": {},
+"sharding": true
+}
+"###);
+
 // wrap servers
 let ms0 = Arc::new(ms0);
 let ms1 = Arc::new(ms1);
@@ -3196,10 +3200,7 @@ async fn remote_auto_sharding() {
 let rms2 = LocalMeili::new(ms2.clone()).await;

 // set network
-let network = json!({
-"self": "ms0",
-"leader": "ms0",
-"remotes": {
+let network = json!({"remotes": {
 "ms0": {
 "url": rms0.url()
 },
@@ -3213,99 +3214,12 @@ async fn remote_auto_sharding() {

 println!("{}", serde_json::to_string_pretty(&network).unwrap());

-let (task, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"202 Accepted");
-
-let t0 = task.uid();
-let (t, _) = ms0.get_task(t0).await;
-
-let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
-let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
-
-ms0.wait_task(t0).await.succeeded();
-ms1.wait_task(t1).await.succeeded();
-ms2.wait_task(t2).await.succeeded();
-
-let (response, status_code) = ms0.get_network().await;
+let (_response, status_code) = ms0.set_network(network.clone()).await;
 snapshot!(status_code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-{
-"self": "ms0",
-"remotes": {
-"ms0": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms1": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms2": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-}
-},
-"leader": "ms0",
-"version": "[version]"
-}
-"###);
-
-let (response, status_code) = ms1.get_network().await;
+let (_response, status_code) = ms1.set_network(network.clone()).await;
 snapshot!(status_code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-{
-"self": "ms1",
-"remotes": {
-"ms0": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms1": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms2": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-}
-},
-"leader": "ms0",
-"version": "[version]"
-}
-"###);
-
-let (response, status_code) = ms2.get_network().await;
+let (_response, status_code) = ms2.set_network(network.clone()).await;
 snapshot!(status_code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-{
-"self": "ms2",
-"remotes": {
-"ms0": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms1": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms2": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-}
-},
-"leader": "ms0",
-"version": "[version]"
-}
-"###);

 // add documents
 let documents = SCORE_DOCUMENTS.clone();
@@ -3548,6 +3462,30 @@ async fn remote_auto_sharding() {
 "###);
 }

+#[cfg(not(feature = "enterprise"))]
+#[actix_rt::test]
+async fn sharding_not_enterprise() {
+let ms0 = Server::new().await;
+
+// enable feature
+
+let (response, code) = ms0.set_features(json!({"network": true})).await;
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response["network"]), @"true");
+
+let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
+snapshot!(code, @"451 Unavailable For Legal Reasons");
+snapshot!(json_string!(response), @r###"
+{
+"message": "Meilisearch Enterprise Edition is required to set `network.sharding`",
+"code": "requires_enterprise_edition",
+"type": "invalid_request",
+"link": "https://docs.meilisearch.com/errors#requires_enterprise_edition"
+}
+"###);
+}
+
+#[cfg(feature = "enterprise")]
 #[actix_rt::test]
 async fn remote_auto_sharding_with_custom_metadata() {
 let ms0 = Server::new().await;
@@ -3566,6 +3504,36 @@ async fn remote_auto_sharding_with_custom_metadata() {
 snapshot!(code, @"200 OK");
 snapshot!(json_string!(response["network"]), @"true");

+// set self & sharding
+
+let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"self": "ms0",
+"remotes": {},
+"sharding": true
+}
+"###);
+let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"self": "ms1",
+"remotes": {},
+"sharding": true
+}
+"###);
+let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response), @r###"
+{
+"self": "ms2",
+"remotes": {},
+"sharding": true
+}
+"###);
+
 // wrap servers
 let ms0 = Arc::new(ms0);
 let ms1 = Arc::new(ms1);
@@ -3576,10 +3544,7 @@ async fn remote_auto_sharding_with_custom_metadata() {
 let rms2 = LocalMeili::new(ms2.clone()).await;

 // set network
-let network = json!({
-"self": "ms0",
-"leader": "ms0",
-"remotes": {
+let network = json!({"remotes": {
 "ms0": {
 "url": rms0.url()
 },
@@ -3593,99 +3558,12 @@ async fn remote_auto_sharding_with_custom_metadata() {

 println!("{}", serde_json::to_string_pretty(&network).unwrap());

-let (task, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"202 Accepted");
-
-let t0 = task.uid();
-let (t, _) = ms0.get_task(t0).await;
-
-let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
-let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
-
-ms0.wait_task(t0).await.succeeded();
-ms1.wait_task(t1).await.succeeded();
-ms2.wait_task(t2).await.succeeded();
-
-let (response, status_code) = ms0.get_network().await;
+let (_response, status_code) = ms0.set_network(network.clone()).await;
 snapshot!(status_code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-{
-"self": "ms0",
-"remotes": {
-"ms0": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms1": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms2": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-}
-},
-"leader": "ms0",
-"version": "[version]"
-}
-"###);
-
-let (response, status_code) = ms1.get_network().await;
+let (_response, status_code) = ms1.set_network(network.clone()).await;
 snapshot!(status_code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-{
-"self": "ms1",
-"remotes": {
-"ms0": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms1": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms2": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-}
-},
-"leader": "ms0",
-"version": "[version]"
-}
-"###);
-
-let (response, status_code) = ms2.get_network().await;
+let (_response, status_code) = ms2.set_network(network.clone()).await;
 snapshot!(status_code, @"200 OK");
-snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-{
-"self": "ms2",
-"remotes": {
-"ms0": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms1": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-},
-"ms2": {
-"url": "[url]",
-"searchApiKey": null,
-"writeApiKey": null
-}
-},
-"leader": "ms0",
-"version": "[version]"
-}
-"###);

 // add documents
 let documents = SCORE_DOCUMENTS.clone();
@@ -3708,7 +3586,6 @@ async fn remote_auto_sharding_with_custom_metadata() {
 let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();

 let t = ms0.wait_task(t0).await.succeeded();
-
 snapshot!(t, @r###"
 {
 "uid": "[uid]",
@@ -3729,15 +3606,14 @@ async fn remote_auto_sharding_with_custom_metadata() {
 "network": {
 "remote_tasks": {
 "ms1": {
-"taskUid": 1,
+"taskUid": 0,
 "error": null
 },
 "ms2": {
-"taskUid": 1,
+"taskUid": 0,
 "error": null
 }
-},
-"network_version": "[version]"
+}
 },
 "customMetadata": "remote_auto_sharding_with_custom_metadata"
 }
@@ -3764,8 +3640,7 @@ async fn remote_auto_sharding_with_custom_metadata() {
 "network": {
 "origin": {
 "remoteName": "ms0",
-"taskUid": 1,
-"networkVersion": "[version]"
+"taskUid": 0
 }
 },
 "customMetadata": "remote_auto_sharding_with_custom_metadata"
@@ -3793,8 +3668,7 @@ async fn remote_auto_sharding_with_custom_metadata() {
 "network": {
 "origin": {
 "remoteName": "ms0",
-"taskUid": 1,
-"networkVersion": "[version]"
+"taskUid": 0
 }
 },
 "customMetadata": "remote_auto_sharding_with_custom_metadata"
@@ -197,7 +197,7 @@ test_setting_routes!(
 {
 setting: vector_store,
 update_verb: patch,
-default_value: null
+default_value: "experimental"
 },
 );

@@ -2,6 +2,7 @@ mod chat;
 mod distinct;
 mod errors;
 mod get_settings;
+mod parent_seachable_fields;
 mod prefix_search_settings;
 mod proximity_settings;
 mod tokenizer_customization;

crates/meilisearch/tests/settings/parent_seachable_fields.rs (new file, 114 lines)
@@ -0,0 +1,114 @@
+use meili_snap::{json_string, snapshot};
+use once_cell::sync::Lazy;
+
+use crate::common::Server;
+use crate::json;
+
+static DOCUMENTS: Lazy<crate::common::Value> = Lazy::new(|| {
+json!([
+{
+"id": 1,
+"meta": {
+"title": "Soup of the day",
+"description": "many the fish",
+}
+},
+{
+"id": 2,
+"meta": {
+"title": "Soup of day",
+"description": "many the lazy fish",
+}
+},
+{
+"id": 3,
+"meta": {
+"title": "the Soup of day",
+"description": "many the fish",
+}
+},
+])
+});
+
+#[actix_rt::test]
+async fn nested_field_becomes_searchable() {
+let server = Server::new_shared();
+let index = server.unique_index();
+
+let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
+server.wait_task(task.uid()).await.succeeded();
+
+let (response, code) = index
+.update_settings(json!({
+"searchableAttributes": ["meta.title"]
+}))
+.await;
+assert_eq!("202", code.as_str(), "{response:?}");
+server.wait_task(response.uid()).await.succeeded();
+
+// We expect no documents when searching for
+// a nested non-searchable field
+index
+.search(json!({"q": "many fish"}), |response, code| {
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response["hits"]), @r###"[]"###);
+})
+.await;
+
+let (response, code) = index
+.update_settings(json!({
+"searchableAttributes": ["meta.title", "meta.description"]
+}))
+.await;
+assert_eq!("202", code.as_str(), "{response:?}");
+server.wait_task(response.uid()).await.succeeded();
+
+// We expect all the documents when the nested field becomes searchable
+index
+.search(json!({"q": "many fish"}), |response, code| {
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response["hits"]), @r###"
+[
+{
+"id": 1,
+"meta": {
+"title": "Soup of the day",
+"description": "many the fish"
+}
+},
+{
+"id": 3,
+"meta": {
+"title": "the Soup of day",
+"description": "many the fish"
+}
+},
+{
+"id": 2,
+"meta": {
+"title": "Soup of day",
+"description": "many the lazy fish"
+}
+}
+]
+"###);
+})
+.await;
+
+let (response, code) = index
+.update_settings(json!({
+"searchableAttributes": ["meta.title"]
+}))
+.await;
+assert_eq!("202", code.as_str(), "{response:?}");
+server.wait_task(response.uid()).await.succeeded();
+
+// We expect no documents when searching for
+// a nested non-searchable field
+index
+.search(json!({"q": "many fish"}), |response, code| {
+snapshot!(code, @"200 OK");
+snapshot!(json_string!(response["hits"]), @r###"[]"###);
+})
+.await;
+}
```diff
@@ -95,36 +95,36 @@ async fn task_bad_types() {

     let (response, code) = server.tasks_filter("types=doggo").await;
     snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
+    snapshot!(json_string!(response), @r#"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
     }
-    "###);
+    "#);

     let (response, code) = server.cancel_tasks("types=doggo").await;
     snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
+    snapshot!(json_string!(response), @r#"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
     }
-    "###);
+    "#);

     let (response, code) = server.delete_tasks("types=doggo").await;
     snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
+    snapshot!(json_string!(response), @r#"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
     }
-    "###);
+    "#);
 }

 #[actix_rt::test]
```
```diff
@@ -43,7 +43,7 @@ async fn version_too_old() {
     std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
     let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
     let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
-    snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.27.0");
+    snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.28.2");
 }

 #[actix_rt::test]
@@ -58,7 +58,7 @@ async fn version_requires_downgrade() {
     std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
     let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
     let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
-    snapshot!(err, @"Database version 1.27.1 is higher than the Meilisearch version 1.27.0. Downgrade is not supported");
+    snapshot!(err, @"Database version 1.28.3 is higher than the Meilisearch version 1.28.2. Downgrade is not supported");
 }

 #[actix_rt::test]
```
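The two hunks above only bump the version strings inside the expected error messages. For context, a rough, hypothetical sketch of the gating those messages describe is given below; the floor version, helper name, and signature are illustrative and are not taken from the codebase.

```rust
// Illustrative only: a database older than some dumpless-upgrade floor must be
// re-imported from a dump, and a database newer than the running binary is
// refused because downgrades are not supported. Versions compare
// lexicographically as (major, minor, patch) tuples; the floor used in main()
// is an assumption made for this sketch.
fn check_db_version(
    db: (u32, u32, u32),
    bin: (u32, u32, u32),
    floor: (u32, u32, u32),
) -> Result<(), String> {
    if db > bin {
        return Err(format!(
            "Database version {}.{}.{} is higher than the Meilisearch version {}.{}.{}. Downgrade is not supported",
            db.0, db.1, db.2, bin.0, bin.1, bin.2
        ));
    }
    if db < floor {
        return Err(format!(
            "Database version {}.{}.{} is too old for the experimental dumpless upgrade feature",
            db.0, db.1, db.2
        ));
    }
    Ok(())
}

fn main() {
    let bin = (1, 28, 2);
    let floor = (1, 12, 0); // assumed floor, for illustration only
    assert!(check_db_version((1, 11, 9999), bin, floor).is_err()); // too old: dump required
    assert!(check_db_version((1, 28, 3), bin, floor).is_err()); // newer than the binary: refused
    assert!(check_db_version((1, 12, 0), bin, floor).is_ok()); // eligible for the dumpless upgrade
}
```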
```diff
@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "stats": {
     "totalNbTasks": 1,
@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "stats": {
     "totalNbTasks": 1,
@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "stats": {
     "totalNbTasks": 1,
@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "error": null,
   "duration": "[duration]",
@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "error": null,
   "duration": "[duration]",
@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "error": null,
   "duration": "[duration]",
@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "stats": {
     "totalNbTasks": 1,
@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.27.0"
+    "upgradeTo": "v1.28.2"
   },
   "error": null,
   "duration": "[duration]",
```
```diff
@@ -104,8 +104,8 @@ async fn binary_quantize_before_sending_documents() {
           "manual": {
             "embeddings": [
               [
-                -1.0,
-                -1.0,
+                0.0,
+                0.0,
                 1.0
               ]
             ],
@@ -122,7 +122,7 @@ async fn binary_quantize_before_sending_documents() {
               [
                 1.0,
                 1.0,
-                -1.0
+                0.0
               ]
             ],
             "regenerate": false
@@ -191,8 +191,8 @@ async fn binary_quantize_after_sending_documents() {
           "manual": {
             "embeddings": [
               [
-                -1.0,
-                -1.0,
+                0.0,
+                0.0,
                 1.0
               ]
             ],
@@ -209,7 +209,7 @@ async fn binary_quantize_after_sending_documents() {
               [
                 1.0,
                 1.0,
-                -1.0
+                0.0
               ]
             ],
             "regenerate": false
```
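These snapshot updates change how negative dimensions are represented in the expected binary-quantized embeddings: -1.0 becomes 0.0, while positive dimensions stay 1.0. Below is a purely illustrative sketch of a sign-based quantizer that produces the new representation; it is not the code under test, and the input vectors are made up.

```rust
// Illustrative only: maps every dimension to 0.0 or 1.0 depending on its sign,
// which is the representation the updated snapshots expect.
fn binary_quantize(embedding: &[f32]) -> Vec<f32> {
    embedding.iter().map(|&v| if v > 0.0 { 1.0 } else { 0.0 }).collect()
}

fn main() {
    // Negative dimensions now show up as 0.0 instead of -1.0 in the snapshots.
    assert_eq!(binary_quantize(&[-2.5, -0.1, 3.0]), vec![0.0, 0.0, 1.0]);
    // The last dimension of the second embedding flips from -1.0 to 0.0.
    assert_eq!(binary_quantize(&[0.8, 0.9, -0.2]), vec![1.0, 1.0, 0.0]);
}
```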
Some files were not shown because too many files have changed in this diff.