Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-16 17:36:58 +00:00)
Compare commits: openapi-co...measure-ne (26 commits)
SHA1:

0c37ec37c7
da3f08a479
5b19df1dba
2ca596003a
9b31c09dde
74a587785a
d612ea2a90
63a7fe5586
53120eb2a4
19e512622e
86e5f74fce
a73f635013
10aac4d77f
aa2f649713
a1f266dc03
566bb51eda
c37396714d
c5473dc2b5
3cdc7f2de4
343bae478a
8b41f1a69d
59a2f8d0ab
508be2137e
50bf485dc0
6e4855bbc5
ac5da77746
.github/ISSUE_TEMPLATE/new_feature_issue.md (vendored, 5 changes)
@@ -24,11 +24,6 @@ TBD
 - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
 - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
 
-### Reminders when adding features
-
-- [ ] Write unit tests using insta
-- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
-
 ### Reminders when modifying the API
 
 - [ ] Update the openAPI file with utoipa:
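The declarative-test item removed above boils down to a record-then-replay loop. A minimal sketch of it, using the xtask command quoted in the checklist; the test file name here is hypothetical:

```bash
# Record: run the workload and let the harness fill in the expected responses.
cargo xtask test workloads/tests/my_feature.json --update-responses
# Replay: run again without the flag to check the recorded responses still match.
cargo xtask test workloads/tests/my_feature.json
```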
.github/workflows/bench-manual.yml (vendored, 2 changes)
@@ -18,7 +18,7 @@ jobs:
     timeout-minutes: 180 # 3h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
.github/workflows/bench-pr.yml (vendored, 4 changes)
@@ -66,7 +66,9 @@ jobs:
           fetch-depth: 0 # fetch full history to be able to get main commit sha
           ref: ${{ steps.comment-branch.outputs.head_ref }}
 
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
+        with:
+          profile: minimal
 
       - name: Run benchmarks on PR ${{ github.event.issue.id }}
         run: |
.github/workflows/bench-push-indexing.yml (vendored, 4 changes)
@@ -12,7 +12,9 @@ jobs:
     timeout-minutes: 180 # 3h
     steps:
      - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
+        with:
+          profile: minimal
 
       # Run benchmarks
       - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
.github/workflows/benchmarks-manual.yml (vendored, 2 changes)
@@ -18,7 +18,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
.github/workflows/benchmarks-pr.yml (vendored, 2 changes)
@@ -44,7 +44,7 @@ jobs:
           exit 1
         fi
 
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -16,7 +16,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
.github/workflows/db-change-comments.yml (vendored, 4 changes)
@@ -19,7 +19,6 @@ env:
 
   - [ ] Detail the change to the DB format and why they are forward compatible
   - [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
-  - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
 
 
   ## This PR makes breaking changes
@@ -36,7 +35,8 @@ env:
   - [ ] Write the code to go from the old database to the new one
     - If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
     - If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
-  - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
+  - [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
+
 
 jobs:
   add-comment:
.github/workflows/flaky-tests.yml (vendored, 10 changes)
@@ -3,7 +3,7 @@ name: Look for flaky tests
 on:
   workflow_dispatch:
   schedule:
-    - cron: "0 4 * * *" # Every day at 4:00AM
+    - cron: '0 4 * * *' # Every day at 4:00AM
 
 jobs:
   flaky:
@@ -13,17 +13,11 @@ jobs:
       image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
       - name: Run cargo flaky in the dumps
.github/workflows/fuzzer-indexing.yml (vendored, 4 changes)
@@ -12,7 +12,9 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
+        with:
+          profile: minimal
 
       # Run benchmarks
       - name: Run the fuzzer
.github/workflows/publish-apt-brew-pkg.yml (vendored, 8 changes)
@@ -25,13 +25,7 @@ jobs:
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-deb
         run: cargo install cargo-deb
       - uses: actions/checkout@v5
.github/workflows/publish-docker-images.yml (vendored, 175 changes)
@@ -14,105 +14,10 @@ on:
   workflow_dispatch:
 
 jobs:
-  build:
-    runs-on: ${{ matrix.runner }}
-
-    strategy:
-      matrix:
-        platform: [amd64, arm64]
-        edition: [community, enterprise]
-        include:
-          - platform: amd64
-            runner: ubuntu-24.04
-          - platform: arm64
-            runner: ubuntu-24.04-arm
-          - edition: community
-            registry: getmeili/meilisearch
-            feature-flag: ""
-          - edition: enterprise
-            registry: getmeili/meilisearch-enterprise
-            feature-flag: "--features enterprise"
-
-    permissions: {}
-    steps:
-      - uses: actions/checkout@v5
-
-      - name: Prepare
-        run: |
-          platform=linux/${{ matrix.platform }}
-          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          platforms: linux/${{ matrix.platform }}
-          install: true
-
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
-      - name: Docker meta
-        id: meta
-        uses: docker/metadata-action@v5
-        with:
-          images: ${{ matrix.registry }}
-          # Prevent `latest` to be updated for each new tag pushed.
-          # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
-          flavor: latest=false
-          tags: |
-            type=ref,event=tag
-            type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
-            type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
-            type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
-            type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
-
-      - name: Build and push by digest
-        uses: docker/build-push-action@v6
-        id: build-and-push
-        with:
-          platforms: linux/${{ matrix.platform }}
-          labels: ${{ steps.meta.outputs.labels }}
-          tags: ${{ matrix.registry }}
-          outputs: type=image,push-by-digest=true,name-canonical=true,push=true
-          build-args: |
-            COMMIT_SHA=${{ github.sha }}
-            COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
-            GIT_TAG=${{ github.ref_name }}
-            EXTRA_ARGS=${{ matrix.feature-flag }}
-
-      - name: Export digest
-        run: |
-          mkdir -p ${{ runner.temp }}/digests
-          digest="${{ steps.build-and-push.outputs.digest }}"
-          touch "${{ runner.temp }}/digests/${digest#sha256:}"
-
-      - name: Upload digest
-        uses: actions/upload-artifact@v4
-        with:
-          name: digests-${{ matrix.edition }}-${{ env.PLATFORM_PAIR }}
-          path: ${{ runner.temp }}/digests/*
-          if-no-files-found: error
-          retention-days: 1
-
-  merge:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        edition: [community, enterprise]
-        include:
-          - edition: community
-            registry: getmeili/meilisearch
-          - edition: enterprise
-            registry: getmeili/meilisearch-enterprise
-    needs:
-      - build
-
+  docker:
+    runs-on: docker
     permissions:
       id-token: write # This is needed to use Cosign in keyless mode
 
     steps:
       - uses: actions/checkout@v5
 
@@ -153,30 +58,26 @@
 
           echo "date=$commit_date" >> $GITHUB_OUTPUT
 
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
       - name: Install cosign
         uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
 
-      - name: Download digests
-        uses: actions/download-artifact@v4
-        with:
-          path: ${{ runner.temp }}/digests
-          pattern: digests-${{ matrix.edition }}-*
-          merge-multiple: true
-
       - name: Login to Docker Hub
         uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
       - name: Docker meta
         id: meta
         uses: docker/metadata-action@v5
         with:
-          images: ${{ matrix.registry }}
+          images: getmeili/meilisearch
           # Prevent `latest` to be updated for each new tag pushed.
           # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
           flavor: latest=false
@@ -187,31 +88,33 @@
             type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
             type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
 
-      - name: Create manifest list and push
-        working-directory: ${{ runner.temp }}/digests
-        run: |
-          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
-            $(printf '${{ matrix.registry }}@sha256:%s ' *)
-
-      - name: Inspect image to fetch digest to sign
-        run: |
-          digest=$(docker buildx imagetools inspect --format='{{ json .Manifest }}' ${{ matrix.registry }}:${{ steps.meta.outputs.version }} | jq -r '.digest')
-          echo "DIGEST=${digest}" >> $GITHUB_ENV
-
+      - name: Build and push
+        uses: docker/build-push-action@v6
+        id: build-and-push
+        with:
+          push: true
+          platforms: linux/amd64,linux/arm64
+          tags: ${{ steps.meta.outputs.tags }}
+          build-args: |
+            COMMIT_SHA=${{ github.sha }}
+            COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
+            GIT_TAG=${{ github.ref_name }}
 
       - name: Sign the images with GitHub OIDC Token
         env:
+          DIGEST: ${{ steps.build-and-push.outputs.digest }}
           TAGS: ${{ steps.meta.outputs.tags }}
         run: |
           images=""
           for tag in ${TAGS}; do
-            images+="${tag}@${{ env.DIGEST }} "
+            images+="${tag}@${DIGEST} "
           done
           cosign sign --yes ${images}
 
-      # /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel
-      - name: Notify meilisearch-cloud
+      # /!\ Don't touch this without checking with Cloud team
+      - name: Send CI information to Cloud team
         # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
-        if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }}
+        if: github.event_name == 'push'
         uses: peter-evans/repository-dispatch@v3
         with:
           token: ${{ secrets.MEILI_BOT_GH_PAT }}
@@ -219,13 +122,21 @@
           event-type: cloud-docker-build
           client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
 
-      # /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel
-      - name: Notify meilisearch-kubernetes
-        # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable
-        if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }}
-        uses: peter-evans/repository-dispatch@v3
-        with:
-          token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          repository: meilisearch/meilisearch-kubernetes
-          event-type: meilisearch-release
-          client-payload: '{ "version": "${{ github.ref_name }}" }'
+      # Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
+      # - name: 'Setup jq'
+      #   uses: dcarbone/install-jq-action
+      # - name: Send deployment to Swarmia
+      #   if: github.event_name == 'push' && success()
+      #   run: |
+      #     JSON_STRING=$( jq --null-input --compact-output \
+      #       --arg version "${{ github.ref_name }}" \
+      #       --arg appName "meilisearch" \
+      #       --arg environment "production" \
+      #       --arg commitSha "${{ github.sha }}" \
+      #       --arg repositoryFullName "${{ github.repository }}" \
+      #       '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
+
+      #     curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
+      #       -H "Content-Type: application/json" \
+      #       -d "$JSON_STRING" \
+      #       https://hook.swarmia.com/deployments
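Both sides of this diff sign the pushed images with Cosign in keyless mode, using the job's GitHub OIDC token. A sketch of how a consumer could verify such a signature; the certificate identity pattern below is an assumption about the publishing workflow, not taken from the diff:

```bash
# Keyless verification of an image signed from GitHub Actions.
# --certificate-identity-regexp is an assumed pattern for the signing workflow.
cosign verify \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  --certificate-identity-regexp 'https://github.com/meilisearch/meilisearch/' \
  getmeili/meilisearch:latest
```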
.github/workflows/publish-release-assets.yml (vendored, 190 changes)
@@ -32,61 +32,157 @@
     if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
     run: bash .github/scripts/check-release.sh
 
-  publish-binaries:
-    name: Publish binary for ${{ matrix.release }} ${{ matrix.edition }} edition
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        edition: [community, enterprise]
-        release:
-          [macos-amd64, macos-aarch64, windows, linux-amd64, linux-aarch64]
-        include:
-          - edition: "community"
-            feature-flag: ""
-            edition-suffix: ""
-          - edition: "enterprise"
-            feature-flag: "--features enterprise"
-            edition-suffix: "enterprise-"
-          - release: macos-amd64
-            os: macos-15-intel
-            binary_path: release/meilisearch
-            asset_name: macos-amd64
-            extra-args: ""
-          - release: macos-aarch64
-            os: macos-14
-            binary_path: aarch64-apple-darwin/release/meilisearch
-            asset_name: macos-apple-silicon
-            extra-args: "--target aarch64-apple-darwin"
-          - release: windows
-            os: windows-2022
-            binary_path: release/meilisearch.exe
-            asset_name: windows-amd64.exe
-            extra-args: ""
-          - release: linux-amd64
-            os: ubuntu-22.04
-            binary_path: x86_64-unknown-linux-gnu/release/meilisearch
-            asset_name: linux-amd64
-            extra-args: "--target x86_64-unknown-linux-gnu"
-          - release: linux-aarch64
-            os: ubuntu-22.04-arm
-            binary_path: aarch64-unknown-linux-gnu/release/meilisearch
-            asset_name: linux-aarch64
-            extra-args: "--target aarch64-unknown-linux-gnu"
+  publish-linux:
+    name: Publish binary for Linux
+    runs-on: ubuntu-latest
     needs: check-version
+    container:
+      # Use ubuntu-22.04 to compile with glibc 2.35
+      image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - name: Install needed dependencies
+        run: |
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Build
-        run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }}
+        run: cargo build --release --locked
       # No need to upload binaries for dry run (cron or workflow_dispatch)
       - name: Upload binaries to release
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          file: target/${{ matrix.binary_path }}
-          asset_name: meilisearch-${{ matrix.edition-suffix }}${{ matrix.asset_name }}
+          file: target/release/meilisearch
+          asset_name: meilisearch-linux-amd64
+          tag: ${{ github.ref }}
+
+  publish-macos-windows:
+    name: Publish binary for ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    needs: check-version
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [macos-13, windows-2022]
+        include:
+          - os: macos-13
+            artifact_name: meilisearch
+            asset_name: meilisearch-macos-amd64
+          - os: windows-2022
+            artifact_name: meilisearch.exe
+            asset_name: meilisearch-windows-amd64.exe
+    steps:
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
+      - name: Build
+        run: cargo build --release --locked
+      # No need to upload binaries for dry run (cron or workflow_dispatch)
+      - name: Upload binaries to release
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.11.2
+        with:
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          file: target/release/${{ matrix.artifact_name }}
+          asset_name: ${{ matrix.asset_name }}
+          tag: ${{ github.ref }}
+
+  publish-macos-apple-silicon:
+    name: Publish binary for macOS silicon
+    runs-on: macos-13
+    needs: check-version
+    strategy:
+      matrix:
+        include:
+          - target: aarch64-apple-darwin
+            asset_name: meilisearch-macos-apple-silicon
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+      - name: Installing Rust toolchain
+        uses: dtolnay/rust-toolchain@1.89
+        with:
+          profile: minimal
+          target: ${{ matrix.target }}
+      - name: Cargo build
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --release --target ${{ matrix.target }}
+      - name: Upload the binary to release
+        # No need to upload binaries for dry run (cron or workflow_dispatch)
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.11.2
+        with:
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          file: target/${{ matrix.target }}/release/meilisearch
+          asset_name: ${{ matrix.asset_name }}
+          tag: ${{ github.ref }}
+
+  publish-aarch64:
+    name: Publish binary for aarch64
+    runs-on: ubuntu-latest
+    needs: check-version
+    env:
+      DEBIAN_FRONTEND: noninteractive
+    container:
+      # Use ubuntu-22.04 to compile with glibc 2.35
+      image: ubuntu:22.04
+    strategy:
+      matrix:
+        include:
+          - target: aarch64-unknown-linux-gnu
+            asset_name: meilisearch-linux-aarch64
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+      - name: Install needed dependencies
+        run: |
+          apt-get update -y && apt upgrade -y
+          apt-get install -y curl build-essential gcc-aarch64-linux-gnu
+      - name: Set up Docker for cross compilation
+        run: |
+          apt-get install -y curl apt-transport-https ca-certificates software-properties-common
+          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+          add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+          apt-get update -y && apt-get install -y docker-ce
+      - name: Installing Rust toolchain
+        uses: dtolnay/rust-toolchain@1.89
+        with:
+          profile: minimal
+          target: ${{ matrix.target }}
+      - name: Configure target aarch64 GNU
+        ## Environment variable is not passed using env:
+        ## LD gold won't work with MUSL
+        # env:
+        #   JEMALLOC_SYS_WITH_LG_PAGE: 16
+        #   RUSTFLAGS: '-Clink-arg=-fuse-ld=gold'
+        run: |
+          echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
+          echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
+          echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
+      - name: Install a default toolchain that will be used to build cargo cross
+        run: |
+          rustup default stable
+      - name: Cargo build
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          use-cross: true
+          args: --release --target ${{ matrix.target }}
+        env:
+          CROSS_DOCKER_IN_DOCKER: true
+      - name: List target output files
+        run: ls -lR ./target
+      - name: Upload the binary to release
+        # No need to upload binaries for dry run (cron or workflow_dispatch)
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.11.2
+        with:
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          file: target/${{ matrix.target }}/release/meilisearch
+          asset_name: ${{ matrix.asset_name }}
           tag: ${{ github.ref }}
 
   publish-openapi-file:
@@ -104,13 +200,13 @@
       - name: Generate OpenAPI file
         run: |
           cd crates/openapi-generator
-          cargo run --release -- --pretty --output ../../meilisearch-openapi.json
+          cargo run --release -- --pretty --output ../../meilisearch.json
       - name: Upload OpenAPI to Release
         # No need to upload for dry run (cron or workflow_dispatch)
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          file: ./meilisearch-openapi.json
+          file: ./meilisearch.json
           asset_name: meilisearch-openapi.json
           tag: ${{ github.ref }}
|||||||
36
.github/workflows/sdks-tests.yml
vendored
36
.github/workflows/sdks-tests.yml
vendored
@@ -25,18 +25,14 @@ jobs:
|
|||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: Define the Docker image we need to use
|
- name: Define the Docker image we need to use
|
||||||
id: define-image
|
id: define-image
|
||||||
env:
|
|
||||||
EVENT_NAME: ${{ github.event_name }}
|
|
||||||
DOCKER_IMAGE_INPUT: ${{ github.event.inputs.docker_image }}
|
|
||||||
run: |
|
run: |
|
||||||
|
event=${{ github.event_name }}
|
||||||
echo "docker-image=nightly" >> $GITHUB_OUTPUT
|
echo "docker-image=nightly" >> $GITHUB_OUTPUT
|
||||||
if [[ "$EVENT_NAME" == 'workflow_dispatch' ]]; then
|
if [[ $event == 'workflow_dispatch' ]]; then
|
||||||
echo "docker-image=$DOCKER_IMAGE_INPUT" >> $GITHUB_OUTPUT
|
echo "docker-image=${{ github.event.inputs.docker_image }}" >> $GITHUB_OUTPUT
|
||||||
fi
|
fi
|
||||||
- name: Docker image is ${{ steps.define-image.outputs.docker-image }}
|
- name: Docker image is ${{ steps.define-image.outputs.docker-image }}
|
||||||
env:
|
run: echo "Docker image is ${{ steps.define-image.outputs.docker-image }}"
|
||||||
DOCKER_IMAGE: ${{ steps.define-image.outputs.docker-image }}
|
|
||||||
run: echo "Docker image is $DOCKER_IMAGE"
|
|
||||||
|
|
||||||
##########
|
##########
|
||||||
## SDKs ##
|
## SDKs ##
|
||||||
@@ -72,7 +68,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -96,7 +92,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -126,7 +122,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -153,7 +149,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -188,7 +184,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -217,7 +213,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -242,7 +238,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -267,7 +263,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -288,7 +284,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -311,7 +307,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -342,7 +338,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
@@ -374,7 +370,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
services:
|
services:
|
||||||
meilisearch:
|
meilisearch:
|
||||||
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
|
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
env:
|
env:
|
||||||
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
|
||||||
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
|
||||||
|
|||||||
.github/workflows/test-suite.yml (vendored, 164 changes)
@@ -15,40 +15,31 @@ env:
 
 jobs:
   test-linux:
-    name: Tests on Ubuntu
-    runs-on: ${{ matrix.runner }}
-    strategy:
-      matrix:
-        runner: [ubuntu-22.04, ubuntu-22.04-arm]
-        features: ["", "--features enterprise"]
+    name: Tests on ubuntu-22.04
+    runs-on: ubuntu-latest
+    container:
+      # Use ubuntu-22.04 to compile with glibc 2.35
+      image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
-      - name: check free space before
-        run: df -h
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+      - name: Install needed dependencies
         run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - name: check free space after
-        run: df -h
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
       - name: Setup test with Rust stable
-        uses: dtolnay/rust-toolchain@1.91.1
+        uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
-        with:
-          key: ${{ matrix.features }}
-      - name: Run cargo build without any default features
+      - name: Run cargo check without any default features
        uses: actions-rs/cargo@v1
         with:
           command: build
-          args: --locked --no-default-features --all
+          args: --locked --release --no-default-features --all
       - name: Run cargo test
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked --all ${{ matrix.features }}
+          args: --locked --release --all
 
   test-others:
     name: Tests on ${{ matrix.os }}
@@ -56,58 +47,51 @@
     strategy:
       fail-fast: false
       matrix:
-        os: [macos-14, windows-2022]
-        features: ["", "--features enterprise"]
-    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
+        os: [macos-13, windows-2022]
     steps:
       - uses: actions/checkout@v5
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
-      - uses: dtolnay/rust-toolchain@1.91.1
-      - name: Run cargo build without any default features
+      - uses: dtolnay/rust-toolchain@1.89
+      - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:
           command: build
-          args: --locked --no-default-features --all
+          args: --locked --release --no-default-features --all
       - name: Run cargo test
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked --all ${{ matrix.features }}
+          args: --locked --release --all
 
   test-all-features:
     name: Tests almost all features
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
+    container:
+      # Use ubuntu-22.04 to compile with glibc 2.35
+      image: ubuntu:22.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+      - name: Install needed dependencies
         run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+          apt-get update
+          apt-get install --assume-yes build-essential curl
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo build with almost all features
         run: |
-          cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
       - name: Run cargo test with almost all features
         run: |
-          cargo test --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
 
   ollama-ubuntu:
     name: Test with Ollama
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     env:
       MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
       - name: Install Ollama
         run: |
           curl -fsSL https://ollama.com/install.sh | sudo -E sh
@@ -131,21 +115,21 @@
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked -p meilisearch --features test-ollama ollama
+          args: --locked --release --all --features test-ollama ollama
 
   test-disabled-tokenization:
     name: Test disabled tokenization
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
+    container:
+      image: ubuntu:22.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+      - name: Install needed dependencies
         run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+          apt-get update
+          apt-get install --assume-yes build-essential curl
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo tree without default features and check lindera is not present
         run: |
           if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
@@ -156,39 +140,36 @@
         run: |
           cargo tree -f '{p} {f}' -e normal | grep lindera -qz
 
-  build:
-    name: Build in release
-    runs-on: ubuntu-22.04
+  # We run tests in debug also, to make sure that the debug_assertions are hit
+  test-debug:
+    name: Run tests in debug
+    runs-on: ubuntu-latest
+    container:
+      # Use ubuntu-22.04 to compile with glibc 2.35
+      image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+      - name: Install needed dependencies
         run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
-      - name: Build
-        run: cargo build --release --locked --target x86_64-unknown-linux-gnu
+      - name: Run tests in debug
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --locked --all
 
   clippy:
     name: Run Clippy
-    runs-on: ubuntu-22.04
-    strategy:
-      matrix:
-        features: ["", "--features enterprise"]
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
+          profile: minimal
           components: clippy
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -196,21 +177,18 @@
         uses: actions-rs/cargo@v1
         with:
           command: clippy
-          args: --all-targets ${{ matrix.features }} -- --deny warnings
+          args: --all-targets -- --deny warnings
 
   fmt:
     name: Run Rustfmt
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
+          profile: minimal
+          toolchain: nightly-2024-07-09
+          override: true
           components: rustfmt
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -221,23 +199,3 @@
         run: |
           echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
           cargo fmt --all -- --check
-
-  declarative-tests:
-    name: Run declarative tests
-    runs-on: ubuntu-22.04-arm
-    permissions:
-      contents: read
-    steps:
-      - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
-      - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.8.0
-      - name: Run declarative tests
-        run: |
-          cargo xtask test workloads/tests/*.json
.github/workflows/update-cargo-toml-version.yml (vendored, 10 changes)
@@ -18,13 +18,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
+        with:
+          profile: minimal
       - name: Install sd
         run: cargo install sd
       - name: Update Cargo.toml file
.gitignore (vendored, 3 changes)
@@ -29,6 +29,3 @@ crates/meilisearch/db.snapshot
 
 # Fuzzcheck data for the facet indexing fuzz test
 crates/milli/fuzz/update::facet::incremental::fuzz::fuzz/
-
-# OpenAPI generator
-**/meilisearch-openapi.json
@@ -124,7 +124,6 @@ They are JSON files with the following structure (comments are not actually supp
 {
   // Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
   "name": "hackernews.ndjson_1M,no-threads",
-  "type": "bench",
   // Number of consecutive runs of the commands that should be performed.
   // Each run uses a fresh instance of Meilisearch and a fresh database.
   // Each run produces its own report file.
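A minimal workload file matching the commented structure above might look like the sketch below; the run-count key name and all values are illustrative assumptions, since the snippet only shows the field descriptions:

```bash
# Write a bare-bones workload file (real files cannot contain the comments shown above).
cat > workloads/my-workload.json <<'EOF'
{
  "name": "hackernews.ndjson_1M,no-threads",
  "type": "bench",
  "run_count": 2
}
EOF
```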
@@ -117,7 +117,7 @@ With swagger:
 With the internal crate:
 ```bash
 cd crates/openapi-generator
-cargo run --release -- --pretty
+cargo run --release -- --pretty --output meilisearch.json
 ```
 
 ### Logging
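As a usage sketch of the command above, run from the repository root; jq is only used here as an assumed convenience to spot-check the generated spec:

```bash
cd crates/openapi-generator
cargo run --release -- --pretty --output meilisearch.json
jq '.info.version' meilisearch.json  # quick sanity check of the OpenAPI output
```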
Cargo.lock (generated, 2291 changes): diff suppressed because it is too large.
@@ -23,7 +23,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.29.0"
+version = "1.24.0"
 authors = [
   "Quentin de Quelen <quentin@dequelen.me>",
   "Clément Renault <clement@meilisearch.com>",
@@ -50,5 +50,3 @@ opt-level = 3
 opt-level = 3
 [profile.dev.package.roaring]
 opt-level = 3
-[profile.dev.package.gemm-f16]
-opt-level = 3
Cross.toml (new file, 7 lines)
@@ -0,0 +1,7 @@
+[build.env]
+passthrough = [
+    "RUST_BACKTRACE",
+    "CARGO_TERM_COLOR",
+    "RUSTFLAGS",
+    "JEMALLOC_SYS_WITH_LG_PAGE"
+]
@@ -8,14 +8,16 @@ WORKDIR /
 ARG COMMIT_SHA
 ARG COMMIT_DATE
 ARG GIT_TAG
-ARG EXTRA_ARGS
 ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG}
 ENV RUSTFLAGS="-C target-feature=-crt-static"
 
 COPY . .
 RUN set -eux; \
     apkArch="$(apk --print-arch)"; \
-    cargo build --release -p meilisearch -p meilitool ${EXTRA_ARGS}
+    if [ "$apkArch" = "aarch64" ]; then \
+        export JEMALLOC_SYS_WITH_LG_PAGE=16; \
+    fi && \
+    cargo build --release -p meilisearch -p meilitool
 
 # Run
 FROM alpine:3.22
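For context on the hunk above: `JEMALLOC_SYS_WITH_LG_PAGE` appears to map to jemalloc's `--with-lg-page` configure flag, the base-2 logarithm of the page size the allocator is built for, so `16` targets the 64 KiB pages used by some aarch64 Linux kernels. A tiny sketch of the arithmetic (the constant names are ours, not the build script's):

```rust
fn main() {
    // The value exported in the Dockerfile for aarch64 builds.
    let lg_page: u32 = 16;
    let page_size: usize = 1 << lg_page;
    // 2^16 bytes = 64 KiB.
    assert_eq!(page_size, 64 * 1024);
    println!("jemalloc compiled for pages of up to {page_size} bytes");
}
```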
@@ -39,7 +39,6 @@
 ## 🖥 Examples
 
 - [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
-- [**Flickr**](https://flickr.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — Search and explore one hundred million Flickr images with semantic search.
 - [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
 - [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
 - [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.
TESTING.md (deleted, 326 changes)
@@ -1,326 +0,0 @@
-# Declarative tests
-
-Declarative tests ensure that Meilisearch features remain stable across versions.
-
-While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.
-
-Declarative tests instead **simulate the lifetime of a database**: they chain together commands and requests to change the binary, verifying that database state and API responses remain consistent.
-
-## Basic example
-
-```jsonc
-{
-  "type": "test",
-  "name": "api-keys",
-  "binary": { // the first command will run on the binary following this specification.
-    "source": "release", // get the binary as a release from GitHub
-    "version": "1.19.0", // version to fetch
-    "edition": "community" // edition to fetch
-  },
-  "commands": []
-}
-```
-
-This example defines a no-op test (it does nothing).
-
-If the file is saved at `workloads/tests/example.json`, you can run it with:
-
-```bash
-cargo xtask test workloads/tests/example.json
-```
-
-## Commands
-
-Commands represent API requests sent to Meilisearch endpoints during a test.
-
-They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.
-
-```jsonc
-
-{
-  "route": "keys",
-  "method": "POST",
-  "body": {
-    "inline": {
-      "actions": [
-        "search",
-        "documents.add"
-      ],
-      "description": "Test API Key",
-      "expiresAt": null,
-      "indexes": [ "movies" ]
-    }
-  }
-}
-```
-
-This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.
-
-### Using assets in commands
-
-To keep tests concise and reusable, you can define **assets** at the root of the workload file.
-
-Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.
-
-```jsonc
-{
-  "type": "test",
-  "name": "movies",
-  "binary": {
-    "source": "release",
-    "version": "1.19.0",
-    "edition": "community"
-  },
-  "assets": {
-    "movies.json": {
-      "local_location": null,
-      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
-      "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
-    }
-  },
-  "commands": [
-    {
-      "route": "indexes/movies/documents",
-      "method": "POST",
-      "body": {
-        "asset": "movies.json"
-      }
-    }
-  ]
-}
-```
-
-In this example:
-- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
-- The SHA-256 checksum ensures integrity.
-- The `POST /indexes/movies/documents` command uses this asset as the request body.
-
-This makes the test much cleaner than inlining a large dataset directly into the command.
-
-For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).
-
-### Asserting responses
-
-Commands can specify both the **expected status code** and the **expected response body**.
-
-```jsonc
-{
-  "route": "indexes/movies/documents",
-  "method": "POST",
-  "body": {
-    "asset": "movies.json"
-  },
-  "expectedStatus": 202,
-  "expectedResponse": {
-    "enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
-    "indexUid": "movies",
-    "status": "enqueued",
-    "taskUid": 1,
-    "type": "documentAdditionOrUpdate"
-  },
-  "synchronous": "WaitForTask"
-}
-```
-
-Manually writing `expectedResponse` fields can be tedious.
-
-Instead, you can let the test runner populate them automatically:
-
-```bash
-# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
-cargo xtask test workloads/tests/example.json --add-missing-responses
-
-# OR
-
-# Run the workload to populate expected fields. Updates all fields including existing ones
-cargo xtask test workloads/tests/example.json --update-responses
-```
-
-This workflow is recommended:
-
-1. Write the test without expected fields.
-2. Run it with `--add-missing-responses` to capture the actual responses.
-3. Review and commit the generated expectations.
-
-## Changing binary
-
-It is possible to insert an instruction to change the current Meilisearch instance from one binary specification to another during a test.
-
-When executed, such an instruction will:
-1. Stop the current Meilisearch instance.
-2. Fetch the binary specified by the instruction.
-3. Restart the server with the specified binary on the same database.
-
-```jsonc
-{
-  "type": "test",
-  "name": "movies",
-  "binary": {
-    "source": "release",
-    "version": "1.19.0", // start with version v1.19.0
-    "edition": "community"
-  },
-  "assets": {
-    "movies.json": {
-      "local_location": null,
-      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
-      "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
-    }
-  },
-  "commands": [
-    // setup some data
-    {
-      "route": "indexes/movies/documents",
-      "method": "POST",
-      "body": {
-        "asset": "movies.json"
-      }
-    },
-    // switch binary to v1.24.0
-    {
-      "binary": {
-        "source": "release",
-        "version": "1.24.0",
-        "edition": "community"
-      }
-    }
-  ]
-}
-```
-
-### Typical Usage
-
-In most cases, the change binary instruction will be used to update a database.
-
-- **Set up** some data using commands on an older version.
-- **Upgrade** to the latest version.
-- **Assert** that the data and API behavior remain correct after the upgrade.
-
-To properly test the dumpless upgrade, one should typically:
-
-1. Open the database without processing the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments
-2. Check that the search, stats and task queue still work.
-3. Open the database and process the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish.
-4. Check that the indexing, search, stats, and task queue still work.
-
-```jsonc
-{
-  "type": "test",
-  "name": "movies",
-  "binary": {
-    "source": "release",
-    "version": "1.12.0",
-    "edition": "community"
-  },
-  "commands": [
-    // 0. Run commands to populate the database
-    {
-      // ..
-    },
-    // 1. Open the database with new MS without processing the update task
-    {
-      "binary": {
-        "source": "build", // build the binary from the sources in the current git repository
-        "edition": "community",
-        "extraCliArgs": [
-          "--experimental-dumpless-upgrade", // allows to open with a newer MS
-          "--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
-        ]
-      }
-    },
-    // 2. Check the search etc.
-    {
-      // ..
-    },
-    // 3. Open the database with new MS and processing the update task
-    {
-      "binary": {
-        "source": "build", // build the binary from the sources in the current git repository
-        "edition": "community",
-        "extraCliArgs": [
-          "--experimental-dumpless-upgrade" // allows to open with a newer MS
-          // no `--experimental-max-number-of-batched-tasks=0`
-        ]
-      }
-    },
-    // 4. Check the indexing, search, etc.
-    {
-      // ..
-    }
-  ]
-}
-```
-
-This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.
-
-## Variables
-
-Sometimes a command needs to use a value returned by a **previous response**.
-These values can be captured and reused using the register field.
-
-```jsonc
-{
-  "route": "keys",
-  "method": "POST",
-  "body": {
-    "inline": {
-      "actions": [
-        "search",
-        "documents.add"
-      ],
-      "description": "Test API Key",
-      "expiresAt": null,
-      "indexes": [ "movies" ]
-    }
-  },
-  "expectedResponse": {
-    "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
-    // ...
-  },
-  "register": {
-    "key": "/key"
-  },
-  "synchronous": "WaitForResponse"
-}
-```
-
-The `register` field captures the value at the JSON path `/key` from the response.
-Paths follow the **JavaScript Object Notation Pointer (RFC 6901)** format.
-Registered variables are available for all subsequent commands.
-
-Registered variables can be referenced by wrapping their name in double curly braces:
-
-In the route/path:
-
-```jsonc
-{
-  "route": "tasks/{{ task_id }}",
-  "method": "GET"
-}
-```
-
-In the request body:
-
-```jsonc
-{
-  "route": "indexes/movies/documents",
-  "method": "PATCH",
-  "body": {
-    "inline": {
-      "id": "{{ document_id }}",
-      "overview": "Shazam turns evil and the world is in danger.",
-    }
-  }
-}
-```
-
-Or they can be referenced by their name (**without curly braces**) as an API key:
-
-```jsonc
-{
-  "route": "indexes/movies/documents",
-  "method": "POST",
-  "body": { /* ... */ },
-  "apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
-}
-```
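The `register` mechanism described in the deleted file captures response values by RFC 6901 JSON Pointer. For reference, serde_json exposes exactly that lookup via `Value::pointer`; a minimal sketch (the test runner's real implementation may differ):

```rust
use serde_json::json;

fn main() {
    // Shape of a `POST /keys` response, as in the register example above.
    let response = json!({
        "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
        "description": "Test API Key"
    });

    // "/key" is an RFC 6901 pointer, like the one used by `register`.
    let captured = response.pointer("/key").and_then(|value| value.as_str());
    assert_eq!(
        captured,
        Some("c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf")
    );
}
```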
@@ -11,27 +11,27 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-anyhow = "1.0.100"
-bumpalo = "3.19.0"
-csv = "1.4.0"
-memmap2 = "0.9.9"
+anyhow = "1.0.98"
+bumpalo = "3.18.1"
+csv = "1.3.1"
+memmap2 = "0.9.7"
 milli = { path = "../milli" }
-mimalloc = { version = "0.1.48", default-features = false }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
-tempfile = "3.23.0"
+mimalloc = { version = "0.1.47", default-features = false }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
+tempfile = "3.20.0"
 
 [dev-dependencies]
-criterion = { version = "0.7.0", features = ["html_reports"] }
+criterion = { version = "0.6.0", features = ["html_reports"] }
 rand = "0.8.5"
 rand_chacha = "0.3.1"
 roaring = "0.10.12"
 
 [build-dependencies]
-anyhow = "1.0.100"
-bytes = "1.11.0"
-convert_case = "0.9.0"
-flate2 = "1.1.5"
-reqwest = { version = "0.12.24", features = ["blocking", "rustls-tls"], default-features = false }
+anyhow = "1.0.98"
+bytes = "1.10.1"
+convert_case = "0.8.0"
+flate2 = "1.1.2"
+reqwest = { version = "0.12.20", features = ["blocking", "rustls-tls"], default-features = false }
 
 [features]
 default = ["milli/all-tokenizations"]
@@ -21,10 +21,6 @@ use roaring::RoaringBitmap;
 #[global_allocator]
 static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
 
-fn no_cancel() -> bool {
-    false
-}
-
 const BENCHMARK_ITERATION: usize = 10;
 
 fn setup_dir(path: impl AsRef<Path>) {
@@ -69,7 +65,7 @@ fn setup_settings<'t>(
     let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
     builder.set_sortable_fields(sortable_fields);
 
-    builder.execute(&no_cancel, &Progress::default(), Default::default()).unwrap();
+    builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
 }
 
 fn setup_index_with_settings(
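These two hunks, and the long run of matching hunks that follows, replace every reference to the removed `no_cancel` helper with an inline `&|| false` closure. Both spellings hand the indexer a callback that never requests cancellation; a minimal sketch, with a hypothetical `run_step` signature standing in for milli's actual cancellation hook:

```rust
// Sketch of the pattern changed by these hunks; `run_step` is an
// illustration, not milli's real API.
fn run_step(should_abort: &dyn Fn() -> bool) {
    if should_abort() {
        return; // a real indexer would bail out with an "aborted" error
    }
    // ... indexing work ...
}

// The helper this diff removes:
fn no_cancel() -> bool {
    false
}

fn main() {
    run_step(&no_cancel); // one branch: named function
    run_step(&|| false);  // other branch: inline closure, same behavior
}
```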
@@ -156,7 +152,7 @@ fn indexing_songs_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -172,7 +168,7 @@ fn indexing_songs_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -224,7 +220,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -240,7 +236,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -270,7 +266,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -286,7 +282,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -340,7 +336,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -356,7 +352,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -418,7 +414,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -434,7 +430,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -464,7 +460,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -480,7 +476,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -506,7 +502,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -522,7 +518,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -575,7 +571,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -591,7 +587,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -643,7 +639,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -659,7 +655,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -711,7 +707,7 @@ fn indexing_wiki(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -727,7 +723,7 @@ fn indexing_wiki(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -778,7 +774,7 @@ fn reindexing_wiki(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -794,7 +790,7 @@ fn reindexing_wiki(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -824,7 +820,7 @@ fn reindexing_wiki(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -840,7 +836,7 @@ fn reindexing_wiki(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -893,7 +889,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -909,7 +905,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -971,7 +967,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -987,7 +983,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1018,7 +1014,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1034,7 +1030,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1061,7 +1057,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1077,7 +1073,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1129,7 +1125,7 @@ fn indexing_movies_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1145,7 +1141,7 @@ fn indexing_movies_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1196,7 +1192,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1212,7 +1208,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1242,7 +1238,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1258,7 +1254,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1311,7 +1307,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1327,7 +1323,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1376,7 +1372,7 @@ fn delete_documents_from_ids(index: Index, document_ids_to_delete: Vec<RoaringBi
         Some(primary_key),
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1426,7 +1422,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1442,7 +1438,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1472,7 +1468,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1488,7 +1484,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1514,7 +1510,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1530,7 +1526,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1605,7 +1601,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1621,7 +1617,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1697,7 +1693,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1713,7 +1709,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1781,7 +1777,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1797,7 +1793,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1849,7 +1845,7 @@ fn indexing_geo(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1865,7 +1861,7 @@ fn indexing_geo(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1916,7 +1912,7 @@ fn reindexing_geo(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1932,7 +1928,7 @@ fn reindexing_geo(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -1962,7 +1958,7 @@ fn reindexing_geo(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -1978,7 +1974,7 @@ fn reindexing_geo(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -2031,7 +2027,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
         &rtxn,
         None,
         &mut new_fields_ids_map,
-        &no_cancel,
+        &|| false,
         Progress::default(),
         None,
     )
@@ -2047,7 +2043,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
         primary_key,
         &document_changes,
         RuntimeEmbedders::default(),
-        &no_cancel,
+        &|| false,
         &Progress::default(),
         &Default::default(),
     )
@@ -11,8 +11,8 @@ license.workspace = true
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-time = { version = "0.3.44", features = ["parsing"] }
+time = { version = "0.3.41", features = ["parsing"] }
 
 [build-dependencies]
-anyhow = "1.0.100"
-vergen-gitcl = "1.0.8"
+anyhow = "1.0.98"
+vergen-git2 = "1.0.7"
@@ -15,7 +15,7 @@ fn emit_git_variables() -> anyhow::Result<()> {
     // Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
     // in the corresponding GitHub workflow (publish_docker.yml).
     // This is due to the Dockerfile building the binary outside of the git directory.
-    let mut builder = vergen_gitcl::GitclBuilder::default();
+    let mut builder = vergen_git2::Git2Builder::default();
 
     builder.branch(true);
     builder.commit_timestamp(true);
@@ -25,5 +25,5 @@ fn emit_git_variables() -> anyhow::Result<()> {
 
     let git2 = builder.build()?;
 
-    vergen_gitcl::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
+    vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
 }
@@ -1,6 +0,0 @@
-use build_info::BuildInfo;
-
-fn main() {
-    let info = BuildInfo::from_build();
-    dbg!(info);
-}
@@ -11,27 +11,24 @@ readme.workspace = true
 license.workspace = true
 
 [dependencies]
-anyhow = "1.0.100"
-flate2 = "1.1.5"
+anyhow = "1.0.98"
+flate2 = "1.1.2"
 http = "1.3.1"
 meilisearch-types = { path = "../meilisearch-types" }
 once_cell = "1.21.3"
-regex = "1.12.2"
+regex = "1.11.1"
 roaring = { version = "0.10.12", features = ["serde"] }
-serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
+serde = { version = "1.0.219", features = ["derive"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
 tar = "0.4.44"
-tempfile = "3.23.0"
-thiserror = "2.0.17"
-time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
+tempfile = "3.20.0"
+thiserror = "2.0.12"
+time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
 tracing = "0.1.41"
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }
 
 [dev-dependencies]
 big_s = "1.0.2"
 maplit = "1.0.2"
 meili-snap = { path = "../meili-snap" }
 meilisearch-types = { path = "../meilisearch-types" }
-
-[features]
-enterprise = ["meilisearch-types/enterprise"]
@@ -96,8 +96,6 @@ pub struct TaskDump {
     pub finished_at: Option<OffsetDateTime>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub network: Option<TaskNetwork>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub custom_metadata: Option<String>,
 }
 
 // A `Kind` specific version made for the dump. If modified you may break the dump.
@@ -180,7 +178,6 @@ impl From<Task> for TaskDump {
             started_at: task.started_at,
             finished_at: task.finished_at,
             network: task.network,
-            custom_metadata: task.custom_metadata,
         }
     }
 }
@@ -262,13 +259,13 @@ pub(crate) mod test {
     use big_s::S;
     use maplit::{btreemap, btreeset};
     use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
+    use meilisearch_types::enterprise_edition::network::{Network, Remote};
    use meilisearch_types::facet_values_sort::FacetValuesSort;
     use meilisearch_types::features::RuntimeTogglableFeatures;
     use meilisearch_types::index_uid_pattern::IndexUidPattern;
     use meilisearch_types::keys::{Action, Key};
     use meilisearch_types::milli::update::Setting;
     use meilisearch_types::milli::{self, FilterableAttributesRule};
-    use meilisearch_types::network::{Network, Remote};
     use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
     use meilisearch_types::task_view::DetailsView;
     use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
@@ -399,7 +396,6 @@ pub(crate) mod test {
             started_at: Some(datetime!(2022-11-20 0:00 UTC)),
             finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
             network: None,
-            custom_metadata: None,
         },
         None,
     ),
@@ -425,7 +421,6 @@ pub(crate) mod test {
             started_at: None,
             finished_at: None,
             network: None,
-            custom_metadata: None,
         },
         Some(vec![
             json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
@@ -446,7 +441,6 @@ pub(crate) mod test {
             started_at: None,
             finished_at: None,
             network: None,
-            custom_metadata: None,
         },
         None,
     ),
@@ -164,7 +164,6 @@ impl CompatV5ToV6 {
             started_at: task_view.started_at,
             finished_at: task_view.finished_at,
             network: None,
-            custom_metadata: None,
         };
 
         (task, content_file)
@@ -107,14 +107,19 @@ impl Settings<Unchecked> {
     }
 }
 
-#[derive(Default, Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub const fn is_not_set(&self) -> bool {
         matches!(self, Self::NotSet)
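This hunk (repeated below for the other dump-compat versions of `Setting<T>`) swaps the derived `Default` with a `#[default]` variant marker for a hand-written impl. The two are nearly equivalent, but not identical: the derive also adds a `T: Default` bound, while the manual impl works for any `T`. A minimal sketch (type names here are ours, for illustration):

```rust
// Derived form, as on one branch (requires Rust 1.62+ for `#[default]`);
// the derive constrains `T: Default` even though no `T` value is produced.
#[derive(Default, Debug)]
enum Derived<T> {
    Set(T),
    Reset,
    #[default]
    NotSet,
}

// Manual form, as on the other branch: no bound on `T`.
#[derive(Debug)]
enum Manual<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> Default for Manual<T> {
    fn default() -> Self {
        Self::NotSet
    }
}

fn main() {
    // Both defaults are NotSet.
    assert!(matches!(Derived::<u32>::default(), Derived::NotSet));
    assert!(matches!(Manual::<u32>::default(), Manual::NotSet));
}
```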
@@ -161,14 +161,19 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }
 
-#[derive(Default, Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn map<U, F>(self, f: F) -> Setting<U>
     where
@@ -1,7 +1,9 @@
 use std::fmt::{self, Display, Formatter};
+use std::marker::PhantomData;
 use std::str::FromStr;
 
-use serde::Deserialize;
+use serde::de::Visitor;
+use serde::{Deserialize, Deserializer};
 use uuid::Uuid;
 
 use super::settings::{Settings, Unchecked};
@@ -80,3 +82,59 @@ impl Display for IndexUidFormatError {
 }
 
 impl std::error::Error for IndexUidFormatError {}
+
+/// A type that tries to match either a star (*) or
+/// any other thing that implements `FromStr`.
+#[derive(Debug)]
+#[cfg_attr(test, derive(serde::Serialize))]
+pub enum StarOr<T> {
+    Star,
+    Other(T),
+}
+
+impl<'de, T, E> Deserialize<'de> for StarOr<T>
+where
+    T: FromStr<Err = E>,
+    E: Display,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        /// Serde can't differentiate between `StarOr::Star` and `StarOr::Other` without a tag.
+        /// Simply using `#[serde(untagged)]` + `#[serde(rename="*")]` will lead to attempting to
+        /// deserialize everything as a `StarOr::Other`, including "*".
+        /// [`#[serde(other)]`](https://serde.rs/variant-attrs.html#other) might have helped but is
+        /// not supported on untagged enums.
+        struct StarOrVisitor<T>(PhantomData<T>);
+
+        impl<T, FE> Visitor<'_> for StarOrVisitor<T>
+        where
+            T: FromStr<Err = FE>,
+            FE: Display,
+        {
+            type Value = StarOr<T>;
+
+            fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result {
+                formatter.write_str("a string")
+            }
+
+            fn visit_str<SE>(self, v: &str) -> Result<Self::Value, SE>
+            where
+                SE: serde::de::Error,
+            {
+                match v {
+                    "*" => Ok(StarOr::Star),
+                    v => {
+                        let other = FromStr::from_str(v).map_err(|e: T::Err| {
+                            SE::custom(format!("Invalid `other` value: {}", e))
+                        })?;
+                        Ok(StarOr::Other(other))
+                    }
+                }
+            }
+        }
+
+        deserializer.deserialize_str(StarOrVisitor(PhantomData))
+    }
+}
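The visitor added above maps the literal `"*"` to `StarOr::Star` and routes everything else through `FromStr`. A hedged usage sketch (the `Filter` struct is hypothetical; `String`'s infallible `FromStr` satisfies the bounds):

```rust
use serde::Deserialize;

// Hypothetical container; assumes the StarOr<T> added by the hunk above is in scope.
#[derive(Deserialize, Debug)]
struct Filter {
    index: StarOr<String>,
}

fn main() {
    let star: Filter = serde_json::from_str(r#"{ "index": "*" }"#).unwrap();
    assert!(matches!(star.index, StarOr::Star));

    let other: Filter = serde_json::from_str(r#"{ "index": "movies" }"#).unwrap();
    assert!(matches!(other.index, StarOr::Other(ref name) if name == "movies"));
}
```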
@@ -192,14 +192,19 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }
 
-#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn set(self) -> Option<T> {
         match self {
@@ -47,15 +47,20 @@ pub struct Settings<T> {
     pub _kind: PhantomData<T>,
 }
 
-#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn set(self) -> Option<T> {
         match self {
@@ -322,7 +322,7 @@ impl From<Task> for TaskView {
             _ => None,
         });
 
-        let duration = finished_at.zip(started_at).map(|(tf, ts)| tf - ts);
+        let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));
 
         Self {
             uid: id,
@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
 pub type Key = meilisearch_types::keys::Key;
 pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
 pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
-pub type Network = meilisearch_types::network::Network;
+pub type Network = meilisearch_types::enterprise_edition::network::Network;
 pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
 
 // ===== Other types to clarify the code of the compat module
@@ -5,9 +5,9 @@ use std::path::PathBuf;
 use flate2::write::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
+use meilisearch_types::enterprise_edition::network::Network;
 use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
 use meilisearch_types::keys::Key;
-use meilisearch_types::network::Network;
 use meilisearch_types::settings::{Checked, Settings};
 use meilisearch_types::webhooks::WebhooksDumpView;
 use serde_json::{Map, Value};
@@ -11,7 +11,7 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-tempfile = "3.23.0"
-thiserror = "2.0.17"
+tempfile = "3.20.0"
+thiserror = "2.0.12"
 tracing = "0.1.41"
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }
@@ -60,7 +60,7 @@ impl FileStore {
|
|||||||
|
|
||||||
/// Returns the file corresponding to the requested uuid.
|
/// Returns the file corresponding to the requested uuid.
|
||||||
pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> {
|
pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> {
|
||||||
let path = self.update_path(uuid);
|
let path = self.get_update_path(uuid);
|
||||||
let file = match StdFile::open(path) {
|
let file = match StdFile::open(path) {
|
||||||
Ok(file) => file,
|
Ok(file) => file,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
@@ -72,7 +72,7 @@ impl FileStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the path that correspond to this uuid, the path could not exists.
|
/// Returns the path that correspond to this uuid, the path could not exists.
|
||||||
pub fn update_path(&self, uuid: Uuid) -> PathBuf {
|
pub fn get_update_path(&self, uuid: Uuid) -> PathBuf {
|
||||||
self.path.join(uuid.to_string())
|
self.path.join(uuid.to_string())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ license.workspace = true
|
|||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
criterion = { version = "0.7.0", features = ["html_reports"] }
|
criterion = { version = "0.6.0", features = ["html_reports"] }
|
||||||
|
|
||||||
[[bench]]
|
[[bench]]
|
||||||
name = "benchmarks"
|
name = "benchmarks"
|
||||||
|
|||||||
@@ -11,12 +11,12 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
arbitrary = { version = "1.4.2", features = ["derive"] }
|
arbitrary = { version = "1.4.1", features = ["derive"] }
|
||||||
bumpalo = "3.19.0"
|
bumpalo = "3.18.1"
|
||||||
clap = { version = "4.5.52", features = ["derive"] }
|
clap = { version = "4.5.40", features = ["derive"] }
|
||||||
either = "1.15.0"
|
either = "1.15.0"
|
||||||
fastrand = "2.3.0"
|
fastrand = "2.3.0"
|
||||||
milli = { path = "../milli" }
|
milli = { path = "../milli" }
|
||||||
serde = { version = "1.0.228", features = ["derive"] }
|
serde = { version = "1.0.219", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.145", features = ["preserve_order"] }
|
serde_json = { version = "1.0.140", features = ["preserve_order"] }
|
||||||
tempfile = "3.23.0"
|
tempfile = "3.20.0"
|
||||||
|
|||||||
@@ -11,33 +11,31 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0.100"
|
anyhow = "1.0.98"
|
||||||
bincode = "1.3.3"
|
bincode = "1.3.3"
|
||||||
byte-unit = "5.1.6"
|
byte-unit = "5.1.6"
|
||||||
bytes = "1.11.0"
|
bumpalo = "3.18.1"
|
||||||
bumpalo = "3.19.0"
|
|
||||||
bumparaw-collections = "0.1.4"
|
bumparaw-collections = "0.1.4"
|
||||||
convert_case = "0.9.0"
|
convert_case = "0.8.0"
|
||||||
csv = "1.4.0"
|
csv = "1.3.1"
|
||||||
derive_builder = "0.20.2"
|
derive_builder = "0.20.2"
|
||||||
dump = { path = "../dump" }
|
dump = { path = "../dump" }
|
||||||
enum-iterator = "2.3.0"
|
enum-iterator = "2.1.0"
|
||||||
file-store = { path = "../file-store" }
|
file-store = { path = "../file-store" }
|
||||||
flate2 = "1.1.5"
|
flate2 = "1.1.2"
|
||||||
indexmap = "2.12.0"
|
indexmap = "2.9.0"
|
||||||
meilisearch-auth = { path = "../meilisearch-auth" }
|
meilisearch-auth = { path = "../meilisearch-auth" }
|
||||||
meilisearch-types = { path = "../meilisearch-types" }
|
meilisearch-types = { path = "../meilisearch-types" }
|
||||||
memmap2 = "0.9.9"
|
memmap2 = "0.9.7"
|
||||||
page_size = "0.6.0"
|
page_size = "0.6.0"
|
||||||
rayon = "1.11.0"
|
rayon = "1.10.0"
|
||||||
roaring = { version = "0.10.12", features = ["serde"] }
|
roaring = { version = "0.10.12", features = ["serde"] }
|
||||||
serde = { version = "1.0.228", features = ["derive"] }
|
serde = { version = "1.0.219", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.145", features = ["preserve_order"] }
|
serde_json = { version = "1.0.140", features = ["preserve_order"] }
|
||||||
tar = "0.4.44"
|
|
||||||
synchronoise = "1.0.1"
|
synchronoise = "1.0.1"
|
||||||
tempfile = "3.23.0"
|
tempfile = "3.20.0"
|
||||||
thiserror = "2.0.17"
|
thiserror = "2.0.12"
|
||||||
time = { version = "0.3.44", features = [
|
time = { version = "0.3.41", features = [
|
||||||
"serde-well-known",
|
"serde-well-known",
|
||||||
"formatting",
|
"formatting",
|
||||||
"parsing",
|
"parsing",
|
||||||
@@ -45,11 +43,8 @@ time = { version = "0.3.44", features = [
|
|||||||
] }
|
] }
|
||||||
tracing = "0.1.41"
|
tracing = "0.1.41"
|
||||||
ureq = "2.12.1"
|
ureq = "2.12.1"
|
||||||
uuid = { version = "1.18.1", features = ["serde", "v4"] }
|
uuid = { version = "1.17.0", features = ["serde", "v4"] }
|
||||||
backoff = "0.4.0"
|
backoff = "0.4.0"
|
||||||
reqwest = { version = "0.12.24", features = ["rustls-tls", "http2"], default-features = false }
|
|
||||||
rusty-s3 = "0.8.1"
|
|
||||||
tokio = { version = "1.48.0", features = ["full"] }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
big_s = "1.0.2"
|
big_s = "1.0.2"
|
||||||
|
|||||||
@@ -150,7 +150,6 @@ impl<'a> Dump<'a> {
|
|||||||
details: task.details,
|
details: task.details,
|
||||||
status: task.status,
|
status: task.status,
|
||||||
network: task.network,
|
network: task.network,
|
||||||
custom_metadata: task.custom_metadata,
|
|
||||||
kind: match task.kind {
|
kind: match task.kind {
|
||||||
KindDump::DocumentImport {
|
KindDump::DocumentImport {
|
||||||
primary_key,
|
primary_key,
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ use meilisearch_types::error::{Code, ErrorCode};
|
|||||||
use meilisearch_types::milli::index::RollbackOutcome;
|
use meilisearch_types::milli::index::RollbackOutcome;
|
||||||
use meilisearch_types::tasks::{Kind, Status};
|
use meilisearch_types::tasks::{Kind, Status};
|
||||||
use meilisearch_types::{heed, milli};
|
use meilisearch_types::{heed, milli};
|
||||||
use reqwest::StatusCode;
|
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
|
|
||||||
use crate::TaskId;
|
use crate::TaskId;
|
||||||
@@ -128,14 +127,6 @@ pub enum Error {
|
|||||||
#[error("Aborted task")]
|
#[error("Aborted task")]
|
||||||
AbortedTask,
|
AbortedTask,
|
||||||
|
|
||||||
#[error("S3 error: status: {status}, body: {body}")]
|
|
||||||
S3Error { status: StatusCode, body: String },
|
|
||||||
#[error("S3 HTTP error: {0}")]
|
|
||||||
S3HttpError(reqwest::Error),
|
|
||||||
#[error("S3 XML error: {0}")]
|
|
||||||
S3XmlError(Box<dyn std::error::Error + Send + Sync>),
|
|
||||||
#[error("S3 bucket error: {0}")]
|
|
||||||
S3BucketError(rusty_s3::BucketError),
|
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
Dump(#[from] dump::Error),
|
Dump(#[from] dump::Error),
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
@@ -235,10 +226,6 @@ impl Error {
|
|||||||
| Error::TaskCancelationWithEmptyQuery
|
| Error::TaskCancelationWithEmptyQuery
|
||||||
| Error::FromRemoteWhenExporting { .. }
|
| Error::FromRemoteWhenExporting { .. }
|
||||||
| Error::AbortedTask
|
| Error::AbortedTask
|
||||||
| Error::S3Error { .. }
|
|
||||||
| Error::S3HttpError(_)
|
|
||||||
| Error::S3XmlError(_)
|
|
||||||
| Error::S3BucketError(_)
|
|
||||||
| Error::Dump(_)
|
| Error::Dump(_)
|
||||||
| Error::Heed(_)
|
| Error::Heed(_)
|
||||||
| Error::Milli { .. }
|
| Error::Milli { .. }
|
||||||
@@ -306,14 +293,8 @@ impl ErrorCode for Error {
|
|||||||
Error::BatchNotFound(_) => Code::BatchNotFound,
|
Error::BatchNotFound(_) => Code::BatchNotFound,
|
||||||
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
|
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
|
||||||
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
|
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
|
||||||
|
// TODO: not sure of the Code to use
|
||||||
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
|
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
|
||||||
Error::S3Error { status, .. } if status.is_client_error() => {
|
|
||||||
Code::InvalidS3SnapshotRequest
|
|
||||||
}
|
|
||||||
Error::S3Error { .. } => Code::S3SnapshotServerError,
|
|
||||||
Error::S3HttpError(_) => Code::S3SnapshotServerError,
|
|
||||||
Error::S3XmlError(_) => Code::S3SnapshotServerError,
|
|
||||||
Error::S3BucketError(_) => Code::InvalidS3SnapshotParameters,
|
|
||||||
Error::Dump(e) => e.error_code(),
|
Error::Dump(e) => e.error_code(),
|
||||||
Error::Milli { error, .. } => error.error_code(),
|
Error::Milli { error, .. } => error.error_code(),
|
||||||
Error::ProcessBatchPanicked(_) => Code::Internal,
|
Error::ProcessBatchPanicked(_) => Code::Internal,
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
|
use meilisearch_types::enterprise_edition::network::Network;
|
||||||
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
|
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
|
||||||
use meilisearch_types::heed::types::{SerdeJson, Str};
|
use meilisearch_types::heed::types::{SerdeJson, Str};
|
||||||
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
|
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
|
||||||
use meilisearch_types::network::Network;
|
|
||||||
|
|
||||||
use crate::error::FeatureNotEnabledError;
|
use crate::error::FeatureNotEnabledError;
|
||||||
use crate::Result;
|
use crate::Result;
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
|
|||||||
use meilisearch_types::heed::{Database, RoTxn};
|
use meilisearch_types::heed::{Database, RoTxn};
|
||||||
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
|
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
|
||||||
use meilisearch_types::tasks::{Details, Kind, Status, Task};
|
use meilisearch_types::tasks::{Details, Kind, Status, Task};
|
||||||
use meilisearch_types::versioning::{self, VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
|
use meilisearch_types::versioning;
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
|
|
||||||
use crate::index_mapper::IndexMapper;
|
use crate::index_mapper::IndexMapper;
|
||||||
@@ -36,7 +36,6 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
|
|||||||
run_loop_iteration: _,
|
run_loop_iteration: _,
|
||||||
embedders: _,
|
embedders: _,
|
||||||
chat_settings: _,
|
chat_settings: _,
|
||||||
runtime: _,
|
|
||||||
} = scheduler;
|
} = scheduler;
|
||||||
|
|
||||||
let rtxn = env.read_txn().unwrap();
|
let rtxn = env.read_txn().unwrap();
|
||||||
@@ -232,7 +231,6 @@ pub fn snapshot_task(task: &Task) -> String {
|
|||||||
status,
|
status,
|
||||||
kind,
|
kind,
|
||||||
network,
|
network,
|
||||||
custom_metadata,
|
|
||||||
} = task;
|
} = task;
|
||||||
snap.push('{');
|
snap.push('{');
|
||||||
snap.push_str(&format!("uid: {uid}, "));
|
snap.push_str(&format!("uid: {uid}, "));
|
||||||
@@ -253,9 +251,6 @@ pub fn snapshot_task(task: &Task) -> String {
|
|||||||
if let Some(network) = network {
|
if let Some(network) = network {
|
||||||
snap.push_str(&format!("network: {network:?}, "))
|
snap.push_str(&format!("network: {network:?}, "))
|
||||||
}
|
}
|
||||||
if let Some(custom_metadata) = custom_metadata {
|
|
||||||
snap.push_str(&format!("custom_metadata: {custom_metadata:?}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
snap.push('}');
|
snap.push('}');
|
||||||
snap
|
snap
|
||||||
@@ -320,12 +315,8 @@ fn snapshot_details(d: &Details) -> String {
|
|||||||
format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
|
format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
|
||||||
}
|
}
|
||||||
Details::UpgradeDatabase { from, to } => {
|
Details::UpgradeDatabase { from, to } => {
|
||||||
if to == &(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) {
|
|
||||||
format!("{{ from: {from:?}, to: [current version] }}")
|
|
||||||
} else {
|
|
||||||
format!("{{ from: {from:?}, to: {to:?} }}")
|
format!("{{ from: {from:?}, to: {to:?} }}")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
|
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
|
||||||
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
|
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
|
||||||
}
|
}
|
||||||
@@ -404,21 +395,7 @@ pub fn snapshot_batch(batch: &Batch) -> String {
|
|||||||
|
|
||||||
snap.push('{');
|
snap.push('{');
|
||||||
snap.push_str(&format!("uid: {uid}, "));
|
snap.push_str(&format!("uid: {uid}, "));
|
||||||
let details = if let Some(upgrade_to) = &details.upgrade_to {
|
snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
|
||||||
if upgrade_to.as_str()
|
|
||||||
== format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}").as_str()
|
|
||||||
{
|
|
||||||
let mut details = details.clone();
|
|
||||||
|
|
||||||
details.upgrade_to = Some("[current version]".into());
|
|
||||||
serde_json::to_string(&details).unwrap()
|
|
||||||
} else {
|
|
||||||
serde_json::to_string(details).unwrap()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
serde_json::to_string(details).unwrap()
|
|
||||||
};
|
|
||||||
snap.push_str(&format!("details: {details}, "));
|
|
||||||
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
|
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
|
||||||
if !embedder_stats.skip_serializing() {
|
if !embedder_stats.skip_serializing() {
|
||||||
snap.push_str(&format!(
|
snap.push_str(&format!(
|
||||||
|
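The block removed in the hunk above exists to keep insta snapshots stable across releases: when a batch's `upgradeTo` detail matches the version the binary was built with, it is rewritten to the placeholder `[current version]` before serialization (the snapshot files at the end of this compare show both spellings). A minimal standalone sketch of that redaction idea, with illustrative helper names and a hard-coded version:

```rust
// Sketch only: replace the current build version with a stable
// placeholder so the snapshot text does not churn on every release.
const VERSION_MAJOR: u32 = 1;
const VERSION_MINOR: u32 = 24;
const VERSION_PATCH: u32 = 0;

fn redact_version(details: &str) -> String {
    let current = format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}");
    details.replace(&current, "[current version]")
}

fn main() {
    let raw = r#"{"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}"#;
    assert_eq!(
        redact_version(raw),
        r#"{"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}"#
    );
}
```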
@@ -54,6 +54,7 @@ pub use features::RoFeatures;
 use flate2::bufread::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
+use meilisearch_types::enterprise_edition::network::Network;
 use meilisearch_types::features::{
     ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
 };
@@ -66,7 +67,6 @@ use meilisearch_types::milli::vector::{
     Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment,
 };
 use meilisearch_types::milli::{self, Index};
-use meilisearch_types::network::Network;
 use meilisearch_types::task_view::TaskView;
 use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
 use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
@@ -216,9 +216,6 @@ pub struct IndexScheduler {
     /// A counter that is incremented before every call to [`tick`](IndexScheduler::tick)
     #[cfg(test)]
     run_loop_iteration: Arc<RwLock<usize>>,
-
-    /// The tokio runtime used for asynchronous tasks.
-    runtime: Option<tokio::runtime::Handle>,
 }
 
 impl IndexScheduler {
@@ -245,7 +242,6 @@ impl IndexScheduler {
             run_loop_iteration: self.run_loop_iteration.clone(),
             features: self.features.clone(),
             chat_settings: self.chat_settings,
-            runtime: self.runtime.clone(),
         }
     }
 
@@ -259,23 +255,13 @@ impl IndexScheduler {
     }
 
     /// Create an index scheduler and start its run loop.
+    #[allow(private_interfaces)] // because test_utils is private
     pub fn new(
         options: IndexSchedulerOptions,
         auth_env: Env<WithoutTls>,
         from_db_version: (u32, u32, u32),
-        runtime: Option<tokio::runtime::Handle>,
-    ) -> Result<Self> {
-        let this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
-
-        this.run();
-        Ok(this)
-    }
-
-    fn new_without_run(
-        options: IndexSchedulerOptions,
-        auth_env: Env<WithoutTls>,
-        from_db_version: (u32, u32, u32),
-        runtime: Option<tokio::runtime::Handle>,
+        #[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
+        #[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
     ) -> Result<Self> {
         std::fs::create_dir_all(&options.tasks_path)?;
         std::fs::create_dir_all(&options.update_file_path)?;
@@ -330,7 +316,8 @@ impl IndexScheduler {
 
         wtxn.commit()?;
 
-        Ok(Self {
+        // allow unreachable_code to get rids of the warning in the case of a test build.
+        let this = Self {
             processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
             version,
             queue,
@@ -346,32 +333,15 @@ impl IndexScheduler {
             webhooks: Arc::new(webhooks),
             embedders: Default::default(),
 
-            #[cfg(test)] // Will be replaced in `new_tests` in test environments
-            test_breakpoint_sdr: crossbeam_channel::bounded(0).0,
-            #[cfg(test)] // Will be replaced in `new_tests` in test environments
-            planned_failures: Default::default(),
+            #[cfg(test)]
+            test_breakpoint_sdr,
+            #[cfg(test)]
+            planned_failures,
             #[cfg(test)]
             run_loop_iteration: Arc::new(RwLock::new(0)),
             features,
             chat_settings,
-            runtime,
-        })
-    }
+        };
 
-    /// Create an index scheduler and start its run loop.
-    #[cfg(test)]
-    fn new_test(
-        options: IndexSchedulerOptions,
-        auth_env: Env<WithoutTls>,
-        from_db_version: (u32, u32, u32),
-        runtime: Option<tokio::runtime::Handle>,
-        test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
-        planned_failures: Vec<(usize, test_utils::FailureLocation)>,
-    ) -> Result<Self> {
-        let mut this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
-
-        this.test_breakpoint_sdr = test_breakpoint_sdr;
-        this.planned_failures = planned_failures;
-
         this.run();
         Ok(this)
@@ -756,19 +726,6 @@ impl IndexScheduler {
         kind: KindWithContent,
         task_id: Option<TaskId>,
         dry_run: bool,
-    ) -> Result<Task> {
-        self.register_with_custom_metadata(kind, task_id, None, dry_run)
-    }
-
-    /// Register a new task in the scheduler, with metadata.
-    ///
-    /// If it fails and data was associated with the task, it tries to delete the associated data.
-    pub fn register_with_custom_metadata(
-        &self,
-        kind: KindWithContent,
-        task_id: Option<TaskId>,
-        custom_metadata: Option<String>,
-        dry_run: bool,
     ) -> Result<Task> {
         // if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
         if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
@@ -779,7 +736,7 @@ impl IndexScheduler {
         }
 
         let mut wtxn = self.env.write_txn()?;
-        let task = self.queue.register(&mut wtxn, &kind, task_id, custom_metadata, dry_run)?;
+        let task = self.queue.register(&mut wtxn, &kind, task_id, dry_run)?;
 
         // If the registered task is a task cancelation
         // we inform the processing tasks to stop (if necessary).
@@ -257,7 +257,6 @@ impl Queue {
         wtxn: &mut RwTxn,
         kind: &KindWithContent,
         task_id: Option<TaskId>,
-        custom_metadata: Option<String>,
         dry_run: bool,
     ) -> Result<Task> {
         let next_task_id = self.tasks.next_task_id(wtxn)?;
@@ -281,7 +280,6 @@ impl Queue {
             status: Status::Enqueued,
             kind: kind.clone(),
             network: None,
-            custom_metadata,
         };
         // For deletion and cancelation tasks, we want to make extra sure that they
         // don't attempt to delete/cancel tasks that are newer than themselves.
@@ -346,7 +344,6 @@ impl Queue {
                 tasks: to_delete,
             },
             None,
-            None,
             false,
         )?;
 
@@ -25,7 +25,6 @@ use convert_case::{Case, Casing as _};
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::heed::{Env, WithoutTls};
 use meilisearch_types::milli;
-use meilisearch_types::milli::update::S3SnapshotOptions;
 use meilisearch_types::tasks::Status;
 use process_batch::ProcessBatchInfo;
 use rayon::current_num_threads;
@@ -88,14 +87,11 @@ pub struct Scheduler {
 
     /// Snapshot compaction status.
     pub(crate) experimental_no_snapshot_compaction: bool,
-
-    /// S3 Snapshot options.
-    pub(crate) s3_snapshot_options: Option<S3SnapshotOptions>,
 }
 
 impl Scheduler {
-    pub(crate) fn private_clone(&self) -> Self {
-        Self {
+    pub(crate) fn private_clone(&self) -> Scheduler {
+        Scheduler {
             must_stop_processing: self.must_stop_processing.clone(),
             wake_up: self.wake_up.clone(),
             autobatching_enabled: self.autobatching_enabled,
@@ -107,52 +103,23 @@ impl Scheduler {
             version_file_path: self.version_file_path.clone(),
             embedding_cache_cap: self.embedding_cache_cap,
             experimental_no_snapshot_compaction: self.experimental_no_snapshot_compaction,
-            s3_snapshot_options: self.s3_snapshot_options.clone(),
         }
     }
 
     pub fn new(options: &IndexSchedulerOptions, auth_env: Env<WithoutTls>) -> Scheduler {
-        let IndexSchedulerOptions {
-            version_file_path,
-            auth_path: _,
-            tasks_path: _,
-            update_file_path: _,
-            indexes_path: _,
-            snapshots_path,
-            dumps_path,
-            cli_webhook_url: _,
-            cli_webhook_authorization: _,
-            task_db_size: _,
-            index_base_map_size: _,
-            enable_mdb_writemap: _,
-            index_growth_amount: _,
-            index_count: _,
-            indexer_config,
-            autobatching_enabled,
-            cleanup_enabled: _,
-            max_number_of_tasks: _,
-            max_number_of_batched_tasks,
-            batched_tasks_size_limit,
-            instance_features: _,
-            auto_upgrade: _,
-            embedding_cache_cap,
-            experimental_no_snapshot_compaction,
-        } = options;
-
         Scheduler {
             must_stop_processing: MustStopProcessing::default(),
             // we want to start the loop right away in case meilisearch was ctrl+Ced while processing things
             wake_up: Arc::new(SignalEvent::auto(true)),
-            autobatching_enabled: *autobatching_enabled,
-            max_number_of_batched_tasks: *max_number_of_batched_tasks,
-            batched_tasks_size_limit: *batched_tasks_size_limit,
-            dumps_path: dumps_path.clone(),
-            snapshots_path: snapshots_path.clone(),
+            autobatching_enabled: options.autobatching_enabled,
+            max_number_of_batched_tasks: options.max_number_of_batched_tasks,
+            batched_tasks_size_limit: options.batched_tasks_size_limit,
+            dumps_path: options.dumps_path.clone(),
+            snapshots_path: options.snapshots_path.clone(),
             auth_env,
-            version_file_path: version_file_path.clone(),
-            embedding_cache_cap: *embedding_cache_cap,
-            experimental_no_snapshot_compaction: *experimental_no_snapshot_compaction,
-            s3_snapshot_options: indexer_config.s3_snapshot_options.clone(),
+            version_file_path: options.version_file_path.clone(),
+            embedding_cache_cap: options.embedding_cache_cap,
+            experimental_no_snapshot_compaction: options.experimental_no_snapshot_compaction,
         }
    }
 }
@@ -12,8 +12,6 @@ use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress};
 use crate::queue::TaskQueue;
 use crate::{Error, IndexScheduler, Result};
 
-const UPDATE_FILES_DIR_NAME: &str = "update_files";
-
 /// # Safety
 ///
 /// See [`EnvOpenOptions::open`].
@@ -80,32 +78,10 @@ impl IndexScheduler {
     pub(super) fn process_snapshot(
         &self,
         progress: Progress,
-        tasks: Vec<Task>,
+        mut tasks: Vec<Task>,
     ) -> Result<Vec<Task>> {
         progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation);
 
-        match self.scheduler.s3_snapshot_options.clone() {
-            Some(options) => {
-                #[cfg(not(unix))]
-                {
-                    let _ = options;
-                    panic!("Non-unix platform does not support S3 snapshotting");
-                }
-                #[cfg(unix)]
-                self.runtime
-                    .as_ref()
-                    .expect("Runtime not initialized")
-                    .block_on(self.process_snapshot_to_s3(progress, options, tasks))
-            }
-            None => self.process_snapshots_to_disk(progress, tasks),
-        }
-    }
-
-    fn process_snapshots_to_disk(
-        &self,
-        progress: Progress,
-        mut tasks: Vec<Task>,
-    ) -> Result<Vec<Task>, Error> {
         fs::create_dir_all(&self.scheduler.snapshots_path)?;
         let temp_snapshot_dir = tempfile::tempdir()?;
 
@@ -152,7 +128,7 @@ impl IndexScheduler {
         let rtxn = self.env.read_txn()?;
 
         // 2.4 Create the update files directory
-        let update_files_dir = temp_snapshot_dir.path().join(UPDATE_FILES_DIR_NAME);
+        let update_files_dir = temp_snapshot_dir.path().join("update_files");
         fs::create_dir_all(&update_files_dir)?;
 
         // 2.5 Only copy the update files of the enqueued tasks
@@ -164,7 +140,7 @@ impl IndexScheduler {
             let task =
                 self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
             if let Some(content_uuid) = task.content_uuid() {
-                let src = self.queue.file_store.update_path(content_uuid);
+                let src = self.queue.file_store.get_update_path(content_uuid);
                 let dst = update_files_dir.join(content_uuid.to_string());
                 fs::copy(src, dst)?;
             }
@@ -230,407 +206,4 @@ impl IndexScheduler {
 
         Ok(tasks)
     }
-
-    #[cfg(unix)]
-    pub(super) async fn process_snapshot_to_s3(
-        &self,
-        progress: Progress,
-        opts: meilisearch_types::milli::update::S3SnapshotOptions,
-        mut tasks: Vec<Task>,
-    ) -> Result<Vec<Task>> {
-        use meilisearch_types::milli::update::S3SnapshotOptions;
-
-        let S3SnapshotOptions {
-            s3_bucket_url,
-            s3_bucket_region,
-            s3_bucket_name,
-            s3_snapshot_prefix,
-            s3_access_key,
-            s3_secret_key,
-            s3_max_in_flight_parts,
-            s3_compression_level: level,
-            s3_signature_duration,
-            s3_multipart_part_size,
-        } = opts;
-
-        let must_stop_processing = self.scheduler.must_stop_processing.clone();
-        let retry_backoff = backoff::ExponentialBackoff::default();
-        let db_name = {
-            let mut base_path = self.env.path().to_owned();
-            base_path.pop();
-            base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms").to_string()
-        };
-
-        let (reader, writer) = std::io::pipe()?;
-        let uploader_task = tokio::spawn(multipart_stream_to_s3(
-            s3_bucket_url,
-            s3_bucket_region,
-            s3_bucket_name,
-            s3_snapshot_prefix,
-            s3_access_key,
-            s3_secret_key,
-            s3_max_in_flight_parts,
-            s3_signature_duration,
-            s3_multipart_part_size,
-            must_stop_processing,
-            retry_backoff,
-            db_name,
-            reader,
-        ));
-
-        let index_scheduler = IndexScheduler::private_clone(self);
-        let builder_task = tokio::task::spawn_blocking(move || {
-            stream_tarball_into_pipe(progress, level, writer, index_scheduler)
-        });
-
-        let (uploader_result, builder_result) = tokio::join!(uploader_task, builder_task);
-
-        // Check uploader result first to early return on task abortion.
-        // safety: JoinHandle can return an error if the task was aborted, cancelled, or panicked.
-        uploader_result.unwrap()?;
-        builder_result.unwrap()?;
-
-        for task in &mut tasks {
-            task.status = Status::Succeeded;
-        }
-
-        Ok(tasks)
-    }
-}
-
-/// Streams a tarball of the database content into a pipe.
-#[cfg(unix)]
-fn stream_tarball_into_pipe(
-    progress: Progress,
-    level: u32,
-    writer: std::io::PipeWriter,
-    index_scheduler: IndexScheduler,
-) -> std::result::Result<(), Error> {
-    use std::io::Write as _;
-    use std::path::Path;
-
-    let writer = flate2::write::GzEncoder::new(writer, flate2::Compression::new(level));
-    let mut tarball = tar::Builder::new(writer);
-
-    // 1. Snapshot the version file
-    tarball
-        .append_path_with_name(&index_scheduler.scheduler.version_file_path, VERSION_FILE_NAME)?;
-
-    // 2. Snapshot the index scheduler LMDB env
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
-    let tasks_env_file = index_scheduler.env.try_clone_inner_file()?;
-    let path = Path::new("tasks").join("data.mdb");
-    append_file_to_tarball(&mut tarball, path, tasks_env_file)?;
-
-    // 2.3 Create a read transaction on the index-scheduler
-    let rtxn = index_scheduler.env.read_txn()?;
-
-    // 2.4 Create the update files directory
-    // And only copy the update files of the enqueued tasks
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
-    let enqueued = index_scheduler.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
-    let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);
-    progress.update_progress(update_file_progress);
-
-    // We create the update_files directory so that it
-    // always exists even if there are no update files
-    let update_files_dir = Path::new(UPDATE_FILES_DIR_NAME);
-    let src_update_files_dir = {
-        let mut path = index_scheduler.env.path().to_path_buf();
-        path.pop();
-        path.join(UPDATE_FILES_DIR_NAME)
-    };
-    tarball.append_dir(update_files_dir, src_update_files_dir)?;
-
-    for task_id in enqueued {
-        let task = index_scheduler
-            .queue
-            .tasks
-            .get_task(&rtxn, task_id)?
-            .ok_or(Error::CorruptedTaskQueue)?;
-        if let Some(content_uuid) = task.content_uuid() {
-            use std::fs::File;
-
-            let src = index_scheduler.queue.file_store.update_path(content_uuid);
-            let mut update_file = File::open(src)?;
-            let path = update_files_dir.join(content_uuid.to_string());
-            tarball.append_file(path, &mut update_file)?;
-        }
-        atomic.fetch_add(1, Ordering::Relaxed);
-    }
-
-    // 3. Snapshot every indexes
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexes);
-    let index_mapping = index_scheduler.index_mapper.index_mapping;
-    let nb_indexes = index_mapping.len(&rtxn)? as u32;
-    let indexes_dir = Path::new("indexes");
-    let indexes_references: Vec<_> = index_scheduler
-        .index_mapper
-        .index_mapping
-        .iter(&rtxn)?
-        .map(|res| res.map_err(Error::from).map(|(name, uuid)| (name.to_string(), uuid)))
-        .collect::<Result<_, Error>>()?;
-
-    // It's prettier to use a for loop instead of the IndexMapper::try_for_each_index
-    // method, especially when we need to access the UUID, local path and index number.
-    for (i, (name, uuid)) in indexes_references.into_iter().enumerate() {
-        progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
-            &name, i as u32, nb_indexes,
-        ));
-        let path = indexes_dir.join(uuid.to_string()).join("data.mdb");
-        let index = index_scheduler.index_mapper.index(&rtxn, &name)?;
-        let index_file = index.try_clone_inner_file()?;
-        tracing::trace!("Appending index file for {name} in {}", path.display());
-        append_file_to_tarball(&mut tarball, path, index_file)?;
-    }
-
-    drop(rtxn);
-
-    // 4. Snapshot the auth LMDB env
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
-    let auth_env_file = index_scheduler.scheduler.auth_env.try_clone_inner_file()?;
-    let path = Path::new("auth").join("data.mdb");
-    append_file_to_tarball(&mut tarball, path, auth_env_file)?;
-
-    let mut gzencoder = tarball.into_inner()?;
-    gzencoder.flush()?;
-    gzencoder.try_finish()?;
-    let mut writer = gzencoder.finish()?;
-    writer.flush()?;
-
-    Result::<_, Error>::Ok(())
-}
-
-#[cfg(unix)]
-fn append_file_to_tarball<W, P>(
-    tarball: &mut tar::Builder<W>,
-    path: P,
-    mut auth_env_file: fs::File,
-) -> Result<(), Error>
-where
-    W: std::io::Write,
-    P: AsRef<std::path::Path>,
-{
-    use std::io::{Seek as _, SeekFrom};
-
-    // Note: A previous snapshot operation may have left the cursor
-    // at the end of the file so we need to seek to the start.
-    auth_env_file.seek(SeekFrom::Start(0))?;
-    tarball.append_file(path, &mut auth_env_file)?;
-    Ok(())
-}
-
-/// Streams the content read from the given reader to S3.
-#[cfg(unix)]
-#[allow(clippy::too_many_arguments)]
-async fn multipart_stream_to_s3(
-    s3_bucket_url: String,
-    s3_bucket_region: String,
-    s3_bucket_name: String,
-    s3_snapshot_prefix: String,
-    s3_access_key: String,
-    s3_secret_key: String,
-    s3_max_in_flight_parts: std::num::NonZero<usize>,
-    s3_signature_duration: std::time::Duration,
-    s3_multipart_part_size: u64,
-    must_stop_processing: super::MustStopProcessing,
-    retry_backoff: backoff::exponential::ExponentialBackoff<backoff::SystemClock>,
-    db_name: String,
-    reader: std::io::PipeReader,
-) -> Result<(), Error> {
-    use std::collections::VecDeque;
-    use std::io;
-    use std::os::fd::OwnedFd;
-    use std::path::PathBuf;
-
-    use bytes::{Bytes, BytesMut};
-    use reqwest::{Client, Response};
-    use rusty_s3::actions::CreateMultipartUpload;
-    use rusty_s3::{Bucket, BucketError, Credentials, S3Action as _, UrlStyle};
-    use tokio::task::JoinHandle;
-
-    let reader = OwnedFd::from(reader);
-    let reader = tokio::net::unix::pipe::Receiver::from_owned_fd(reader)?;
-    let s3_snapshot_prefix = PathBuf::from(s3_snapshot_prefix);
-    let url =
-        s3_bucket_url.parse().map_err(BucketError::ParseError).map_err(Error::S3BucketError)?;
-    let bucket = Bucket::new(url, UrlStyle::Path, s3_bucket_name, s3_bucket_region)
-        .map_err(Error::S3BucketError)?;
-    let credential = Credentials::new(s3_access_key, s3_secret_key);
-
-    // Note for the future (rust 1.91+): use with_added_extension, it's prettier
-    let object_path = s3_snapshot_prefix.join(format!("{db_name}.snapshot"));
-    // Note: It doesn't work on Windows and if a port to this platform is needed,
-    // use the slash-path crate or similar to get the correct path separator.
-    let object = object_path.display().to_string();
-
-    let action = bucket.create_multipart_upload(Some(&credential), &object);
-    let url = action.sign(s3_signature_duration);
-
-    let client = Client::new();
-    let resp = client.post(url).send().await.map_err(Error::S3HttpError)?;
-    let status = resp.status();
-
-    let body = match resp.error_for_status_ref() {
-        Ok(_) => resp.text().await.map_err(Error::S3HttpError)?,
-        Err(_) => {
-            return Err(Error::S3Error { status, body: resp.text().await.unwrap_or_default() })
-        }
-    };
-
-    let multipart =
-        CreateMultipartUpload::parse_response(&body).map_err(|e| Error::S3XmlError(Box::new(e)))?;
-    tracing::debug!("Starting the upload of the snapshot to {object}");
-
-    // We use this bumpalo for etags strings.
-    let bump = bumpalo::Bump::new();
-    let mut etags = Vec::<&str>::new();
-    let mut in_flight = VecDeque::<(JoinHandle<reqwest::Result<Response>>, Bytes)>::with_capacity(
-        s3_max_in_flight_parts.get(),
-    );
-
-    // Part numbers start at 1 and cannot be larger than 10k
-    for part_number in 1u16.. {
-        if must_stop_processing.get() {
-            return Err(Error::AbortedTask);
-        }
-
-        let part_upload =
-            bucket.upload_part(Some(&credential), &object, part_number, multipart.upload_id());
-        let url = part_upload.sign(s3_signature_duration);
-
-        // Wait for a buffer to be ready if there are in-flight parts that landed
-        let mut buffer = if in_flight.len() >= s3_max_in_flight_parts.get() {
-            let (handle, buffer) = in_flight.pop_front().expect("At least one in flight request");
-            let resp = join_and_map_error(handle).await?;
-            extract_and_append_etag(&bump, &mut etags, resp.headers())?;
-
-            let mut buffer = match buffer.try_into_mut() {
-                Ok(buffer) => buffer,
-                Err(_) => unreachable!("All bytes references were consumed in the task"),
-            };
-            buffer.clear();
-            buffer
-        } else {
-            BytesMut::with_capacity(s3_multipart_part_size as usize)
-        };
-
-        // If we successfully read enough bytes,
-        // we can continue and send the buffer/part
-        while buffer.len() < (s3_multipart_part_size as usize / 2) {
-            // Wait for the pipe to be readable
-            reader.readable().await?;
-
-            match reader.try_read_buf(&mut buffer) {
-                Ok(0) => break,
-                // We read some bytes but maybe not enough
-                Ok(_) => continue,
-                // The readiness event is a false positive.
-                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
-                Err(e) => return Err(e.into()),
-            }
-        }
-
-        if buffer.is_empty() {
-            // Break the loop if the buffer is
-            // empty after we tried to read bytes
-            break;
-        }
-
-        let body = buffer.freeze();
-        tracing::trace!("Sending part {part_number}");
-        let task = tokio::spawn({
-            let client = client.clone();
-            let body = body.clone();
-            backoff::future::retry(retry_backoff.clone(), move || {
-                let client = client.clone();
-                let url = url.clone();
-                let body = body.clone();
-                async move {
-                    match client.put(url).body(body).send().await {
-                        Ok(resp) if resp.status().is_client_error() => {
-                            resp.error_for_status().map_err(backoff::Error::Permanent)
-                        }
-                        Ok(resp) => Ok(resp),
-                        Err(e) => Err(backoff::Error::transient(e)),
-                    }
-                }
-            })
-        });
-        in_flight.push_back((task, body));
-    }
-
-    for (handle, _buffer) in in_flight {
-        let resp = join_and_map_error(handle).await?;
-        extract_and_append_etag(&bump, &mut etags, resp.headers())?;
-    }
-
-    tracing::debug!("Finalizing the multipart upload");
-
-    let action = bucket.complete_multipart_upload(
-        Some(&credential),
-        &object,
-        multipart.upload_id(),
-        etags.iter().map(AsRef::as_ref),
-    );
-    let url = action.sign(s3_signature_duration);
-    let body = action.body();
-    let resp = backoff::future::retry(retry_backoff, move || {
-        let client = client.clone();
-        let url = url.clone();
-        let body = body.clone();
-        async move {
-            match client.post(url).body(body).send().await {
-                Ok(resp) if resp.status().is_client_error() => {
-                    Err(backoff::Error::Permanent(Error::S3Error {
-                        status: resp.status(),
-                        body: resp.text().await.unwrap_or_default(),
-                    }))
-                }
-                Ok(resp) => Ok(resp),
-                Err(e) => Err(backoff::Error::transient(Error::S3HttpError(e))),
-            }
-        }
-    })
-    .await?;
-
-    let status = resp.status();
-    let body = resp.text().await.map_err(|e| Error::S3Error { status, body: e.to_string() })?;
-    if status.is_success() {
-        Ok(())
-    } else {
-        Err(Error::S3Error { status, body })
-    }
-}
-
-#[cfg(unix)]
-async fn join_and_map_error(
-    join_handle: tokio::task::JoinHandle<Result<reqwest::Response, reqwest::Error>>,
-) -> Result<reqwest::Response> {
-    // safety: Panic happens if the task (JoinHandle) was aborted, cancelled, or panicked
-    let request = join_handle.await.unwrap();
-    let resp = request.map_err(Error::S3HttpError)?;
-    match resp.error_for_status_ref() {
-        Ok(_) => Ok(resp),
-        Err(_) => Err(Error::S3Error {
-            status: resp.status(),
-            body: resp.text().await.unwrap_or_default(),
-        }),
-    }
-}
-
-#[cfg(unix)]
-fn extract_and_append_etag<'b>(
-    bump: &'b bumpalo::Bump,
-    etags: &mut Vec<&'b str>,
-    headers: &reqwest::header::HeaderMap,
-) -> Result<()> {
-    use reqwest::header::ETAG;
-
-    let etag = headers.get(ETAG).ok_or_else(|| Error::S3XmlError("Missing ETag header".into()))?;
-    let etag = etag.to_str().map_err(|e| Error::S3XmlError(Box::new(e)))?;
-    etags.push(bump.alloc_str(etag));
-
-    Ok(())
-}
 }
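For orientation, the removed `multipart_stream_to_s3` above drives the standard three-step S3 multipart protocol: create the upload, PUT each part against a presigned URL while collecting ETags, then complete the upload with the ordered ETag list. A condensed, sequential sketch using the same `rusty_s3` and `reqwest` calls that appear in the deleted code (the endpoint, bucket, object name and credentials are placeholders, and the retry and in-flight backpressure logic is omitted):

```rust
use std::time::Duration;

use rusty_s3::actions::CreateMultipartUpload;
use rusty_s3::{Bucket, Credentials, S3Action as _, UrlStyle};

async fn upload(parts: Vec<Vec<u8>>) -> Result<(), Box<dyn std::error::Error>> {
    let bucket = Bucket::new(
        "https://s3.example.com".parse()?, // placeholder endpoint
        UrlStyle::Path,
        "bucket",
        "eu-west-1",
    )?;
    let creds = Credentials::new("access_key", "secret_key");
    let client = reqwest::Client::new();
    let dur = Duration::from_secs(3600); // presigned-URL validity

    // 1. Create the multipart upload and remember its upload id.
    let action = bucket.create_multipart_upload(Some(&creds), "db.snapshot");
    let body = client.post(action.sign(dur)).send().await?.text().await?;
    let multipart = CreateMultipartUpload::parse_response(&body)?;

    // 2. Upload each part (part numbers start at 1) and collect the ETags.
    let mut etags = Vec::new();
    for (i, part) in parts.into_iter().enumerate() {
        let action =
            bucket.upload_part(Some(&creds), "db.snapshot", i as u16 + 1, multipart.upload_id());
        let resp = client.put(action.sign(dur)).body(part).send().await?;
        etags.push(resp.headers()[reqwest::header::ETAG].to_str()?.to_owned());
    }

    // 3. Complete the upload by sending the ordered ETag list.
    let action = bucket.complete_multipart_upload(
        Some(&creds),
        "db.snapshot",
        multipart.upload_id(),
        etags.iter().map(String::as_str),
    );
    client.post(action.sign(dur)).body(action.body()).send().await?;
    Ok(())
}
```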
|||||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
|||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||||
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||||
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||||
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||||
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
|
|||||||
[timestamp] [4,]
|
[timestamp] [4,]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Batches:
|
### All Batches:
|
||||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||||
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
||||||
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
|
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
|
||||||
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }
|
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
|||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
enqueued [0,]
|
enqueued [0,]
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
|||||||
[]
|
[]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Status:
|
### Status:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
@@ -126,7 +126,7 @@ impl IndexScheduler {
        std::fs::create_dir_all(&options.auth_path).unwrap();
        let auth_env = open_auth_store_env(&options.auth_path).unwrap();
        let index_scheduler =
-            Self::new_test(options, auth_env, version, None, sender, planned_failures).unwrap();
+            Self::new(options, auth_env, version, sender, planned_failures).unwrap();

        // To be 100% consistent between all test we're going to start the scheduler right now
        // and ensure it's in the expected starting state.
@@ -1,7 +1,7 @@
use anyhow::bail;
use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
-use meilisearch_types::versioning;
+use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use time::OffsetDateTime;
use tracing::info;

@@ -9,82 +9,79 @@ use crate::queue::TaskQueue;
use crate::versioning::Versioning;

trait UpgradeIndexScheduler {
-    fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()>;
-    /// Whether the migration should be applied, depending on the initial version of the index scheduler before
-    /// any migration was applied
-    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;
-    /// A progress-centric description of the migration
-    fn description(&self) -> &'static str;
+    fn upgrade(
+        &self,
+        env: &Env<WithoutTls>,
+        wtxn: &mut RwTxn,
+        original: (u32, u32, u32),
+    ) -> anyhow::Result<()>;
+    fn target_version(&self) -> (u32, u32, u32);
}

-/// Upgrade the index scheduler to the binary version.
-///
-/// # Warning
-///
-/// The current implementation uses a single wtxn to the index scheduler for the whole duration of the upgrade.
-/// If migrations start taking take a long time, it might prevent tasks from being registered.
-/// If this issue manifests, then it can be mitigated by adding a `fn target_version` to `UpgradeIndexScheduler`,
-/// to be able to write intermediate versions and drop the wtxn between applying migrations.
pub fn upgrade_index_scheduler(
    env: &Env<WithoutTls>,
    versioning: &Versioning,
-    initial_version: (u32, u32, u32),
+    from: (u32, u32, u32),
+    to: (u32, u32, u32),
) -> anyhow::Result<()> {
-    let target_major: u32 = versioning::VERSION_MAJOR;
-    let target_minor: u32 = versioning::VERSION_MINOR;
-    let target_patch: u32 = versioning::VERSION_PATCH;
-    let target_version = (target_major, target_minor, target_patch);
-
-    if initial_version == target_version {
-        return Ok(());
-    }
+    let current_major = to.0;
+    let current_minor = to.1;
+    let current_patch = to.2;

    let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
-        // List all upgrade functions to apply in order here.
+        // This is the last upgrade function, it will be called when the index is up to date.
+        // any other upgrade function should be added before this one.
+        &ToCurrentNoOp {},
    ];

-    let (initial_major, initial_minor, initial_patch) = initial_version;
-
-    if initial_version > target_version {
-        bail!(
-            "Database version {initial_major}.{initial_minor}.{initial_patch} is higher than the Meilisearch version {target_major}.{target_minor}.{target_patch}. Downgrade is not supported",
-        );
-    }
-
-    if initial_version < (1, 12, 0) {
-        bail!(
-            "Database version {initial_major}.{initial_minor}.{initial_patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{initial_major}.{initial_minor}.{initial_patch} and import it in the v{target_major}.{target_minor}.{target_patch}",
-        );
-    }
+    let start = match from {
+        (1, 12, _) => 0,
+        (1, 13, _) => 0,
+        (1, 14, _) => 0,
+        (1, 15, _) => 0,
+        (1, 16, _) => 0,
+        (1, 17, _) => 0,
+        (1, 18, _) => 0,
+        (1, 19, _) => 0,
+        (1, 20, _) => 0,
+        (1, 21, _) => 0,
+        (1, 22, _) => 0,
+        (1, 23, _) => 0,
+        (1, 24, _) => 0,
+        (major, minor, patch) => {
+            if major > current_major
+                || (major == current_major && minor > current_minor)
+                || (major == current_major && minor == current_minor && patch > current_patch)
+            {
+                bail!(
+                    "Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
+                );
+            } else if major < 1 || (major == current_major && minor < 12) {
+                bail!(
+                    "Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
+                );
+            } else {
+                bail!("Unknown database version: v{major}.{minor}.{patch}");
+            }
+        }
+    };

    info!("Upgrading the task queue");
-    let mut wtxn = env.write_txn()?;
-    let migration_count = upgrade_functions.len();
-    for (migration_index, upgrade) in upgrade_functions.iter().enumerate() {
-        if upgrade.must_upgrade(initial_version) {
-            info!(
-                "[{migration_index}/{migration_count}]Applying migration: {}",
-                upgrade.description()
-            );
-            upgrade.upgrade(env, &mut wtxn)?;
-            info!(
-                "[{}/{migration_count}]Migration applied: {}",
-                migration_index + 1,
-                upgrade.description()
-            )
-        } else {
-            info!(
-                "[{migration_index}/{migration_count}]Skipping unnecessary migration: {}",
-                upgrade.description()
-            )
-        }
+    let mut local_from = from;
+    for upgrade in upgrade_functions[start..].iter() {
+        let target = upgrade.target_version();
+        info!(
+            "Upgrading from v{}.{}.{} to v{}.{}.{}",
+            local_from.0, local_from.1, local_from.2, target.0, target.1, target.2
+        );
+        let mut wtxn = env.write_txn()?;
+        upgrade.upgrade(env, &mut wtxn, local_from)?;
+        versioning.set_version(&mut wtxn, target)?;
+        wtxn.commit()?;
+        local_from = target;
    }

-    versioning.set_version(&mut wtxn, target_version)?;
-    info!("Task queue upgraded, spawning the upgrade database task");
+    let mut wtxn = env.write_txn()?;

    let queue = TaskQueue::new(env, &mut wtxn)?;
    let uid = queue.next_task_id(&wtxn)?;
    queue.register(
@@ -97,14 +94,31 @@ pub fn upgrade_index_scheduler(
            finished_at: None,
            error: None,
            canceled_by: None,
-            details: Some(Details::UpgradeDatabase { from: initial_version, to: target_version }),
+            details: Some(Details::UpgradeDatabase { from, to }),
            status: Status::Enqueued,
-            kind: KindWithContent::UpgradeDatabase { from: initial_version },
+            kind: KindWithContent::UpgradeDatabase { from },
            network: None,
-            custom_metadata: None,
        },
    )?;
    wtxn.commit()?;

    Ok(())
}

+#[allow(non_camel_case_types)]
+struct ToCurrentNoOp {}
+
+impl UpgradeIndexScheduler for ToCurrentNoOp {
+    fn upgrade(
+        &self,
+        _env: &Env<WithoutTls>,
+        _wtxn: &mut RwTxn,
+        _original: (u32, u32, u32),
+    ) -> anyhow::Result<()> {
+        Ok(())
+    }
+
+    fn target_version(&self) -> (u32, u32, u32) {
+        (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
+    }
+}
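For context on the hunk above: the incoming (+) side drops the `must_upgrade`/`description` pair in favour of `target_version`, so each migration runs in its own write transaction and records the intermediate version it produces. A future migration would then be one more entry before `&ToCurrentNoOp {}`; the sketch below is invented for illustration (the `V1_25_SplitTaskQueue` name and body are not part of this diff, only the trait shape is):

    // Sketch only: a hypothetical migration slotted in before `ToCurrentNoOp`.
    #[allow(non_camel_case_types)]
    struct V1_25_SplitTaskQueue {}

    impl UpgradeIndexScheduler for V1_25_SplitTaskQueue {
        fn upgrade(
            &self,
            _env: &Env<WithoutTls>,
            _wtxn: &mut RwTxn,
            original: (u32, u32, u32),
        ) -> anyhow::Result<()> {
            // `original` is the version the database had before any migration
            // in this run; the loop above commits `target_version()` right
            // after this returns.
            tracing::info!("upgrading a database that started at v{}.{}.{}", original.0, original.1, original.2);
            Ok(())
        }

        fn target_version(&self) -> (u32, u32, u32) {
            (1, 25, 0)
        }
    }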
@@ -379,7 +379,6 @@ impl crate::IndexScheduler {
            status,
            kind,
            network: _,
-            custom_metadata: _,
        } = task;
        assert_eq!(uid, task.uid);
        if task.status != Status::Enqueued {
@@ -64,7 +64,14 @@ impl Versioning {
        };
        wtxn.commit()?;

-        upgrade_index_scheduler(env, &this, from)?;
+        let bin_major: u32 = versioning::VERSION_MAJOR;
+        let bin_minor: u32 = versioning::VERSION_MINOR;
+        let bin_patch: u32 = versioning::VERSION_PATCH;
+        let to = (bin_major, bin_minor, bin_patch);
+
+        if from != to {
+            upgrade_index_scheduler(env, &this, from, to)?;
+        }

        // Once we reach this point it means the upgrade process, if there was one is entirely finished
        // we can safely say we reached the latest version of the index scheduler
@@ -15,7 +15,7 @@ license.workspace = true
serde_json = "1.0"

[dev-dependencies]
-criterion = "0.7.0"
+criterion = "0.6.0"

[[bench]]
name = "depth"
@@ -13,7 +13,7 @@ license.workspace = true
[dependencies]
# fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["json", "redactions"] }
-md5 = "0.8.0"
+md5 = "0.7.0"
once_cell = "1.21"
-regex-lite = "0.1.8"
-uuid = { version = "1.18.1", features = ["v4"] }
+regex-lite = "0.1.6"
+uuid = { version = "1.17.0", features = ["v4"] }
@@ -12,15 +12,15 @@ license.workspace = true

[dependencies]
base64 = "0.22.1"
-enum-iterator = "2.3.0"
+enum-iterator = "2.1.0"
hmac = "0.12.1"
maplit = "1.0.2"
meilisearch-types = { path = "../meilisearch-types" }
rand = "0.8.5"
roaring = { version = "0.10.12", features = ["serde"] }
-serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
+serde = { version = "1.0.219", features = ["derive"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9"
-thiserror = "2.0.17"
-time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+thiserror = "2.0.12"
+time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }
@@ -11,38 +11,38 @@ edition.workspace = true
license.workspace = true

[dependencies]
-actix-web = { version = "4.12.0", default-features = false }
-anyhow = "1.0.100"
-bumpalo = "3.19.0"
+actix-web = { version = "4.11.0", default-features = false }
+anyhow = "1.0.98"
+bumpalo = "3.18.1"
bumparaw-collections = "0.1.4"
byte-unit = { version = "5.1.6", features = ["serde"] }
-convert_case = "0.9.0"
-csv = "1.4.0"
-deserr = { version = "0.6.4", features = ["actix-web"] }
+convert_case = "0.8.0"
+csv = "1.3.1"
+deserr = { version = "0.6.3", features = ["actix-web"] }
either = { version = "1.15.0", features = ["serde"] }
-enum-iterator = "2.3.0"
+enum-iterator = "2.1.0"
file-store = { path = "../file-store" }
-flate2 = "1.1.5"
+flate2 = "1.1.2"
fst = "0.4.7"
-memmap2 = "0.9.9"
+memmap2 = "0.9.7"
milli = { path = "../milli" }
roaring = { version = "0.10.12", features = ["serde"] }
rustc-hash = "2.1.1"
-serde = { version = "1.0.228", features = ["derive"] }
+serde = { version = "1.0.219", features = ["derive"] }
serde-cs = "0.2.4"
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
-tempfile = "3.23.0"
-thiserror = "2.0.17"
-time = { version = "0.3.44", features = [
+tempfile = "3.20.0"
+thiserror = "2.0.12"
+time = { version = "0.3.41", features = [
    "serde-well-known",
    "formatting",
    "parsing",
    "macros",
] }
-tokio = "1.48"
+tokio = "1.45"
utoipa = { version = "5.4.0", features = ["macros"] }
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }

[dev-dependencies]
# fixed version due to format breakages in v1.40
@@ -56,9 +56,6 @@ all-tokenizations = ["milli/all-tokenizations"]
# chinese specialized tokenization
chinese = ["milli/chinese"]
chinese-pinyin = ["milli/chinese-pinyin"]
-
-enterprise = ["milli/enterprise"]
-
# hebrew specialized tokenization
hebrew = ["milli/hebrew"]
# japanese specialized tokenization
|||||||
@@ -1,16 +0,0 @@
|
|||||||
pub mod network {
|
|
||||||
use milli::update::new::indexer::current_edition::sharding::Shards;
|
|
||||||
|
|
||||||
use crate::network::Network;
|
|
||||||
|
|
||||||
impl Network {
|
|
||||||
pub fn shards(&self) -> Option<Shards> {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn sharding(&self) -> bool {
|
|
||||||
// always false in CE
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -3,9 +3,21 @@
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

-use milli::update::new::indexer::enterprise_edition::sharding::Shards;
+use std::collections::BTreeMap;

-use crate::network::Network;
+use milli::update::new::indexer::enterprise_edition::sharding::Shards;
+use serde::{Deserialize, Serialize};
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct Network {
+    #[serde(default, rename = "self")]
+    pub local: Option<String>,
+    #[serde(default)]
+    pub remotes: BTreeMap<String, Remote>,
+    #[serde(default)]
+    pub sharding: bool,
+}

impl Network {
    pub fn shards(&self) -> Option<Shards> {
@@ -22,8 +34,14 @@ impl Network {
            None
        }
    }
+}

-    pub fn sharding(&self) -> bool {
-        self.sharding
-    }
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+pub struct Remote {
+    pub url: String,
+    #[serde(default)]
+    pub search_api_key: Option<String>,
+    #[serde(default)]
+    pub write_api_key: Option<String>,
}
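As a sanity check on the serde attributes of the relocated `Network` struct, the wire format keeps camelCase keys and serializes `local` under a `self` key. A minimal sketch with made-up values (this snippet is illustrative, not part of the diff):

    let network = Network {
        local: Some("ms-00".to_string()),
        remotes: BTreeMap::new(),
        sharding: false,
    };
    let json = serde_json::to_string(&network).unwrap();
    // `rename = "self"` and `rename_all = "camelCase"` produce:
    assert_eq!(json, r#"{"self":"ms-00","remotes":{},"sharding":false}"#);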
@@ -254,12 +254,10 @@ InvalidSearchHybridQuery , InvalidRequest , BAD_REQU
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
-InvalidIndexCustomMetadata , InvalidRequest , BAD_REQUEST ;
InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacets , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacetsByIndex , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacetOrder , InvalidRequest , BAD_REQUEST ;
-InvalidMultiSearchQueryPersonalization , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFederated , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFederationOptions , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchMaxValuesPerFacet , InvalidRequest , BAD_REQUEST ;
@@ -317,8 +315,6 @@ InvalidSearchShowRankingScoreDetails , InvalidRequest , BAD_REQU
InvalidSimilarShowRankingScoreDetails , InvalidRequest , BAD_REQUEST ;
InvalidSearchSort , InvalidRequest , BAD_REQUEST ;
InvalidSearchDistinct , InvalidRequest , BAD_REQUEST ;
-InvalidSearchPersonalize , InvalidRequest , BAD_REQUEST ;
-InvalidSearchPersonalizeUserContext , InvalidRequest , BAD_REQUEST ;
InvalidSearchMediaAndVector , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
@@ -394,9 +390,6 @@ TooManyVectors , InvalidRequest , BAD_REQU
UnretrievableDocument , Internal , BAD_REQUEST ;
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
-InvalidS3SnapshotRequest , Internal , BAD_REQUEST ;
-InvalidS3SnapshotParameters , Internal , BAD_REQUEST ;
-S3SnapshotServerError , Internal , BAD_GATEWAY ;

// Experimental features
VectorEmbeddingError , InvalidRequest , BAD_REQUEST ;
@@ -433,7 +426,6 @@ InvalidChatCompletionSearchQueryParamPrompt , InvalidRequest , BAD_REQU
InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ;
-RequiresEnterpriseEdition , InvalidRequest , UNAVAILABLE_FOR_LEGAL_REASONS ;
// Webhooks
InvalidWebhooks , InvalidRequest , BAD_REQUEST ;
InvalidWebhookUrl , InvalidRequest , BAD_REQUEST ;
@@ -687,18 +679,6 @@ impl fmt::Display for deserr_codes::InvalidNetworkSearchApiKey {
    }
}

-impl fmt::Display for deserr_codes::InvalidSearchPersonalize {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "the value of `personalize` is invalid, expected a JSON object with `userContext` string.")
-    }
-}
-
-impl fmt::Display for deserr_codes::InvalidSearchPersonalizeUserContext {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "the value of `userContext` is invalid, expected a string.")
-    }
-}
-
#[macro_export]
macro_rules! internal_error {
    ($target:ty : $($other:path), *) => {
@@ -2,17 +2,10 @@

pub mod batch_view;
pub mod batches;
-#[cfg(not(feature = "enterprise"))]
-pub mod community_edition;
pub mod compression;
pub mod deserr;
pub mod document_formats;
-#[cfg(feature = "enterprise")]
pub mod enterprise_edition;
-#[cfg(not(feature = "enterprise"))]
-pub use community_edition as current_edition;
-#[cfg(feature = "enterprise")]
-pub use enterprise_edition as current_edition;
pub mod error;
pub mod facet_values_sort;
pub mod features;
@@ -20,7 +13,6 @@ pub mod index_uid;
pub mod index_uid_pattern;
pub mod keys;
pub mod locales;
-pub mod network;
pub mod settings;
pub mod star_or;
pub mod task_view;
@@ -1,24 +0,0 @@
-use std::collections::BTreeMap;
-
-use serde::{Deserialize, Serialize};
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
-#[serde(rename_all = "camelCase")]
-pub struct Network {
-    #[serde(default, rename = "self")]
-    pub local: Option<String>,
-    #[serde(default)]
-    pub remotes: BTreeMap<String, Remote>,
-    #[serde(default)]
-    pub sharding: bool,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
-#[serde(rename_all = "camelCase")]
-pub struct Remote {
-    pub url: String,
-    #[serde(default)]
-    pub search_api_key: Option<String>,
-    #[serde(default)]
-    pub write_api_key: Option<String>,
-}
@@ -346,26 +346,24 @@ impl<T> Settings<T> {
                continue;
            };

-            hide_secret(api_key, 0);
-        }
+            Self::hide_secret(api_key);
        }
    }

-/// Redact a secret string, starting from the `secret_offset`th byte.
-pub fn hide_secret(secret: &mut String, secret_offset: usize) {
-    match secret.len().checked_sub(secret_offset) {
-        None => (),
-        Some(x) if x < 10 => {
-            secret.replace_range(secret_offset.., "XXX...");
-        }
-        Some(x) if x < 20 => {
-            secret.replace_range((secret_offset + 2).., "XXXX...");
-        }
-        Some(x) if x < 30 => {
-            secret.replace_range((secret_offset + 3).., "XXXXX...");
-        }
-        Some(_x) => {
-            secret.replace_range((secret_offset + 5).., "XXXXXX...");
+    fn hide_secret(secret: &mut String) {
+        match secret.len() {
+            x if x < 10 => {
+                secret.replace_range(.., "XXX...");
+            }
+            x if x < 20 => {
+                secret.replace_range(2.., "XXXX...");
+            }
+            x if x < 30 => {
+                secret.replace_range(3.., "XXXXX...");
+            }
+            _x => {
+                secret.replace_range(5.., "XXXXXX...");
+            }
        }
    }
}
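The + side of `hide_secret` drops the `secret_offset` parameter and keeps a fixed visible prefix of 0, 2, 3, or 5 bytes for secrets shorter than 10, 20, 30 bytes, or longer. A standalone copy of that logic, exercised with an invented key:

    // Same tiers as the + side above, reproduced as a free function.
    fn hide_secret(secret: &mut String) {
        match secret.len() {
            x if x < 10 => secret.replace_range(.., "XXX..."),
            x if x < 20 => secret.replace_range(2.., "XXXX..."),
            x if x < 30 => secret.replace_range(3.., "XXXXX..."),
            _ => secret.replace_range(5.., "XXXXXX..."),
        }
    }

    let mut key = String::from("0123456789abcdef"); // 16 bytes -> keep 2
    hide_secret(&mut key);
    assert_eq!(key, "01XXXX...");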
@@ -55,9 +55,6 @@ pub struct TaskView {

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub network: Option<TaskNetwork>,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub custom_metadata: Option<String>,
}

impl TaskView {
@@ -76,7 +73,6 @@ impl TaskView {
            started_at: task.started_at,
            finished_at: task.finished_at,
            network: task.network.clone(),
-            custom_metadata: task.custom_metadata.clone(),
        }
    }
}
@@ -45,9 +45,6 @@ pub struct Task {

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub network: Option<TaskNetwork>,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub custom_metadata: Option<String>,
}

impl Task {
@@ -11,24 +11,6 @@ pub struct Webhook {
    pub headers: BTreeMap<String, String>,
}

-impl Webhook {
-    pub fn redact_authorization_header(&mut self) {
-        // headers are case insensitive, so to make the redaction robust we iterate over qualifying headers
-        // rather than getting one canonical `Authorization` header.
-        for value in self
-            .headers
-            .iter_mut()
-            .filter_map(|(name, value)| name.eq_ignore_ascii_case("authorization").then_some(value))
-        {
-            if value.starts_with("Bearer ") {
-                crate::settings::hide_secret(value, "Bearer ".len());
-            } else {
-                crate::settings::hide_secret(value, 0);
-            }
-        }
-    }
-}
-
#[derive(Debug, Serialize, Default, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct WebhooksView {
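The removed `redact_authorization_header` relied on the offset-taking form of `hide_secret` so the `Bearer ` prefix stayed readable. A worked check of the - side's arithmetic, with an invented token:

    // "Bearer ".len() == 7; 23 - 7 = 16 secret bytes lands in the `< 20`
    // tier, so two more bytes stay visible after the prefix.
    let mut value = String::from("Bearer 0123456789abcdef");
    value.replace_range((7 + 2).., "XXXX..."); // what hide_secret(&mut value, 7) did
    assert_eq!(value, "Bearer 01XXXX...");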
@@ -14,91 +14,91 @@ default-run = "meilisearch"

[dependencies]
actix-cors = "0.7.1"
-actix-http = { version = "3.11.2", default-features = false, features = [
+actix-http = { version = "3.11.0", default-features = false, features = [
    "compress-brotli",
    "compress-gzip",
    "rustls-0_23",
] }
actix-utils = "3.0.1"
-actix-web = { version = "4.12.0", default-features = false, features = [
+actix-web = { version = "4.11.0", default-features = false, features = [
    "macros",
    "compress-brotli",
    "compress-gzip",
    "cookies",
    "rustls-0_23",
] }
-anyhow = { version = "1.0.100", features = ["backtrace"] }
-bstr = "1.12.1"
+anyhow = { version = "1.0.98", features = ["backtrace"] }
+bstr = "1.12.0"
byte-unit = { version = "5.1.6", features = ["serde"] }
-bytes = "1.11.0"
-bumpalo = "3.19.0"
-clap = { version = "4.5.52", features = ["derive", "env"] }
+bytes = "1.10.1"
+bumpalo = "3.18.1"
+clap = { version = "4.5.40", features = ["derive", "env"] }
crossbeam-channel = "0.5.15"
-deserr = { version = "0.6.4", features = ["actix-web"] }
+deserr = { version = "0.6.3", features = ["actix-web"] }
dump = { path = "../dump" }
either = "1.15.0"
file-store = { path = "../file-store" }
-flate2 = "1.1.5"
+flate2 = "1.1.2"
fst = "0.4.7"
futures = "0.3.31"
futures-util = "0.3.31"
index-scheduler = { path = "../index-scheduler" }
-indexmap = { version = "2.12.0", features = ["serde"] }
-is-terminal = "0.4.17"
+indexmap = { version = "2.9.0", features = ["serde"] }
+is-terminal = "0.4.16"
itertools = "0.14.0"
jsonwebtoken = "9.3.1"
lazy_static = "1.5.0"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
-memmap2 = "0.9.9"
-mimalloc = { version = "0.1.48", default-features = false }
+memmap2 = "0.9.7"
+mimalloc = { version = "0.1.47", default-features = false }
mime = "0.3.17"
num_cpus = "1.17.0"
obkv = "0.3.0"
once_cell = "1.21.3"
-ordered-float = "5.1.0"
-parking_lot = "0.12.5"
+ordered-float = "5.0.0"
+parking_lot = "0.12.4"
permissive-json-pointer = { path = "../permissive-json-pointer" }
pin-project-lite = "0.2.16"
platform-dirs = "0.3.0"
prometheus = { version = "0.14.0", features = ["process"] }
rand = "0.8.5"
-rayon = "1.11.0"
-regex = "1.12.2"
-reqwest = { version = "0.12.24", features = [
+rayon = "1.10.0"
+regex = "1.11.1"
+reqwest = { version = "0.12.20", features = [
    "rustls-tls",
    "json",
], default-features = false }
-rustls = { version = "0.23.35", features = ["ring"], default-features = false }
-rustls-pki-types = { version = "1.13.0", features = ["alloc"] }
+rustls = { version = "0.23.28", features = ["ring"], default-features = false }
+rustls-pki-types = { version = "1.12.0", features = ["alloc"] }
rustls-pemfile = "2.2.0"
segment = { version = "0.2.6" }
-serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
+serde = { version = "1.0.219", features = ["derive"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9"
siphasher = "1.0.1"
slice-group-by = "0.3.1"
-static-files = { version = "0.3.1", optional = true }
-sysinfo = "0.37.2"
+static-files = { version = "0.2.5", optional = true }
+sysinfo = "0.35.2"
tar = "0.4.44"
-tempfile = "3.23.0"
-thiserror = "2.0.17"
-time = { version = "0.3.44", features = [
+tempfile = "3.20.0"
+thiserror = "2.0.12"
+time = { version = "0.3.41", features = [
    "serde-well-known",
    "formatting",
    "parsing",
    "macros",
] }
-tokio = { version = "1.48.0", features = ["full"] }
-toml = "0.9.8"
-uuid = { version = "1.18.1", features = ["serde", "v4", "v7"] }
+tokio = { version = "1.45.1", features = ["full"] }
+toml = "0.8.23"
+uuid = { version = "1.18.0", features = ["serde", "v4", "v7"] }
serde_urlencoded = "0.7.1"
termcolor = "1.4.1"
-url = { version = "2.5.7", features = ["serde"] }
+url = { version = "2.5.4", features = ["serde"] }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = ["json"] }
tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
-tracing-actix-web = "0.7.19"
+tracing-actix-web = "0.7.18"
build-info = { version = "1.7.0", path = "../build-info" }
roaring = "0.10.12"
mopa-maintained = "0.2.3"
@@ -114,35 +114,35 @@ utoipa = { version = "5.4.0", features = [
utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }
async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" }
secrecy = "0.10.3"
-actix-web-lab = { version = "0.24.3", default-features = false }
+actix-web-lab = { version = "0.24.1", default-features = false }
urlencoding = "2.1.3"
backoff = { version = "0.4.0", features = ["tokio"] }
-humantime = { version = "2.3.0", default-features = false }

[dev-dependencies]
-actix-rt = "2.11.0"
-brotli = "8.0.2"
+actix-rt = "2.10.0"
+brotli = "8.0.1"
# fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["redactions"] }
manifest-dir-macros = "0.1.18"
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }
temp-env = "0.3.6"
-wiremock = "0.6.5"
+wiremock = "0.6.3"
yaup = "0.3.1"

[build-dependencies]
-anyhow = { version = "1.0.100", optional = true }
-cargo_toml = { version = "0.22.3", optional = true }
+anyhow = { version = "1.0.98", optional = true }
+cargo_toml = { version = "0.22.1", optional = true }
hex = { version = "0.4.3", optional = true }
-reqwest = { version = "0.12.24", features = [
+reqwest = { version = "0.12.20", features = [
    "blocking",
    "rustls-tls",
], default-features = false, optional = true }
sha-1 = { version = "0.10.1", optional = true }
-static-files = { version = "0.3.1", optional = true }
-tempfile = { version = "3.23.0", optional = true }
-zip = { version = "6.0.0", optional = true }
+static-files = { version = "0.2.5", optional = true }
+tempfile = { version = "3.20.0", optional = true }
+zip = { version = "4.1.0", optional = true }

[features]
default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]
@@ -160,7 +160,6 @@ mini-dashboard = [
]
chinese = ["meilisearch-types/chinese"]
chinese-pinyin = ["meilisearch-types/chinese-pinyin"]
-enterprise = ["meilisearch-types/enterprise"]
hebrew = ["meilisearch-types/hebrew"]
japanese = ["meilisearch-types/japanese"]
korean = ["meilisearch-types/korean"]
@@ -1,7 +1,7 @@
use std::any::TypeId;
use std::collections::{HashMap, HashSet};
use std::fs;
-use std::path::Path;
+use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, Instant};

@@ -195,7 +195,7 @@ struct Infos {
    experimental_enable_logs_route: bool,
    experimental_reduce_indexing_memory_usage: bool,
    experimental_max_number_of_batched_tasks: usize,
-    experimental_limit_batched_tasks_total_size: Option<u64>,
+    experimental_limit_batched_tasks_total_size: u64,
    experimental_network: bool,
    experimental_multimodal: bool,
    experimental_chat_completions: bool,
@@ -208,7 +208,6 @@ struct Infos {
    experimental_no_edition_2024_for_prefix_post_processing: bool,
    experimental_no_edition_2024_for_facet_post_processing: bool,
    experimental_vector_store_setting: bool,
-    experimental_personalization: bool,
    gpu_enabled: bool,
    db_path: bool,
    import_dump: bool,
@@ -218,7 +217,6 @@ struct Infos {
    import_snapshot: bool,
    schedule_snapshot: Option<u64>,
    snapshot_dir: bool,
-    uses_s3_snapshots: bool,
    ignore_missing_snapshot: bool,
    ignore_snapshot_if_db_exists: bool,
    http_addr: bool,
@@ -287,8 +285,6 @@ impl Infos {
            indexer_options,
            config_file_path,
            no_analytics: _,
-            experimental_personalization_api_key,
-            s3_snapshot_options,
        } = options;

        let schedule_snapshot = match schedule_snapshot {
@@ -344,22 +340,21 @@ impl Infos {
            experimental_no_edition_2024_for_dumps,
            experimental_vector_store_setting: vector_store_setting,
            gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(),
-            db_path: db_path != Path::new("./data.ms"),
+            db_path: db_path != PathBuf::from("./data.ms"),
            import_dump: import_dump.is_some(),
-            dump_dir: dump_dir != Path::new("dumps/"),
+            dump_dir: dump_dir != PathBuf::from("dumps/"),
            ignore_missing_dump,
            ignore_dump_if_db_exists,
            import_snapshot: import_snapshot.is_some(),
            schedule_snapshot,
-            snapshot_dir: snapshot_dir != Path::new("snapshots/"),
-            uses_s3_snapshots: s3_snapshot_options.is_some(),
+            snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
            ignore_missing_snapshot,
            ignore_snapshot_if_db_exists,
            http_addr: http_addr != default_http_addr(),
            http_payload_size_limit,
            experimental_max_number_of_batched_tasks,
            experimental_limit_batched_tasks_total_size:
-                experimental_limit_batched_tasks_total_size.map(|size| size.as_u64()),
+                experimental_limit_batched_tasks_total_size.into(),
            task_queue_webhook: task_webhook_url.is_some(),
            task_webhook_authorization_header: task_webhook_authorization_header.is_some(),
            log_level: log_level.to_string(),
@@ -376,7 +371,6 @@ impl Infos {
            experimental_no_edition_2024_for_settings,
            experimental_no_edition_2024_for_prefix_post_processing,
            experimental_no_edition_2024_for_facet_post_processing,
-            experimental_personalization: experimental_personalization_api_key.is_some(),
        }
    }
}
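The `Path::new` vs `PathBuf::from` flip in `Infos` is a behavioral no-op, since `PathBuf` compares equal to `&Path` through the same `PartialEq` family; the `Path::new` form merely avoids an allocation. A minimal check (paths invented):

    use std::path::{Path, PathBuf};

    let db_path = PathBuf::from("./data.ms");
    // Both spellings of the comparison agree.
    assert_eq!(db_path != Path::new("./data.ms"), db_path != PathBuf::from("./data.ms"));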
@@ -38,8 +38,6 @@ pub enum MeilisearchHttpError {
    PaginationInFederatedQuery(usize, &'static str),
    #[error("Inside `.queries[{0}]`: Using facet options is not allowed in federated queries.\n - Hint: remove `facets` from query #{0} or remove `federation` from the request\n - Hint: pass `federation.facetsByIndex.{1}: {2:?}` for facets in federated search")]
    FacetsInFederatedQuery(usize, String, Vec<String>),
-    #[error("Inside `.queries[{0}]`: Using `.personalize` is not allowed in federated queries.\n - Hint: remove `personalize` from query #{0} or remove `federation` from the request")]
-    PersonalizationInFederatedQuery(usize),
    #[error("Inconsistent order for values in facet `{facet}`: index `{previous_uid}` orders {previous_facet_order}, but index `{current_uid}` orders {index_facet_order}.\n - Hint: Remove `federation.mergeFacets` or change `faceting.sortFacetValuesBy` to be consistent in settings.")]
    InconsistentFacetOrder {
        facet: String,
@@ -139,9 +137,6 @@ impl ErrorCode for MeilisearchHttpError {
            MeilisearchHttpError::InconsistentFacetOrder { .. } => {
                Code::InvalidMultiSearchFacetOrder
            }
-            MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
-                Code::InvalidMultiSearchQueryPersonalization
-            }
            MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
                Code::InconsistentDocumentChangeHeaders
            }
@@ -11,7 +11,6 @@ pub mod middleware;
pub mod option;
#[cfg(test)]
mod option_test;
-pub mod personalization;
pub mod routes;
pub mod search;
pub mod search_queue;
@@ -59,7 +58,6 @@ use tracing::{error, info_span};
use tracing_subscriber::filter::Targets;

use crate::error::MeilisearchHttpError;
-use crate::personalization::PersonalizationService;

/// Default number of simultaneously opened indexes.
///
@@ -130,8 +128,12 @@ pub type LogStderrType = tracing_subscriber::filter::Filtered<
>;

pub fn create_app(
-    services: ServicesData,
+    index_scheduler: Data<IndexScheduler>,
+    auth_controller: Data<AuthController>,
+    search_queue: Data<SearchQueue>,
    opt: Opt,
+    logs: (LogRouteHandle, LogStderrHandle),
+    analytics: Data<Analytics>,
    enable_dashboard: bool,
) -> actix_web::App<
    impl ServiceFactory<
@@ -143,7 +145,17 @@ pub fn create_app(
    >,
> {
    let app = actix_web::App::new()
-        .configure(|s| configure_data(s, services, &opt))
+        .configure(|s| {
+            configure_data(
+                s,
+                index_scheduler.clone(),
+                auth_controller.clone(),
+                search_queue.clone(),
+                &opt,
+                logs,
+                analytics.clone(),
+            )
+        })
        .configure(routes::configure)
        .configure(|s| dashboard(s, enable_dashboard));

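The - side's `services: ServicesData` bundles the handles that the + side threads through `create_app` one by one. The bundle's definition is not part of this diff; a plausible shape, with field names assumed, is:

    // Hypothetical: only the handle types are attested by the hunk above.
    #[derive(Clone)]
    pub struct ServicesData {
        pub index_scheduler: Data<IndexScheduler>,
        pub auth_controller: Data<AuthController>,
        pub search_queue: Data<SearchQueue>,
        pub logs: (LogRouteHandle, LogStderrHandle),
        pub analytics: Data<Analytics>,
    }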
@@ -204,10 +216,7 @@ enum OnFailure {
    KeepDb,
}

-pub fn setup_meilisearch(
-    opt: &Opt,
-    handle: tokio::runtime::Handle,
-) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
+pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
    let index_scheduler_opt = IndexSchedulerOptions {
        version_file_path: opt.db_path.join(VERSION_FILE_NAME),
        auth_path: opt.db_path.join("auth"),
@@ -221,26 +230,12 @@ pub fn setup_meilisearch(
        task_db_size: opt.max_task_db_size.as_u64() as usize,
        index_base_map_size: opt.max_index_size.as_u64() as usize,
        enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
-        indexer_config: Arc::new({
-            let s3_snapshot_options =
-                opt.s3_snapshot_options.clone().map(|opt| opt.try_into()).transpose()?;
-            IndexerConfig { s3_snapshot_options, ..(&opt.indexer_options).try_into()? }
-        }),
+        indexer_config: Arc::new((&opt.indexer_options).try_into()?),
        autobatching_enabled: true,
        cleanup_enabled: !opt.experimental_replication_parameters,
        max_number_of_tasks: 1_000_000,
        max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
-        batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.map_or_else(
-            || {
-                opt.indexer_options
-                    .max_indexing_memory
-                    // By default, we use half of the available memory to determine the size of batched tasks
-                    .map_or(u64::MAX, |mem| mem.as_u64() / 2)
-                    // And never exceed 10 GiB when we infer the limit
-                    .min(10 * 1024 * 1024 * 1024)
-            },
-            |size| size.as_u64(),
-        ),
+        batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.into(),
        index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
        index_count: DEFAULT_INDEX_COUNT,
        instance_features: opt.to_instance_features(),
@@ -261,7 +256,6 @@ pub fn setup_meilisearch(
|
|||||||
index_scheduler_opt,
|
index_scheduler_opt,
|
||||||
OnFailure::RemoveDb,
|
OnFailure::RemoveDb,
|
||||||
binary_version, // the db is empty
|
binary_version, // the db is empty
|
||||||
handle,
|
|
||||||
)?,
|
)?,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
std::fs::remove_dir_all(&opt.db_path)?;
|
std::fs::remove_dir_all(&opt.db_path)?;
|
||||||
@@ -279,7 +273,7 @@ pub fn setup_meilisearch(
|
|||||||
bail!("snapshot doesn't exist at {}", snapshot_path.display())
|
bail!("snapshot doesn't exist at {}", snapshot_path.display())
|
||||||
// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
|
// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
|
||||||
} else {
|
} else {
|
||||||
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
|
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
|
||||||
}
|
}
|
||||||
} else if let Some(ref path) = opt.import_dump {
|
} else if let Some(ref path) = opt.import_dump {
|
||||||
let src_path_exists = path.exists();
|
let src_path_exists = path.exists();
|
||||||
@@ -290,7 +284,6 @@ pub fn setup_meilisearch(
|
|||||||
index_scheduler_opt,
|
index_scheduler_opt,
|
||||||
OnFailure::RemoveDb,
|
OnFailure::RemoveDb,
|
||||||
binary_version, // the db is empty
|
binary_version, // the db is empty
|
||||||
handle,
|
|
||||||
)?;
|
)?;
|
||||||
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
|
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
|
||||||
Ok(()) => (index_scheduler, auth_controller),
|
Ok(()) => (index_scheduler, auth_controller),
|
||||||
@@ -311,10 +304,10 @@ pub fn setup_meilisearch(
|
|||||||
// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
|
// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
|
||||||
// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
|
// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
|
||||||
} else {
|
} else {
|
||||||
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
|
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
|
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
|
||||||
};
|
};
|
||||||
|
|
||||||
// We create a loop in a thread that registers snapshotCreation tasks
|
// We create a loop in a thread that registers snapshotCreation tasks
|
||||||
@@ -345,7 +338,6 @@ fn open_or_create_database_unchecked(
|
|||||||
index_scheduler_opt: IndexSchedulerOptions,
|
index_scheduler_opt: IndexSchedulerOptions,
|
||||||
on_failure: OnFailure,
|
on_failure: OnFailure,
|
||||||
version: (u32, u32, u32),
|
version: (u32, u32, u32),
|
||||||
handle: tokio::runtime::Handle,
|
|
||||||
) -> anyhow::Result<(IndexScheduler, AuthController)> {
|
) -> anyhow::Result<(IndexScheduler, AuthController)> {
|
||||||
// we don't want to create anything in the data.ms yet, thus we
|
// we don't want to create anything in the data.ms yet, thus we
|
||||||
// wrap our two builders in a closure that'll be executed later.
|
// wrap our two builders in a closure that'll be executed later.
|
||||||
@@ -353,7 +345,7 @@ fn open_or_create_database_unchecked(
|
|||||||
let auth_env = open_auth_store_env(&index_scheduler_opt.auth_path).unwrap();
|
let auth_env = open_auth_store_env(&index_scheduler_opt.auth_path).unwrap();
|
||||||
let auth_controller = AuthController::new(auth_env.clone(), &opt.master_key);
|
let auth_controller = AuthController::new(auth_env.clone(), &opt.master_key);
|
||||||
let index_scheduler_builder = || -> anyhow::Result<_> {
|
let index_scheduler_builder = || -> anyhow::Result<_> {
|
||||||
Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version, Some(handle))?)
|
Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version)?)
|
||||||
};
|
};
|
||||||
|
|
||||||
match (
|
match (
|
||||||
@@ -460,7 +452,6 @@ fn open_or_create_database(
|
|||||||
index_scheduler_opt: IndexSchedulerOptions,
|
index_scheduler_opt: IndexSchedulerOptions,
|
||||||
empty_db: bool,
|
empty_db: bool,
|
||||||
binary_version: (u32, u32, u32),
|
binary_version: (u32, u32, u32),
|
||||||
handle: tokio::runtime::Handle,
|
|
||||||
) -> anyhow::Result<(IndexScheduler, AuthController)> {
|
) -> anyhow::Result<(IndexScheduler, AuthController)> {
|
||||||
let version = if !empty_db {
|
let version = if !empty_db {
|
||||||
check_version(opt, &index_scheduler_opt, binary_version)?
|
check_version(opt, &index_scheduler_opt, binary_version)?
|
||||||
@@ -468,7 +459,7 @@ fn open_or_create_database(
|
|||||||
binary_version
|
binary_version
|
||||||
};
|
};
|
||||||
|
|
||||||
open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version, handle)
|
open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn import_dump(
|
fn import_dump(
|
||||||
@@ -536,11 +527,7 @@ fn import_dump(
|
|||||||
let indexer_config = if base_config.max_threads.is_none() {
|
let indexer_config = if base_config.max_threads.is_none() {
|
||||||
let (thread_pool, _) = default_thread_pool_and_threads();
|
let (thread_pool, _) = default_thread_pool_and_threads();
|
||||||
|
|
||||||
let _config = IndexerConfig {
|
let _config = IndexerConfig { thread_pool, ..*base_config };
|
||||||
thread_pool,
|
|
||||||
s3_snapshot_options: base_config.s3_snapshot_options.clone(),
|
|
||||||
..*base_config
|
|
||||||
};
|
|
||||||
backup_config = _config;
|
backup_config = _config;
|
||||||
&backup_config
|
&backup_config
|
||||||
} else {
|
} else {
|
||||||
@@ -688,26 +675,23 @@ fn import_dump(
|
|||||||
Ok(index_scheduler_dump.finish()?)
|
Ok(index_scheduler_dump.finish()?)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn configure_data(config: &mut web::ServiceConfig, services: ServicesData, opt: &Opt) {
|
pub fn configure_data(
|
||||||
let ServicesData {
|
config: &mut web::ServiceConfig,
|
||||||
index_scheduler,
|
index_scheduler: Data<IndexScheduler>,
|
||||||
auth,
|
auth: Data<AuthController>,
|
||||||
search_queue,
|
search_queue: Data<SearchQueue>,
|
||||||
personalization_service,
|
opt: &Opt,
|
||||||
logs_route_handle,
|
(logs_route, logs_stderr): (LogRouteHandle, LogStderrHandle),
|
||||||
logs_stderr_handle,
|
analytics: Data<Analytics>,
|
||||||
analytics,
|
) {
|
||||||
} = services;
|
|
||||||
|
|
||||||
let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize;
|
let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize;
|
||||||
config
|
config
|
||||||
.app_data(index_scheduler)
|
.app_data(index_scheduler)
|
||||||
.app_data(auth)
|
.app_data(auth)
|
||||||
.app_data(search_queue)
|
.app_data(search_queue)
|
||||||
.app_data(analytics)
|
.app_data(analytics)
|
||||||
.app_data(personalization_service)
|
.app_data(web::Data::new(logs_route))
|
||||||
.app_data(logs_route_handle)
|
.app_data(web::Data::new(logs_stderr))
|
||||||
.app_data(logs_stderr_handle)
|
|
||||||
.app_data(web::Data::new(opt.clone()))
|
.app_data(web::Data::new(opt.clone()))
|
||||||
.app_data(
|
.app_data(
|
||||||
web::JsonConfig::default()
|
web::JsonConfig::default()
|
||||||
@@ -768,14 +752,3 @@ pub fn dashboard(config: &mut web::ServiceConfig, enable_frontend: bool) {
|
|||||||
pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
|
pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
|
||||||
config.service(web::resource("/").route(web::get().to(routes::running)));
|
config.service(web::resource("/").route(web::get().to(routes::running)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct ServicesData {
|
|
||||||
pub index_scheduler: Data<IndexScheduler>,
|
|
||||||
pub auth: Data<AuthController>,
|
|
||||||
pub search_queue: Data<SearchQueue>,
|
|
||||||
pub personalization_service: Data<PersonalizationService>,
|
|
||||||
pub logs_route_handle: Data<LogRouteHandle>,
|
|
||||||
pub logs_stderr_handle: Data<LogStderrHandle>,
|
|
||||||
pub analytics: Data<Analytics>,
|
|
||||||
}
|
|
||||||
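
The left-hand side of this lib.rs diff is the `ServicesData` refactor: every shared handle is bundled into one cloneable struct so that `create_app` and the `HttpServer` closure pass a single value around. A minimal sketch of the pattern, with simplified stand-in types rather than Meilisearch's own:

```rust
use actix_web::web::{self, Data};

// Stand-ins for Data<IndexScheduler>, Data<Analytics>, etc.
#[derive(Clone)]
struct ServicesData {
    scheduler: Data<String>,
    analytics: Data<u64>,
}

fn configure_data(config: &mut web::ServiceConfig, services: ServicesData) {
    let ServicesData { scheduler, analytics } = services;
    // Registering an existing Data<T> reuses the same Arc without re-wrapping it.
    config.app_data(scheduler).app_data(analytics);
}

fn main() {
    let services =
        ServicesData { scheduler: Data::new(String::new()), analytics: Data::new(0u64) };
    // The whole bundle is one Clone, which is what the HttpServer closure captures.
    let _app = actix_web::App::new().configure(|s| configure_data(s, services.clone()));
}
```
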
@@ -14,11 +14,10 @@ use index_scheduler::IndexScheduler;
use is_terminal::IsTerminal;
use meilisearch::analytics::Analytics;
use meilisearch::option::LogMode;
use meilisearch::personalization::PersonalizationService;
use meilisearch::search_queue::SearchQueue;
use meilisearch::{
analytics, create_app, setup_meilisearch, LogRouteHandle, LogRouteType, LogStderrHandle,
LogStderrType, Opt, ServicesData, SubscriberForSecondLayer,
LogStderrType, Opt, SubscriberForSecondLayer,
};
use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
@@ -77,10 +76,7 @@ fn on_panic(info: &std::panic::PanicHookInfo) {

#[actix_web::main]
async fn main() -> anyhow::Result<()> {
// won't panic inside of tokio::main
let runtime = tokio::runtime::Handle::current();

try_main(runtime).await.inspect_err(|error| {
try_main().await.inspect_err(|error| {
tracing::error!(%error);
let mut current = error.source();
let mut depth = 0;
@@ -92,7 +88,7 @@ async fn main() -> anyhow::Result<()> {
})
}

async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
async fn try_main() -> anyhow::Result<()> {
let (opt, config_read_from) = Opt::try_build()?;

std::panic::set_hook(Box::new(on_panic));
@@ -126,7 +122,7 @@ async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
_ => (),
}

let (index_scheduler, auth_controller) = setup_meilisearch(&opt, runtime)?;
let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;

let analytics =
analytics::Analytics::new(&opt, index_scheduler.clone(), auth_controller.clone()).await;
@@ -153,15 +149,8 @@ async fn run_http(
let enable_dashboard = &opt.env == "development";
let opt_clone = opt.clone();
let index_scheduler = Data::from(index_scheduler);
let auth = Data::from(auth_controller);
let auth_controller = Data::from(auth_controller);
let analytics = Data::from(analytics);
// Create personalization service with API key from options
let personalization_service = Data::new(
opt.experimental_personalization_api_key
.clone()
.map(PersonalizationService::cohere)
.unwrap_or_else(PersonalizationService::disabled),
);
let search_queue = SearchQueue::new(
opt.experimental_search_queue_size,
available_parallelism()
@@ -173,22 +162,18 @@ async fn run_http(
usize::from(opt.experimental_drop_search_after) as u64
));
let search_queue = Data::new(search_queue);
let (logs_route_handle, logs_stderr_handle) = logs;
let logs_route_handle = Data::new(logs_route_handle);
let logs_stderr_handle = Data::new(logs_stderr_handle);

let services = ServicesData {
index_scheduler,
auth,
search_queue,
personalization_service,
logs_route_handle,
logs_stderr_handle,
analytics,
};
let http_server =
HttpServer::new(move || create_app(services.clone(), opt.clone(), enable_dashboard))
let http_server = HttpServer::new(move || {
create_app(
index_scheduler.clone(),
auth_controller.clone(),
search_queue.clone(),
opt.clone(),
logs.clone(),
analytics.clone(),
enable_dashboard,
)
})
// Disable signals allows the server to terminate immediately when a user enters CTRL-C
.disable_signals()
.keep_alive(KeepAlive::Os);
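
The left-hand side of the main.rs diff captures a `tokio::runtime::Handle` inside the async entry point and threads it into the blocking setup path. A self-contained sketch of that technique, with illustrative names; the handle stays usable from plain synchronous code as long as the runtime is alive:

```rust
use tokio::runtime::Handle;

fn blocking_setup(handle: Handle) {
    // From a plain OS thread (not a runtime worker), block_on drives async
    // work on the runtime the handle came from.
    handle.block_on(async {
        println!("async work driven from synchronous setup code");
    });
}

#[tokio::main]
async fn main() {
    // Handle::current() only succeeds inside a runtime context, e.g. the
    // async main created by #[tokio::main] or #[actix_web::main].
    let handle = Handle::current();
    std::thread::spawn(move || blocking_setup(handle)).join().unwrap();
}
```
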
@@ -1,8 +1,7 @@
use lazy_static::lazy_static;
use prometheus::{
opts, register_gauge, register_gauge_vec, register_histogram_vec, register_int_counter_vec,
register_int_gauge, register_int_gauge_vec, Gauge, GaugeVec, HistogramVec, IntCounterVec,
IntGauge, IntGaugeVec,
opts, register_gauge, register_histogram_vec, register_int_counter_vec, register_int_gauge,
register_int_gauge_vec, Gauge, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
};

lazy_static! {
@@ -74,20 +73,6 @@ lazy_static! {
&["kind", "value"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE: GaugeVec = register_gauge_vec!(
opts!("meilisearch_batch_running_progress_trace", "The currently running progress trace"),
&["batch_uid", "step_name"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS: IntGaugeVec =
register_int_gauge_vec!(
opts!(
"meilisearch_last_finished_batches_progress_trace_ms",
"The last few batches progress trace in milliseconds"
),
&["batch_uid", "step_name"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_LAST_UPDATE: IntGauge =
register_int_gauge!(opts!("meilisearch_last_update", "Meilisearch Last Update"))
.expect("Can't create a metric");
@@ -129,9 +114,4 @@ lazy_static! {
"Meilisearch Task Queue Size Until Stop Registering",
))
.expect("Can't create a metric");
pub static ref MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS: IntGauge = register_int_gauge!(opts!(
"meilisearch_personalized_search_requests",
"Meilisearch number of search requests with personalization"
))
.expect("Can't create a metric");
}
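
For reference, this is the general shape of the labelled gauges removed above, using the same `register_gauge_vec!`/`opts!` macros from the prometheus crate; the metric name and label values here are illustrative:

```rust
use prometheus::{opts, register_gauge_vec, GaugeVec};

fn main() {
    // One time series is created per distinct (batch_uid, step_name) pair.
    let progress: GaugeVec = register_gauge_vec!(
        opts!("batch_running_progress_trace", "The currently running progress trace"),
        &["batch_uid", "step_name"]
    )
    .expect("Can't create a metric");

    // with_label_values creates the child series on first use.
    progress.with_label_values(&["42", "extracting documents"]).set(0.5);
}
```
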
@@ -7,13 +7,12 @@ use std::ops::Deref;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{env, fmt, fs};

use byte_unit::{Byte, ParseError, UnitType};
use clap::Parser;
use meilisearch_types::features::InstanceTogglableFeatures;
use meilisearch_types::milli::update::{IndexerConfig, S3SnapshotOptions};
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::ThreadPoolNoAbortBuilder;
use rustls::server::{ServerSessionMemoryCache, WebPkiClientVerifier};
use rustls::RootCertStore;
@@ -75,22 +74,6 @@ const MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES: &str =
const MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION: &str = "MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS";
const MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY: &str =
"MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY";

// Related to S3 snapshots
const MEILI_S3_BUCKET_URL: &str = "MEILI_S3_BUCKET_URL";
const MEILI_S3_BUCKET_REGION: &str = "MEILI_S3_BUCKET_REGION";
const MEILI_S3_BUCKET_NAME: &str = "MEILI_S3_BUCKET_NAME";
const MEILI_S3_SNAPSHOT_PREFIX: &str = "MEILI_S3_SNAPSHOT_PREFIX";
const MEILI_S3_ACCESS_KEY: &str = "MEILI_S3_ACCESS_KEY";
const MEILI_S3_SECRET_KEY: &str = "MEILI_S3_SECRET_KEY";
const MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS: &str = "MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS";
const MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL: &str = "MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL";
const MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS: &str =
"MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS";
const MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE: &str = "MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE";

const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
const DEFAULT_DB_PATH: &str = "./data.ms";
const DEFAULT_HTTP_ADDR: &str = "localhost:7700";
@@ -100,10 +83,6 @@ const DEFAULT_SNAPSHOT_DIR: &str = "snapshots/";
const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400;
const DEFAULT_SNAPSHOT_INTERVAL_SEC_STR: &str = "86400";
const DEFAULT_DUMP_DIR: &str = "dumps/";
const DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS: NonZeroUsize = NonZeroUsize::new(10).unwrap();
const DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL: u32 = 0;
const DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS: u64 = 8 * 3600; // 8 hours
const DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE: Byte = Byte::from_u64(375 * 1024 * 1024); // 375 MiB

const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY";
const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS";
@@ -473,14 +452,11 @@ pub struct Opt {
#[serde(default = "default_limit_batched_tasks")]
pub experimental_max_number_of_batched_tasks: usize,

/// Experimentally controls the maximum total size, in bytes, of tasks that will be processed
/// simultaneously. When unspecified, defaults to half of the maximum indexing memory and
/// clamped to 10 GiB.
///
/// See: <https://github.com/orgs/meilisearch/discussions/801>
#[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE)]
#[serde(default)]
pub experimental_limit_batched_tasks_total_size: Option<Byte>,
/// Experimentally reduces the maximum total size, in bytes, of tasks that will be processed at once,
/// see: <https://github.com/orgs/meilisearch/discussions/801>
#[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE, default_value_t = default_limit_batched_tasks_total_size())]
#[serde(default = "default_limit_batched_tasks_total_size")]
pub experimental_limit_batched_tasks_total_size: Byte,

/// Enables experimental caching of search query embeddings. The value represents the maximal number of entries in the cache of each
/// distinct embedder.
@@ -499,20 +475,10 @@ pub struct Opt {
#[serde(default)]
pub experimental_no_snapshot_compaction: bool,

/// Experimental personalization API key feature.
///
/// Sets the API key for personalization features.
#[clap(long, env = MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY)]
pub experimental_personalization_api_key: Option<String>,

#[serde(flatten)]
#[clap(flatten)]
pub indexer_options: IndexerOpts,

#[serde(flatten)]
#[clap(flatten)]
pub s3_snapshot_options: Option<S3SnapshotOpts>,

/// Set the path to a configuration file that should be used to setup the engine.
/// Format must be TOML.
#[clap(long)]
@@ -614,8 +580,6 @@ impl Opt {
experimental_limit_batched_tasks_total_size,
experimental_embedding_cache_entries,
experimental_no_snapshot_compaction,
experimental_personalization_api_key,
s3_snapshot_options,
} = self;
export_to_env_if_not_present(MEILI_DB_PATH, db_path);
export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
@@ -704,12 +668,10 @@ impl Opt {
MEILI_EXPERIMENTAL_MAX_NUMBER_OF_BATCHED_TASKS,
experimental_max_number_of_batched_tasks.to_string(),
);
if let Some(limit) = experimental_limit_batched_tasks_total_size {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE,
limit.to_string(),
experimental_limit_batched_tasks_total_size.to_string(),
);
}
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES,
experimental_embedding_cache_entries.to_string(),
@@ -718,22 +680,7 @@ impl Opt {
MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION,
experimental_no_snapshot_compaction.to_string(),
);
if let Some(experimental_personalization_api_key) = experimental_personalization_api_key {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY,
experimental_personalization_api_key,
);
}
indexer_options.export_to_env();
if let Some(s3_snapshot_options) = s3_snapshot_options {
#[cfg(not(unix))]
{
let _ = s3_snapshot_options;
panic!("S3 snapshot options are not supported on Windows");
}
#[cfg(unix)]
s3_snapshot_options.export_to_env();
}
}

pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> {
@@ -902,16 +849,6 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
type Error = anyhow::Error;

fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> {
let IndexerOpts {
max_indexing_memory,
max_indexing_threads,
skip_index_budget,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = other;

let thread_pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(other.max_indexing_threads.unwrap_or_else(|| num_cpus::get() / 2))
.build()?;
@@ -919,163 +856,21 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
Ok(Self {
thread_pool,
log_every_n: Some(DEFAULT_LOG_EVERY_N),
max_memory: max_indexing_memory.map(|b| b.as_u64() as usize),
max_memory: other.max_indexing_memory.map(|b| b.as_u64() as usize),
max_threads: max_indexing_threads.0,
max_threads: *other.max_indexing_threads,
max_positions_per_attributes: None,
skip_index_budget: *skip_index_budget,
skip_index_budget: other.skip_index_budget,
experimental_no_edition_2024_for_settings: *experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps: *experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_settings: other
.experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps: other.experimental_no_edition_2024_for_dumps,
chunk_compression_type: Default::default(),
chunk_compression_level: Default::default(),
documents_chunk_size: Default::default(),
max_nb_chunks: Default::default(),
experimental_no_edition_2024_for_prefix_post_processing:
*experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing:
*experimental_no_edition_2024_for_facet_post_processing,
experimental_no_edition_2024_for_prefix_post_processing: other
.experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing: other
.experimental_no_edition_2024_for_facet_post_processing,
s3_snapshot_options: None,
})
}
}

#[derive(Debug, Clone, Parser, Deserialize)]
// This group is a bit tricky but makes it possible to require all listed fields if one of them
// is specified. It lets us keep an Option for the S3SnapshotOpts configuration.
// <https://github.com/clap-rs/clap/issues/5092#issuecomment-2616986075>
#[group(requires_all = ["s3_bucket_url", "s3_bucket_region", "s3_bucket_name", "s3_snapshot_prefix", "s3_access_key", "s3_secret_key"])]
pub struct S3SnapshotOpts {
/// The S3 bucket URL in the format https://s3.<region>.amazonaws.com.
#[clap(long, env = MEILI_S3_BUCKET_URL, required = false)]
#[serde(default)]
pub s3_bucket_url: String,

/// The region in the format us-east-1.
#[clap(long, env = MEILI_S3_BUCKET_REGION, required = false)]
#[serde(default)]
pub s3_bucket_region: String,

/// The bucket name.
#[clap(long, env = MEILI_S3_BUCKET_NAME, required = false)]
#[serde(default)]
pub s3_bucket_name: String,

/// The prefix path where to put the snapshot, uses normal slashes (/).
#[clap(long, env = MEILI_S3_SNAPSHOT_PREFIX, required = false)]
#[serde(default)]
pub s3_snapshot_prefix: String,

/// The S3 access key.
#[clap(long, env = MEILI_S3_ACCESS_KEY, required = false)]
#[serde(default)]
pub s3_access_key: String,

/// The S3 secret key.
#[clap(long, env = MEILI_S3_SECRET_KEY, required = false)]
#[serde(default)]
pub s3_secret_key: String,

/// The maximum number of parts that can be uploaded in parallel.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS, default_value_t = default_experimental_s3_snapshot_max_in_flight_parts())]
#[serde(default = "default_experimental_s3_snapshot_max_in_flight_parts")]
pub experimental_s3_max_in_flight_parts: NonZeroUsize,

/// The compression level. Defaults to no compression (0).
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL, default_value_t = default_experimental_s3_snapshot_compression_level())]
#[serde(default = "default_experimental_s3_snapshot_compression_level")]
pub experimental_s3_compression_level: u32,

/// The signature duration for the multipart upload.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS, default_value_t = default_experimental_s3_snapshot_signature_duration_seconds())]
#[serde(default = "default_experimental_s3_snapshot_signature_duration_seconds")]
pub experimental_s3_signature_duration_seconds: u64,

/// The size of the multipart parts.
///
/// Must not be smaller than 10MiB nor larger than 8GiB. Yes,
/// twice the boundaries of the AWS S3 multipart upload
/// because we use it a bit differently internally.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE, default_value_t = default_experimental_s3_snapshot_multipart_part_size())]
#[serde(default = "default_experimental_s3_snapshot_multipart_part_size")]
pub experimental_s3_multipart_part_size: Byte,
}

impl S3SnapshotOpts {
/// Exports the values to their corresponding env vars if they are not set.
pub fn export_to_env(self) {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = self;

export_to_env_if_not_present(MEILI_S3_BUCKET_URL, s3_bucket_url);
export_to_env_if_not_present(MEILI_S3_BUCKET_REGION, s3_bucket_region);
export_to_env_if_not_present(MEILI_S3_BUCKET_NAME, s3_bucket_name);
export_to_env_if_not_present(MEILI_S3_SNAPSHOT_PREFIX, s3_snapshot_prefix);
export_to_env_if_not_present(MEILI_S3_ACCESS_KEY, s3_access_key);
export_to_env_if_not_present(MEILI_S3_SECRET_KEY, s3_secret_key);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS,
experimental_s3_max_in_flight_parts.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL,
experimental_s3_compression_level.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS,
experimental_s3_signature_duration_seconds.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE,
experimental_s3_multipart_part_size.to_string(),
);
}
}

impl TryFrom<S3SnapshotOpts> for S3SnapshotOptions {
type Error = anyhow::Error;

fn try_from(other: S3SnapshotOpts) -> Result<Self, Self::Error> {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = other;

Ok(S3SnapshotOptions {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts: experimental_s3_max_in_flight_parts,
s3_compression_level: experimental_s3_compression_level,
s3_signature_duration: Duration::from_secs(experimental_s3_signature_duration_seconds),
s3_multipart_part_size: experimental_s3_multipart_part_size.as_u64(),
})
}
}
@@ -1278,6 +1073,10 @@ fn default_limit_batched_tasks() -> usize {
usize::MAX
}

fn default_limit_batched_tasks_total_size() -> Byte {
Byte::from_u64(u64::MAX)
}

fn default_embedding_cache_entries() -> usize {
0
}
@@ -1290,22 +1089,6 @@ fn default_snapshot_interval_sec() -> &'static str {
DEFAULT_SNAPSHOT_INTERVAL_SEC_STR
}

fn default_experimental_s3_snapshot_max_in_flight_parts() -> NonZeroUsize {
DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS
}

fn default_experimental_s3_snapshot_compression_level() -> u32 {
DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL
}

fn default_experimental_s3_snapshot_signature_duration_seconds() -> u64 {
DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS
}

fn default_experimental_s3_snapshot_multipart_part_size() -> Byte {
DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE
}

fn default_dump_dir() -> PathBuf {
PathBuf::from(DEFAULT_DUMP_DIR)
}
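
The removed `S3SnapshotOpts` leans on the clap group workaround linked in its comment: no flag of the group is required until one of them is passed, at which point all of them are, which lets the parent struct keep an `Option` that stays `None` when the group is absent. A minimal sketch of the same idea under illustrative flag names:

```rust
use clap::Parser;

#[derive(Debug, clap::Args)]
// If one flag of the group is passed, all listed flags become required; with
// none of them, the flattened Option below stays None. Same workaround as
// <https://github.com/clap-rs/clap/issues/5092#issuecomment-2616986075>.
#[group(requires_all = ["bucket_url", "access_key"])]
struct S3Opts {
    #[clap(long, required = false)]
    bucket_url: String,
    #[clap(long, required = false)]
    access_key: String,
}

#[derive(Debug, Parser)]
struct Cli {
    #[clap(flatten)]
    s3: Option<S3Opts>,
}

fn main() {
    // `prog` parses with s3 == None; `prog --bucket-url u` is rejected;
    // `prog --bucket-url u --access-key k` parses with Some(S3Opts { .. }).
    println!("{:?}", Cli::parse().s3);
}
```
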
@@ -1,366 +0,0 @@
use std::time::Duration;

use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::milli::TimeBudget;
use rand::Rng;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use tracing::{debug, info, warn};

use crate::search::{Personalize, SearchResult};

const COHERE_API_URL: &str = "https://api.cohere.ai/v1/rerank";
const MAX_RETRIES: u32 = 10;

#[derive(Debug, thiserror::Error)]
enum PersonalizationError {
#[error("Personalization service: HTTP request failed: {0}")]
Request(#[from] reqwest::Error),
#[error("Personalization service: Failed to parse response: {0}")]
Parse(String),
#[error("Personalization service: Cohere API error: {0}")]
Api(String),
#[error("Personalization service: Unauthorized: invalid API key")]
Unauthorized,
#[error("Personalization service: Rate limited: too many requests")]
RateLimited,
#[error("Personalization service: Bad request: {0}")]
BadRequest(String),
#[error("Personalization service: Internal server error: {0}")]
InternalServerError(String),
#[error("Personalization service: Network error: {0}")]
Network(String),
#[error("Personalization service: Deadline exceeded")]
DeadlineExceeded,
#[error(transparent)]
FeatureNotEnabled(#[from] index_scheduler::error::FeatureNotEnabledError),
}

impl ErrorCode for PersonalizationError {
fn error_code(&self) -> Code {
match self {
PersonalizationError::FeatureNotEnabled { .. } => Code::FeatureNotEnabled,
PersonalizationError::Unauthorized => Code::RemoteInvalidApiKey,
PersonalizationError::RateLimited => Code::TooManySearchRequests,
PersonalizationError::BadRequest(_) => Code::RemoteBadRequest,
PersonalizationError::InternalServerError(_) => Code::RemoteRemoteError,
PersonalizationError::Network(_) | PersonalizationError::Request(_) => {
Code::RemoteCouldNotSendRequest
}
PersonalizationError::Parse(_) | PersonalizationError::Api(_) => {
Code::RemoteBadResponse
}
PersonalizationError::DeadlineExceeded => Code::Internal, // should not be returned to the client
}
}
}

pub struct CohereService {
client: Client,
api_key: String,
}

impl CohereService {
pub fn new(api_key: String) -> Self {
info!("Personalization service initialized with Cohere API");
let client = Client::builder()
.timeout(Duration::from_secs(30))
.build()
.expect("Failed to create HTTP client");
Self { client, api_key }
}

pub async fn rerank_search_results(
&self,
search_result: SearchResult,
personalize: &Personalize,
query: Option<&str>,
time_budget: TimeBudget,
) -> Result<SearchResult, ResponseError> {
if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}

// Extract user context from personalization
let user_context = personalize.user_context.as_str();

// Build the prompt by merging query and user context
let prompt = match query {
Some(q) => format!("User Context: {user_context}\nQuery: {q}"),
None => format!("User Context: {user_context}"),
};

// Extract documents for reranking
let documents: Vec<String> = search_result
.hits
.iter()
.map(|hit| {
// Convert the document to a string representation for reranking
serde_json::to_string(&hit.document).unwrap_or_else(|_| "{}".to_string())
})
.collect();

if documents.is_empty() {
return Ok(search_result);
}

// Call Cohere's rerank API with retry logic
let reranked_indices =
match self.call_rerank_with_retry(&prompt, &documents, time_budget).await {
Ok(indices) => indices,
Err(PersonalizationError::DeadlineExceeded) => {
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}
Err(e) => return Err(e.into()),
};

debug!("Cohere rerank successful, reordering {} results", search_result.hits.len());

// Reorder the hits based on Cohere's reranking
let mut reranked_hits = Vec::new();
for index in reranked_indices.iter() {
if let Some(hit) = search_result.hits.get(*index) {
reranked_hits.push(hit.clone());
}
}

Ok(SearchResult { hits: reranked_hits, ..search_result })
}

async fn call_rerank_with_retry(
&self,
query: &str,
documents: &[String],
time_budget: TimeBudget,
) -> Result<Vec<usize>, PersonalizationError> {
let request_body = CohereRerankRequest {
query: query.to_string(),
documents: documents.to_vec(),
model: "rerank-english-v3.0".to_string(),
};

// Retry loop similar to vector extraction
for attempt in 0..MAX_RETRIES {
let response_result = self.send_rerank_request(&request_body).await;

let retry_duration = match self.handle_response(response_result).await {
Ok(indices) => return Ok(indices),
Err(retry) => {
warn!("Cohere rerank attempt #{} failed: {}", attempt, retry.error);

if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
return Err(PersonalizationError::DeadlineExceeded);
} else {
match retry.into_duration(attempt) {
Ok(d) => d,
Err(error) => return Err(error),
}
}
}
};

// randomly up to double the retry duration
let retry_duration = retry_duration
+ rand::thread_rng().gen_range(std::time::Duration::ZERO..retry_duration);

warn!("Retrying after {}ms", retry_duration.as_millis());
tokio::time::sleep(retry_duration).await;
}

// Final attempt without retry
let response_result = self.send_rerank_request(&request_body).await;

match self.handle_response(response_result).await {
Ok(indices) => Ok(indices),
Err(retry) => Err(retry.into_error()),
}
}

async fn send_rerank_request(
&self,
request_body: &CohereRerankRequest,
) -> Result<reqwest::Response, reqwest::Error> {
self.client
.post(COHERE_API_URL)
.header("Authorization", format!("Bearer {}", self.api_key))
.header("Content-Type", "application/json")
.json(request_body)
.send()
.await
}

async fn handle_response(
&self,
response_result: Result<reqwest::Response, reqwest::Error>,
) -> Result<Vec<usize>, Retry> {
let response = match response_result {
Ok(r) => r,
Err(e) if e.is_timeout() => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Request timeout: {}",
e
))));
}
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Network error: {}",
e
))));
}
};

let status = response.status();
let status_code = status.as_u16();

if status.is_success() {
let rerank_response: CohereRerankResponse = match response.json().await {
Ok(r) => r,
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Parse(format!(
"Failed to parse response: {}",
e
))));
}
};

// Extract indices from rerank results
let indices: Vec<usize> =
rerank_response.results.iter().map(|result| result.index as usize).collect();

return Ok(indices);
}

// Handle error status codes
let error_body = response.text().await.unwrap_or_else(|_| "Unknown error".to_string());

let retry = match status_code {
401 => Retry::give_up(PersonalizationError::Unauthorized),
429 => Retry::rate_limited(PersonalizationError::RateLimited),
400 => Retry::give_up(PersonalizationError::BadRequest(error_body)),
500..=599 => Retry::retry_later(PersonalizationError::InternalServerError(format!(
"Status {}: {}",
status_code, error_body
))),
402..=499 => Retry::give_up(PersonalizationError::Api(format!(
"Status {}: {}",
status_code, error_body
))),
_ => Retry::retry_later(PersonalizationError::Api(format!(
"Unexpected status {}: {}",
status_code, error_body
))),
};

Err(retry)
}
}

#[derive(Serialize)]
struct CohereRerankRequest {
query: String,
documents: Vec<String>,
model: String,
}

#[derive(Deserialize)]
struct CohereRerankResponse {
results: Vec<CohereRerankResult>,
}

#[derive(Deserialize)]
struct CohereRerankResult {
index: u32,
}

// Retry strategy similar to vector extraction
struct Retry {
error: PersonalizationError,
strategy: RetryStrategy,
}

enum RetryStrategy {
GiveUp,
Retry,
RetryAfterRateLimit,
}

impl Retry {
fn give_up(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::GiveUp }
}

fn retry_later(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::Retry }
}

fn rate_limited(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::RetryAfterRateLimit }
}

fn into_duration(self, attempt: u32) -> Result<Duration, PersonalizationError> {
match self.strategy {
RetryStrategy::GiveUp => Err(self.error),
RetryStrategy::Retry => {
// Exponential backoff: 10^attempt milliseconds
Ok(Duration::from_millis((10u64).pow(attempt)))
}
RetryStrategy::RetryAfterRateLimit => {
// Longer backoff for rate limits: 100ms + exponential
Ok(Duration::from_millis(100 + (10u64).pow(attempt)))
}
}
}

fn into_error(self) -> PersonalizationError {
self.error
}
}

pub enum PersonalizationService {
Cohere(CohereService),
Disabled,
}

impl PersonalizationService {
pub fn cohere(api_key: String) -> Self {
// If the API key is empty, consider the personalization service as disabled
if api_key.trim().is_empty() {
Self::disabled()
} else {
Self::Cohere(CohereService::new(api_key))
}
}

pub fn disabled() -> Self {
debug!("Personalization service disabled");
Self::Disabled
}

pub async fn rerank_search_results(
&self,
search_result: SearchResult,
personalize: &Personalize,
query: Option<&str>,
time_budget: TimeBudget,
) -> Result<SearchResult, ResponseError> {
match self {
Self::Cohere(cohere_service) => {
cohere_service
.rerank_search_results(search_result, personalize, query, time_budget)
.await
}
Self::Disabled => Err(PersonalizationError::FeatureNotEnabled(
index_scheduler::error::FeatureNotEnabledError {
disabled_action: "reranking search results",
feature: "personalization",
issue_link: "https://github.com/orgs/meilisearch/discussions/866",
},
)
.into()),
}
}
}
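
The deleted `CohereService` paces its retries with exponential backoff (10^attempt milliseconds, with an extra 100 ms when rate limited) and then randomly extends each wait by up to its own length, so sleeps land in [d, 2d). A self-contained sketch of just that pacing logic:

```rust
use std::time::Duration;

use rand::Rng;

fn backoff(attempt: u32, rate_limited: bool) -> Duration {
    // Exponential base: 10^attempt milliseconds, with a 100ms floor added
    // for rate-limited responses, as in the deleted into_duration().
    let millis = if rate_limited { 100 + 10u64.pow(attempt) } else { 10u64.pow(attempt) };
    let base = Duration::from_millis(millis);
    // Jitter on top: randomly up to double the retry duration.
    base + rand::thread_rng().gen_range(Duration::ZERO..base)
}

fn main() {
    for attempt in 0..4 {
        println!("attempt {attempt}: sleeping {:?}", backoff(attempt, false));
    }
}
```
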
@@ -1,39 +0,0 @@
pub mod proxy {

use std::fs::File;

use actix_web::HttpRequest;
use index_scheduler::IndexScheduler;

use crate::error::MeilisearchHttpError;

pub enum Body<T: serde::Serialize> {
NdJsonPayload,
Inline(T),
None,
}

impl Body<()> {
pub fn with_ndjson_payload(_file: File) -> Self {
Self::NdJsonPayload
}

pub fn none() -> Self {
Self::None
}
}

pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";

pub async fn proxy<T: serde::Serialize>(
_index_scheduler: &IndexScheduler,
_index_uid: &str,
_req: &HttpRequest,
_network: meilisearch_types::network::Network,
_body: Body<T>,
_task: &meilisearch_types::tasks::Task,
) -> Result<(), MeilisearchHttpError> {
Ok(())
}
}
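
The module removed above is the stub edition of the proxy: it keeps call sites such as `delete_document` below compiling while doing nothing. A simplified, runnable sketch of the `Body` shape it exposes, with the Meilisearch-specific parameters stripped out:

```rust
// Simplified from the stub: one enum lets a single proxy signature accept an
// inline serializable value, a streamed ndjson payload, or no body at all.
enum Body<T: serde::Serialize> {
    NdJsonPayload,
    Inline(T),
    None,
}

impl Body<()> {
    fn none() -> Self {
        Self::None
    }
}

async fn proxy<T: serde::Serialize>(_body: Body<T>) {
    // No-op, mirroring the stub; the real call shape used below is
    // proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).
}

#[tokio::main]
async fn main() {
    proxy(Body::none()).await;
}
```
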
@@ -34,7 +34,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
/// Compact an index
#[utoipa::path(
post,
path = "/{indexUid}/compact",
path = "{indexUid}/compact",
tag = "Compact an index",
security(("Bearer" = ["search", "*"])),
params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),

@@ -45,7 +45,7 @@ use crate::extractors::authentication::policies::*;
 use crate::extractors::authentication::GuardedData;
 use crate::extractors::payload::Payload;
 use crate::extractors::sequential_extractor::SeqHandler;
-use crate::routes::indexes::current_edition::proxy::{proxy, Body};
+use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
 use crate::routes::indexes::search::fix_sort_query_parameters;
 use crate::routes::{
     get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
@@ -333,12 +333,10 @@ impl Aggregate for DocumentsDeletionAggregator {
 pub async fn delete_document(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
     path: web::Path<DocumentParam>,
-    params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
     req: HttpRequest,
     opt: web::Data<Opt>,
     analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
-    let CustomMetadataQuery { custom_metadata } = params.into_inner();
     let DocumentParam { index_uid, document_id } = path.into_inner();
     let index_uid = IndexUid::try_from(index_uid)?;
     let network = index_scheduler.network();
@@ -361,13 +359,10 @@ pub async fn delete_document(
     let dry_run = is_dry_run(&req, &opt)?;
     let task = {
         let index_scheduler = index_scheduler.clone();
-        tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
-        })
-        .await??
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
     };
 
-    if network.sharding() && !dry_run {
+    if network.sharding && !dry_run {
         proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
     }
 
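Both sides of this hunk share one shape worth calling out: task registration is a synchronous queue write, so it is moved onto tokio's blocking pool, and the double `?` first unwraps the `JoinError`, then the scheduler's own result. A runnable sketch of that shape with a stand-in scheduler type (not Meilisearch's API):

```rust
use std::sync::Arc;

struct Scheduler;

impl Scheduler {
    // Stand-in for the synchronous task registration (an LMDB write in practice).
    fn register(&self, task: u32) -> Result<u32, String> {
        Ok(task)
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let scheduler = Arc::new(Scheduler);
    // Clone the handle so the closure can own it, mirroring
    // `index_scheduler.clone()` in the diff above.
    let handle = Arc::clone(&scheduler);
    // First `?` unwraps the JoinError, second `?` the scheduler error.
    let registered = tokio::task::spawn_blocking(move || handle.register(42)).await??;
    println!("registered task {registered}");
    Ok(())
}
```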
@@ -683,19 +678,6 @@ pub struct UpdateDocumentsQuery {
     #[param(value_type = char, default = ",", example = ";")]
     #[deserr(default, try_from(char) = from_char_csv_delimiter -> DeserrQueryParamError<InvalidDocumentCsvDelimiter>, error = DeserrQueryParamError<InvalidDocumentCsvDelimiter>)]
     pub csv_delimiter: Option<u8>,
-
-    #[param(example = "custom")]
-    #[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
-    pub custom_metadata: Option<String>,
-}
-
-#[derive(Deserialize, Debug, Deserr, IntoParams)]
-#[deserr(error = DeserrQueryParamError, rename_all = camelCase, deny_unknown_fields)]
-#[into_params(parameter_in = Query, rename_all = "camelCase")]
-pub struct CustomMetadataQuery {
-    #[param(example = "custom")]
-    #[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
-    pub custom_metadata: Option<String>,
 }
 
 fn from_char_csv_delimiter(
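The struct removed above reduced to a single optional query parameter. Meilisearch parses it with deserr; purely to show the shape, here is a serde equivalent, runnable as-is:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
struct CustomMetadataQuery {
    #[serde(default)]
    custom_metadata: Option<String>,
}

fn main() {
    let q: CustomMetadataQuery =
        serde_json::from_str(r#"{"customMetadata":"imported-from-v1"}"#).unwrap();
    assert_eq!(q.custom_metadata.as_deref(), Some("imported-from-v1"));
    // An absent parameter falls back to `None` thanks to `default`.
    let q: CustomMetadataQuery = serde_json::from_str("{}").unwrap();
    assert!(q.custom_metadata.is_none());
}
```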
@@ -837,7 +819,6 @@ pub async fn replace_documents(
         body,
         IndexDocumentsMethod::ReplaceDocuments,
         uid,
-        params.custom_metadata,
         dry_run,
         allow_index_creation,
         &req,
@@ -940,7 +921,6 @@ pub async fn update_documents(
         body,
         IndexDocumentsMethod::UpdateDocuments,
         uid,
-        params.custom_metadata,
         dry_run,
         allow_index_creation,
         &req,
@@ -960,7 +940,6 @@ async fn document_addition(
     body: Payload,
     method: IndexDocumentsMethod,
     task_id: Option<TaskId>,
-    custom_metadata: Option<String>,
     dry_run: bool,
     allow_index_creation: bool,
     req: &HttpRequest,
@@ -1086,9 +1065,7 @@ async fn document_addition(
     };
 
     let scheduler = index_scheduler.clone();
-    let task = match tokio::task::spawn_blocking(move || {
-        scheduler.register_with_custom_metadata(task, task_id, custom_metadata, dry_run)
-    })
+    let task = match tokio::task::spawn_blocking(move || scheduler.register(task, task_id, dry_run))
     .await?
     {
         Ok(task) => task,
@@ -1098,7 +1075,7 @@ async fn document_addition(
         }
     };
 
-    if network.sharding() {
+    if network.sharding {
         if let Some(file) = file {
             proxy(
                 &index_scheduler,
@@ -1153,7 +1130,7 @@ async fn copy_body_to_file(
 /// Delete a set of documents based on an array of document ids.
 #[utoipa::path(
     post,
-    path = "{indexUid}/documents/delete-batch",
+    path = "{indexUid}/delete-batch",
     tag = "Documents",
     security(("Bearer" = ["documents.delete", "documents.*", "*"])),
     params(
@@ -1184,14 +1161,11 @@ pub async fn delete_documents_batch(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
     body: web::Json<Vec<Value>>,
-    params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
     req: HttpRequest,
     opt: web::Data<Opt>,
     analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!(parameters = ?body, "Delete documents by batch");
-    let CustomMetadataQuery { custom_metadata } = params.into_inner();
-
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let network = index_scheduler.network();
 
@@ -1216,13 +1190,10 @@ pub async fn delete_documents_batch(
     let dry_run = is_dry_run(&req, &opt)?;
     let task = {
         let index_scheduler = index_scheduler.clone();
-        tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
-        })
-        .await??
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
     };
 
-    if network.sharding() && !dry_run {
+    if network.sharding && !dry_run {
         proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
     }
 
@@ -1273,15 +1244,12 @@ pub struct DocumentDeletionByFilter {
 pub async fn delete_documents_by_filter(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
-    params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
     body: AwebJson<DocumentDeletionByFilter, DeserrJsonError>,
     req: HttpRequest,
     opt: web::Data<Opt>,
     analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!(parameters = ?body, "Delete documents by filter");
-    let CustomMetadataQuery { custom_metadata } = params.into_inner();
-
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let index_uid = index_uid.into_inner();
     let filter = body.into_inner();
@@ -1314,13 +1282,10 @@ pub async fn delete_documents_by_filter(
     let dry_run = is_dry_run(&req, &opt)?;
     let task = {
         let index_scheduler = index_scheduler.clone();
-        tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
-        })
-        .await??
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
     };
 
-    if network.sharding() && !dry_run {
+    if network.sharding && !dry_run {
         proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
     }
 
@@ -1407,14 +1372,12 @@ impl Aggregate for EditDocumentsByFunctionAggregator {
 pub async fn edit_documents_by_function(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ALL }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
-    params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
-    body: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
+    params: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
     req: HttpRequest,
     opt: web::Data<Opt>,
     analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
-    debug!(parameters = ?body, "Edit documents by function");
-    let CustomMetadataQuery { custom_metadata } = params.into_inner();
+    debug!(parameters = ?params, "Edit documents by function");
 
     index_scheduler
         .features()
@@ -1424,23 +1387,23 @@ pub async fn edit_documents_by_function(
 
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let index_uid = index_uid.into_inner();
-    let body = body.into_inner();
+    let params = params.into_inner();
 
     analytics.publish(
         EditDocumentsByFunctionAggregator {
-            filtered: body.filter.is_some(),
-            with_context: body.context.is_some(),
+            filtered: params.filter.is_some(),
+            with_context: params.context.is_some(),
             index_creation: index_scheduler.index(&index_uid).is_err(),
         },
         &req,
     );
 
     let engine = milli::rhai::Engine::new();
-    if let Err(e) = engine.compile(&body.function) {
+    if let Err(e) = engine.compile(&params.function) {
         return Err(ResponseError::from_msg(e.to_string(), Code::BadRequest));
     }
 
-    if let Some(ref filter) = body.filter {
+    if let Some(ref filter) = params.filter {
         // we ensure the filter is well formed before enqueuing it
         crate::search::parse_filter(
             filter,
@@ -1451,8 +1414,8 @@ pub async fn edit_documents_by_function(
     }
     let task = KindWithContent::DocumentEdition {
         index_uid: index_uid.clone(),
-        filter_expr: body.filter.clone(),
-        context: match body.context.clone() {
+        filter_expr: params.filter.clone(),
+        context: match params.context.clone() {
             Some(Value::Object(m)) => Some(m),
             None => None,
             _ => {
@@ -1462,21 +1425,18 @@ pub async fn edit_documents_by_function(
                 ))
             }
         },
-        function: body.function.clone(),
+        function: params.function.clone(),
     };
 
     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
     let task = {
         let index_scheduler = index_scheduler.clone();
-        tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
-        })
-        .await??
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
     };
 
-    if network.sharding() && !dry_run {
-        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
+    if network.sharding && !dry_run {
+        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(params), &task).await?;
     }
 
     let task: SummarizedTaskView = task.into();
@@ -1517,14 +1477,12 @@ pub async fn edit_documents_by_function(
 pub async fn clear_all_documents(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
-    params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
     req: HttpRequest,
     opt: web::Data<Opt>,
     analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let network = index_scheduler.network();
-    let CustomMetadataQuery { custom_metadata } = params.into_inner();
 
     analytics.publish(
         DocumentsDeletionAggregator {
@@ -1543,13 +1501,10 @@ pub async fn clear_all_documents(
     let task = {
         let index_scheduler = index_scheduler.clone();
 
-        tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
-        })
-        .await??
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
     };
 
-    if network.sharding() && !dry_run {
+    if network.sharding && !dry_run {
         proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
     }
 
@@ -52,7 +52,7 @@ pub async fn proxy<T: serde::Serialize>(
     index_scheduler: &IndexScheduler,
     index_uid: &str,
     req: &HttpRequest,
-    network: meilisearch_types::network::Network,
+    network: meilisearch_types::enterprise_edition::network::Network,
     body: Body<T>,
     task: &meilisearch_types::tasks::Task,
 ) -> Result<(), MeilisearchHttpError> {
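With the community no-op stub deleted at the top of this diff, `enterprise_edition::proxy` is the only proxy implementation left, and every write route above guards it the same way: register the task locally, then, when `network.sharding` is set (now a plain field rather than a `sharding()` accessor) and the call is not a dry run, replay the request to the other shards. A sketch of that control flow with stand-in types, not Meilisearch's:

```rust
struct Network {
    sharding: bool,
    remotes: Vec<String>,
}

// Stand-in for the HTTP replay performed by `proxy` in the diff.
async fn forward(remote: &str, payload: &str) -> Result<(), String> {
    println!("forwarding {payload} to {remote}");
    Ok(())
}

async fn handle_write(network: &Network, dry_run: bool, payload: &str) -> Result<(), String> {
    // ...the task is registered locally first...
    if network.sharding && !dry_run {
        for remote in &network.remotes {
            forward(remote, payload).await?;
        }
    }
    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let network =
        Network { sharding: true, remotes: vec!["ms-1:7700".into(), "ms-2:7700".into()] };
    handle_write(&network, false, r#"{"documents":[]}"#).await
}
```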
@@ -343,7 +343,6 @@ impl From<FacetSearchQuery> for SearchQuery {
             hybrid,
             ranking_score_threshold,
             locales,
-            personalize: None,
         }
     }
 }
@@ -30,16 +30,7 @@ use crate::Opt;
 
 pub mod compact;
 pub mod documents;
-
-#[cfg(not(feature = "enterprise"))]
-mod community_edition;
-#[cfg(feature = "enterprise")]
 mod enterprise_edition;
-#[cfg(not(feature = "enterprise"))]
-use community_edition as current_edition;
-#[cfg(feature = "enterprise")]
-use enterprise_edition as current_edition;
-
 pub mod facet_search;
 pub mod search;
 mod search_analytics;
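The lines removed here were the compile-time dispatch between the two editions: one cargo feature picked which module the `current_edition` alias pointed at, so call sites never changed. The pattern itself, as a self-contained sketch (assuming an `enterprise = []` entry under `[features]` in Cargo.toml):

```rust
mod community_edition {
    pub fn proxy_origin() -> &'static str {
        "no-op (community)"
    }
}

mod enterprise_edition {
    pub fn proxy_origin() -> &'static str {
        "full proxy (enterprise)"
    }
}

// The cargo feature decides which module the alias resolves to.
#[cfg(not(feature = "enterprise"))]
use community_edition as current_edition;
#[cfg(feature = "enterprise")]
use enterprise_edition as current_edition;

fn main() {
    // Call sites are edition-agnostic; `cargo run --features enterprise`
    // flips the implementation without touching this line.
    println!("{}", current_edition::proxy_origin());
}
```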
@@ -50,7 +41,7 @@ mod settings_analytics;
 pub mod similar;
 mod similar_analytics;
 
-pub use current_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
+pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
 
 #[derive(OpenApi)]
 #[openapi(
@@ -24,9 +24,9 @@ use crate::metrics::MEILISEARCH_DEGRADED_SEARCH_REQUESTS;
 use crate::routes::indexes::search_analytics::{SearchAggregator, SearchGET, SearchPOST};
 use crate::routes::parse_include_metadata_header;
 use crate::search::{
-    add_search_rules, perform_search, HybridQuery, MatchingStrategy, Personalize,
-    RankingScoreThreshold, RetrieveVectors, SearchKind, SearchParams, SearchQuery, SearchResult,
-    SemanticRatio, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG,
+    add_search_rules, perform_search, HybridQuery, MatchingStrategy, RankingScoreThreshold,
+    RetrieveVectors, SearchKind, SearchParams, SearchQuery, SearchResult, SemanticRatio,
+    DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG,
     DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO,
 };
 use crate::search_queue::SearchQueue;
@@ -134,8 +134,6 @@ pub struct SearchQueryGet {
     #[deserr(default, error = DeserrQueryParamError<InvalidSearchLocales>)]
     #[param(value_type = Vec<Locale>, explode = false)]
     pub locales: Option<CS<Locale>>,
-    #[deserr(default, error = DeserrQueryParamError<InvalidSearchPersonalizeUserContext>)]
-    pub personalize_user_context: Option<String>,
 }
 
 #[derive(Debug, Clone, Copy, PartialEq, deserr::Deserr)]
@@ -207,9 +205,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
             ));
         }
 
-        let personalize =
-            other.personalize_user_context.map(|user_context| Personalize { user_context });
-
         Ok(Self {
             q: other.q,
             // `media` not supported for `GET`
@@ -239,7 +234,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
             hybrid,
             ranking_score_threshold: other.ranking_score_threshold.map(|o| o.0),
             locales: other.locales.map(|o| o.into_iter().collect()),
-            personalize,
         })
     }
 }
@@ -328,7 +322,6 @@ pub fn fix_sort_query_parameters(sort_query: &str) -> Vec<String> {
 pub async fn search_with_url_query(
     index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
     search_queue: web::Data<SearchQueue>,
-    personalization_service: web::Data<crate::personalization::PersonalizationService>,
     index_uid: web::Path<String>,
     params: AwebQueryParameter<SearchQueryGet, DeserrQueryParamError>,
     req: HttpRequest,
@@ -349,16 +342,9 @@ pub async fn search_with_url_query(
 
     let index = index_scheduler.index(&index_uid)?;
 
-    // Extract personalization and query string before moving query
-    let personalize = query.personalize.take();
-
     let search_kind =
         search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
     let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
-
-    // Save the query string for personalization if requested
-    let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
-
     let permit = search_queue.try_get_search_permit().await?;
     let include_metadata = parse_include_metadata_header(&req);
 
@@ -379,24 +365,12 @@ pub async fn search_with_url_query(
     .await;
     permit.drop().await;
     let search_result = search_result?;
-    if let Ok((search_result, _)) = search_result.as_ref() {
+    if let Ok(ref search_result) = search_result {
         aggregate.succeed(search_result);
     }
     analytics.publish(aggregate, &req);
 
-    let (mut search_result, time_budget) = search_result?;
-
-    // Apply personalization if requested
-    if let Some(personalize) = personalize.as_ref() {
-        search_result = personalization_service
-            .rerank_search_results(
-                search_result,
-                personalize,
-                personalize_query.as_deref(),
-                time_budget,
-            )
-            .await?;
-    }
+    let search_result = search_result?;
 
     debug!(request_uid = ?request_uid, returns = ?search_result, "Search get");
     Ok(HttpResponse::Ok().json(search_result))
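The removed code followed a take-then-rerank flow: the personalization options are `take()`n out of the query before the query is moved into the blocking search closure, and reranking happens afterwards on the returned results. A runnable sketch of that ownership dance with stand-in types:

```rust
#[derive(Debug)]
struct Query {
    q: Option<String>,
    personalize: Option<String>, // user context in the real code
}

#[derive(Debug)]
struct SearchResult {
    hits: Vec<String>,
}

// Stand-in for the personalization service's rerank call.
async fn rerank(mut result: SearchResult, _user_context: &str, _query: Option<&str>) -> SearchResult {
    result.hits.reverse();
    result
}

#[tokio::main]
async fn main() {
    let mut query = Query { q: Some("movie".into()), personalize: Some("likes sci-fi".into()) };

    // Pull the options out *before* `query` is moved into the blocking closure.
    let personalize = query.personalize.take();
    let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();

    let mut result = tokio::task::spawn_blocking(move || SearchResult {
        hits: vec![format!("results for {:?}", query.q)],
    })
    .await
    .expect("search task panicked");

    if let Some(user_context) = personalize.as_ref() {
        result = rerank(result, user_context, personalize_query.as_deref()).await;
    }
    println!("{result:?}");
}
```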
@@ -461,7 +435,6 @@ pub async fn search_with_url_query(
 pub async fn search_with_post(
     index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
     search_queue: web::Data<SearchQueue>,
-    personalization_service: web::Data<crate::personalization::PersonalizationService>,
     index_uid: web::Path<String>,
     params: AwebJson<SearchQuery, DeserrJsonError>,
     req: HttpRequest,
@@ -482,18 +455,12 @@ pub async fn search_with_post(
 
     let index = index_scheduler.index(&index_uid)?;
 
-    // Extract personalization and query string before moving query
-    let personalize = query.personalize.take();
-
     let search_kind =
         search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
     let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
 
     let include_metadata = parse_include_metadata_header(&req);
 
-    // Save the query string for personalization if requested
-    let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
-
     let permit = search_queue.try_get_search_permit().await?;
     let search_result = tokio::task::spawn_blocking(move || {
         perform_search(
@@ -512,7 +479,7 @@ pub async fn search_with_post(
     .await;
     permit.drop().await;
     let search_result = search_result?;
-    if let Ok((ref search_result, _)) = search_result {
+    if let Ok(ref search_result) = search_result {
         aggregate.succeed(search_result);
         if search_result.degraded {
             MEILISEARCH_DEGRADED_SEARCH_REQUESTS.inc();
@@ -520,19 +487,7 @@ pub async fn search_with_post(
     }
     analytics.publish(aggregate, &req);
 
-    let (mut search_result, time_budget) = search_result?;
-
-    // Apply personalization if requested
-    if let Some(personalize) = personalize.as_ref() {
-        search_result = personalization_service
-            .rerank_search_results(
-                search_result,
-                personalize,
-                personalize_query.as_deref(),
-                time_budget,
-            )
-            .await?;
-    }
+    let search_result = search_result?;
 
     debug!(request_uid = ?request_uid, returns = ?search_result, "Search post");
     Ok(HttpResponse::Ok().json(search_result))
@@ -7,7 +7,6 @@ use serde_json::{json, Value};
 
 use crate::aggregate_methods;
 use crate::analytics::{Aggregate, AggregateMethod};
-use crate::metrics::MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS;
 use crate::search::{
     SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
     DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
@@ -96,9 +95,6 @@ pub struct SearchAggregator<Method: AggregateMethod> {
     show_ranking_score_details: bool,
     ranking_score_threshold: bool,
 
-    // personalization
-    total_personalized: usize,
-
     marker: std::marker::PhantomData<Method>,
 }
 
@@ -133,7 +129,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
             hybrid,
             ranking_score_threshold,
             locales,
-            personalize,
         } = query;
 
         let mut ret = Self::default();
@@ -209,12 +204,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
             ret.locales = locales.iter().copied().collect();
         }
 
-        // personalization
-        if personalize.is_some() {
-            ret.total_personalized = 1;
-            MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS.inc();
-        }
-
         ret.highlight_pre_tag = *highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG();
         ret.highlight_post_tag = *highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG();
         ret.crop_marker = *crop_marker != DEFAULT_CROP_MARKER();
@@ -307,7 +296,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
             total_used_negative_operator,
             ranking_score_threshold,
             mut locales,
-            total_personalized,
             marker: _,
         } = *new;
 
@@ -393,9 +381,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
         // locales
         self.locales.append(&mut locales);
 
-        // personalization
-        self.total_personalized = self.total_personalized.saturating_add(total_personalized);
-
         self
     }
 
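The removed merge line used `saturating_add`, the same convention as the other counters in this aggregator: merging many aggregates clamps at `usize::MAX` instead of wrapping. Minimal demonstration:

```rust
#[derive(Default)]
struct Aggregator {
    total_personalized: usize,
}

impl Aggregator {
    // Merge another aggregate into this one, as `Aggregate::aggregate` does.
    fn aggregate(mut self, other: Aggregator) -> Aggregator {
        self.total_personalized = self.total_personalized.saturating_add(other.total_personalized);
        self
    }
}

fn main() {
    let merged = Aggregator { total_personalized: usize::MAX }
        .aggregate(Aggregator { total_personalized: 1 });
    assert_eq!(merged.total_personalized, usize::MAX); // clamped, not wrapped
}
```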
@@ -441,7 +426,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
             total_used_negative_operator,
             ranking_score_threshold,
             locales,
-            total_personalized,
             marker: _,
         } = *self;
 
@@ -515,9 +499,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
                 "show_ranking_score_details": show_ranking_score_details,
                 "ranking_score_threshold": ranking_score_threshold,
             },
-            "personalization": {
-                "total_personalized": total_personalized,
-            },
         })
     }
 }
@@ -4,7 +4,6 @@ use index_scheduler::{IndexScheduler, Query};
 use meilisearch_auth::AuthController;
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::keys::actions;
-use meilisearch_types::milli::progress::ProgressStepView;
 use meilisearch_types::tasks::Status;
 use prometheus::{Encoder, TextEncoder};
 use time::OffsetDateTime;
@@ -39,12 +38,6 @@ pub fn configure(config: &mut web::ServiceConfig) {
 # HELP meilisearch_db_size_bytes Meilisearch DB Size In Bytes
 # TYPE meilisearch_db_size_bytes gauge
 meilisearch_db_size_bytes 1130496
-# HELP meilisearch_batch_running_progress_trace The currently running progress trace
-# TYPE meilisearch_batch_running_progress_trace gauge
-meilisearch_batch_running_progress_trace{batch_uid="0",step_name="document"} 0.710618582519409
-meilisearch_batch_running_progress_trace{batch_uid="0",step_name="extracting word proximity"} 0.2222222222222222
-meilisearch_batch_running_progress_trace{batch_uid="0",step_name="indexing"} 0.6666666666666666
-meilisearch_batch_running_progress_trace{batch_uid="0",step_name="processing tasks"} 0
 # HELP meilisearch_http_requests_total Meilisearch HTTP requests total
 # TYPE meilisearch_http_requests_total counter
 meilisearch_http_requests_total{method="GET",path="/metrics",status="400"} 1
@@ -68,13 +61,6 @@ meilisearch_http_response_time_seconds_bucket{method="GET",path="/metrics",le="1
 meilisearch_http_response_time_seconds_bucket{method="GET",path="/metrics",le="+Inf"} 0
 meilisearch_http_response_time_seconds_sum{method="GET",path="/metrics"} 0
 meilisearch_http_response_time_seconds_count{method="GET",path="/metrics"} 0
-# HELP meilisearch_last_finished_batches_progress_trace_ms The last few batches progress trace in milliseconds
-# TYPE meilisearch_last_finished_batches_progress_trace_ms gauge
-meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks"} 19360
-meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > computing document changes"} 368
-meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > computing document changes > preparing payloads"} 367
-meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > computing document changes > preparing payloads > payload"} 367
-meilisearch_last_finished_batches_progress_trace_ms{batch_uid="0",step_name="processing tasks > indexing"} 18970
 # HELP meilisearch_index_count Meilisearch Index Count
 # TYPE meilisearch_index_count gauge
 meilisearch_index_count 1
@@ -162,50 +148,6 @@ pub async fn get_metrics(
         }
     }
 
-    // Fetch and expose the current progressing step
-    crate::metrics::MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE.reset();
-    let (batches, _total) = index_scheduler.get_batches_from_authorized_indexes(
-        &Query { statuses: Some(vec![Status::Processing]), ..Query::default() },
-        auth_filters,
-    )?;
-    if let Some(batch) = batches.into_iter().next() {
-        let batch_uid = batch.uid.to_string();
-        if let Some(progress) = batch.progress {
-            for ProgressStepView { current_step, finished, total } in progress.steps {
-                crate::metrics::MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE
-                    .with_label_values(&[batch_uid.as_str(), current_step.as_ref()])
-                    // We return the completion ratio of the current step
-                    .set(finished as f64 / total as f64);
-            }
-        }
-    }
-
-    crate::metrics::MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS.reset();
-    let (batches, _total) = index_scheduler.get_batches_from_authorized_indexes(
-        // Fetch the finished batches...
-        &Query {
-            statuses: Some(vec![Status::Succeeded, Status::Failed]),
-            limit: Some(1),
-            ..Query::default()
-        },
-        auth_filters,
-    )?;
-    // ...and get the last batch only.
-    if let Some(batch) = batches.into_iter().next() {
-        let batch_uid = batch.uid.to_string();
-        for (step_name, duration_str) in batch.stats.progress_trace {
-            let Some(duration_str) = duration_str.as_str() else { continue };
-            match humantime::parse_duration(duration_str) {
-                Ok(duration) => {
-                    crate::metrics::MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS
-                        .with_label_values(&[&batch_uid, &step_name])
-                        .set(duration.as_millis() as i64);
-                }
-                Err(e) => tracing::error!("Failed to parse duration: {e}"),
-            }
-        }
-    }
-
     if let Some(last_update) = response.last_update {
         crate::metrics::MEILISEARCH_LAST_UPDATE.set(last_update.unix_timestamp());
    }
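For reference, the two metrics deleted above were a labeled gauge holding the completion ratio (`finished / total`) of the currently running step, and a gauge of per-step durations for the last finished batch, parsed from human-readable strings. A standalone sketch with the `prometheus` and `humantime` crates, reusing the metric and label names from the diff:

```rust
use prometheus::{Encoder, GaugeVec, IntGaugeVec, Opts, Registry, TextEncoder};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let registry = Registry::new();

    // Completion ratio of the currently running step, in [0, 1].
    let running = GaugeVec::new(
        Opts::new("meilisearch_batch_running_progress_trace", "The currently running progress trace"),
        &["batch_uid", "step_name"],
    )?;
    registry.register(Box::new(running.clone()))?;
    let (finished, total) = (2.0_f64, 3.0_f64);
    running.with_label_values(&["0", "indexing"]).set(finished / total);

    // Step durations of the last finished batch, parsed from strings such as
    // "19s 360ms" and exported as milliseconds.
    let last = IntGaugeVec::new(
        Opts::new(
            "meilisearch_last_finished_batches_progress_trace_ms",
            "The last few batches progress trace in milliseconds",
        ),
        &["batch_uid", "step_name"],
    )?;
    registry.register(Box::new(last.clone()))?;
    let duration = humantime::parse_duration("19s 360ms")?;
    last.with_label_values(&["0", "processing tasks"]).set(duration.as_millis() as i64);

    // Render in the Prometheus text exposition format, as `GET /metrics` does.
    let mut buf = Vec::new();
    TextEncoder::new().encode(&registry.gather(), &mut buf)?;
    print!("{}", String::from_utf8(buf)?);
    Ok(())
}
```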
@@ -41,9 +41,7 @@ use crate::routes::indexes::IndexView;
 use crate::routes::multi_search::SearchResults;
 use crate::routes::network::{Network, Remote};
 use crate::routes::swap_indexes::SwapIndexesPayload;
-use crate::routes::webhooks::{
-    WebhookResults, WebhookSettings, WebhookWithMetadataRedactedAuthorization,
-};
+use crate::routes::webhooks::{WebhookResults, WebhookSettings, WebhookWithMetadata};
 use crate::search::{
     FederatedSearch, FederatedSearchResult, Federation, FederationOptions, MergeFacets,
     SearchQueryWithIndex, SearchResultWithIndex, SimilarQuery, SimilarResult,
@@ -105,7 +103,7 @@ mod webhooks;
         url = "/",
         description = "Local server",
     )),
-    components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadataRedactedAuthorization, meilisearch_types::milli::vector::VectorStoreBackend))
+    components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata, meilisearch_types::milli::vector::VectorStoreBackend))
 )]
 pub struct MeilisearchApi;
 
@@ -218,8 +216,6 @@ pub struct SummarizedTaskView {
         deserialize_with = "time::serde::rfc3339::deserialize"
     )]
     enqueued_at: OffsetDateTime,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    custom_metadata: Option<String>,
 }
 
 impl From<Task> for SummarizedTaskView {
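The removed field used the standard optional-field serde shape: `default` on the way in, `skip_serializing_if` on the way out, so task summaries without metadata omit the key entirely. A self-contained version of the same behavior:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct SummarizedTask {
    task_uid: u32,
    // Absent in JSON when `None`, defaulted to `None` when missing on input.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    custom_metadata: Option<String>,
}

fn main() {
    let t = SummarizedTask { task_uid: 7, custom_metadata: None };
    // Serializes without a `custom_metadata` key at all.
    assert_eq!(serde_json::to_string(&t).unwrap(), r#"{"task_uid":7}"#);
}
```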
@@ -230,7 +226,6 @@ impl From<Task> for SummarizedTaskView {
             status: task.status,
             kind: task.kind.as_kind(),
             enqueued_at: task.enqueued_at,
-            custom_metadata: task.custom_metadata,
         }
     }
 }
@@ -146,7 +146,6 @@ pub struct SearchResults {
 pub async fn multi_search_with_post(
     index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
     search_queue: Data<SearchQueue>,
-    personalization_service: web::Data<crate::personalization::PersonalizationService>,
     params: AwebJson<FederatedSearch, DeserrJsonError>,
     req: HttpRequest,
     analytics: web::Data<Analytics>,
@@ -237,7 +236,7 @@ pub async fn multi_search_with_post(
     // changes.
     let search_results: Result<_, (ResponseError, usize)> = async {
         let mut search_results = Vec::with_capacity(queries.len());
-        for (query_index, (index_uid, mut query, federation_options)) in queries
+        for (query_index, (index_uid, query, federation_options)) in queries
             .into_iter()
             .map(SearchQueryWithIndex::into_index_query_federation)
             .enumerate()
@@ -270,13 +269,6 @@ pub async fn multi_search_with_post(
             })
             .with_index(query_index)?;
 
-            // Extract personalization and query string before moving query
-            let personalize = query.personalize.take();
-
-            // Save the query string for personalization if requested
-            let personalize_query =
-                personalize.is_some().then(|| query.q.clone()).flatten();
-
             let index_uid_str = index_uid.to_string();
 
             let search_kind = search_kind(
@@ -288,7 +280,7 @@ pub async fn multi_search_with_post(
             .with_index(query_index)?;
             let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
 
-            let (mut search_result, time_budget) = tokio::task::spawn_blocking(move || {
+            let search_result = tokio::task::spawn_blocking(move || {
                 perform_search(
                     SearchParams {
                         index_uid: index_uid_str.clone(),
@@ -303,25 +295,11 @@ pub async fn multi_search_with_post(
                 )
             })
            .await
-            .with_index(query_index)?
             .with_index(query_index)?;
-
-            // Apply personalization if requested
-            if let Some(personalize) = personalize.as_ref() {
-                search_result = personalization_service
-                    .rerank_search_results(
-                        search_result,
-                        personalize,
-                        personalize_query.as_deref(),
-                        time_budget,
-                    )
-                    .await
-                    .with_index(query_index)?;
-            }
 
             search_results.push(SearchResultWithIndex {
                 index_uid: index_uid.into_inner(),
-                result: search_result,
+                result: search_result.with_index(query_index)?,
             });
         }
         Ok(search_results)
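Throughout multi-search, fallible steps are tagged with `with_index(query_index)` so an error can be attributed to the exact query in the federated payload that caused it; the hunk above merely moves where the inner result gets tagged. The pattern, sketched with a stand-in extension trait:

```rust
trait WithIndex {
    type Output;
    fn with_index(self, index: usize) -> Self::Output;
}

impl<T, E> WithIndex for Result<T, E> {
    type Output = Result<T, (E, usize)>;
    // Attach the position of the failing query to the error.
    fn with_index(self, index: usize) -> Self::Output {
        self.map_err(|e| (e, index))
    }
}

fn main() {
    let results: Vec<Result<u32, &str>> = vec![Ok(1), Err("bad filter"), Ok(3)];
    let tagged: Result<Vec<u32>, (&str, usize)> = results
        .into_iter()
        .enumerate()
        .map(|(i, r)| r.with_index(i))
        .collect();
    // The error carries the index of the offending query: `queries[1]`.
    assert_eq!(tagged, Err(("bad filter", 1)));
}
```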
@@ -67,7 +67,6 @@ impl MultiSearchAggregator {
             hybrid: _,
             ranking_score_threshold: _,
             locales: _,
-            personalize: _,
         } in &federated_search.queries
         {
             if let Some(federation_options) = federation_options {
Some files were not shown because too many files have changed in this diff.