Compare commits

..

12 Commits

Author SHA1 Message Date
Louis Dureuil  9bd3482230  WIP  2025-10-14 09:09:33 +02:00
Louis Dureuil  d6e4e414d7  Refactor export route subroutine, add extra-header support  2025-10-13 11:53:31 +02:00
Louis Dureuil  c0617efe76  Extract part of the implementation of the network route  2025-10-09 17:01:02 +02:00
Louis Dureuil  8316c36648  Take &str where possible instead of String  2025-10-09 16:49:33 +02:00
Louis Dureuil  572bae9da1  Pass tokio handle to index-scheduler  2025-10-09 13:58:44 +02:00
Louis Dureuil  2a330dce83  Update easy snapshot  2025-09-23 17:43:03 +02:00
Louis Dureuil  d62a6b6f0d  Make network route async  2025-09-23 16:38:22 +02:00
Louis Dureuil  58b8630862  Add existing errors as UserError as they will now be triggered inside of the task  2025-09-23 16:37:59 +02:00
Louis Dureuil  0703767fc6  Add a process network task type  2025-09-23 16:35:48 +02:00
Louis Dureuil  e0c97325d6  Allow to register a NetworkTopologyChange task  2025-09-23 16:34:28 +02:00
Louis Dureuil  0f3ef8de73  move Network route types to meilisearch_types and prefix existing ones by Db  2025-09-23 16:32:34 +02:00
Louis Dureuil  7313cefd74  Add itertools  2025-09-23 16:25:41 +02:00
255 changed files with 4253 additions and 13231 deletions

View File

@@ -24,11 +24,6 @@ TBD
- [ ] If not, add the `no db change` label to your PR, and you're good to merge. - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
- [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do. - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
### Reminders when adding features
- [ ] Write unit tests using insta
- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
### Reminders when modifying the API ### Reminders when modifying the API
- [ ] Update the openAPI file with utoipa: - [ ] Update the openAPI file with utoipa:
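Purely as an illustration of the checklist items in the hunk above (the package filter and helper commands are assumptions, not prescribed by the template):

```bash
# Illustrative commands for the reminders above (assumptions: snapshot tests live
# in the meilisearch crate and cargo-insta is installed).
cargo test -p meilisearch          # runs the insta snapshot tests with the regular test harness
cargo insta review                 # interactively review and accept changed snapshots
cargo xtask test workloads/tests/YOUR_TEST.json --update-responses   # declarative test, as quoted above
```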

View File

@@ -7,5 +7,6 @@ updates:
schedule: schedule:
interval: "monthly" interval: "monthly"
labels: labels:
- 'skip changelog'
- 'dependencies' - 'dependencies'
rebase-strategy: disabled rebase-strategy: disabled

.github/release-draft-template.yml (new file, 33 lines)
View File

@@ -0,0 +1,33 @@
name-template: 'v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
exclude-labels:
- 'skip changelog'
version-resolver:
minor:
labels:
- 'enhancement'
default: patch
categories:
- title: '⚠️ Breaking changes'
label: 'breaking-change'
- title: '🚀 Enhancements'
label: 'enhancement'
- title: '🐛 Bug Fixes'
label: 'bug'
- title: '🔒 Security'
label: 'security'
- title: '⚙️ Maintenance/misc'
label:
- 'maintenance'
- 'documentation'
template: |
$CHANGES
❤️ Huge thanks to our contributors: $CONTRIBUTORS.
no-changes-template: 'Changes are coming soon 😎'
sort-direction: 'ascending'
replacers:
- search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
replace: ''
- search: '/(?:and )?@dependabot(?:\[bot\])?,?/g'
replace: ''

View File

@@ -67,6 +67,8 @@ jobs:
ref: ${{ steps.comment-branch.outputs.head_ref }} ref: ${{ steps.comment-branch.outputs.head_ref }}
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
- name: Run benchmarks on PR ${{ github.event.issue.id }} - name: Run benchmarks on PR ${{ github.event.issue.id }}
run: | run: |

View File

@@ -13,6 +13,8 @@ jobs:
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
# Run benchmarks # Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }} - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}

View File

@@ -19,7 +19,6 @@ env:
- [ ] Detail the change to the DB format and why they are forward compatible - [ ] Detail the change to the DB format and why they are forward compatible
- [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR. - [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
## This PR makes breaking changes ## This PR makes breaking changes
@@ -36,7 +35,8 @@ env:
- [ ] Write the code to go from the old database to the new one - [ ] Write the code to go from the old database to the new one
- If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47) - If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
- If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13) - If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage) - [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
jobs: jobs:
add-comment: add-comment:

View File

@@ -13,12 +13,6 @@ jobs:
image: ubuntu:22.04 image: ubuntu:22.04
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: Install needed dependencies - name: Install needed dependencies
run: | run: |
apt-get update && apt-get install -y curl apt-get update && apt-get install -y curl

View File

@@ -13,6 +13,8 @@ jobs:
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
# Run benchmarks # Run benchmarks
- name: Run the fuzzer - name: Run the fuzzer

View File

@@ -25,12 +25,6 @@ jobs:
run: | run: |
apt-get update && apt-get install -y curl apt-get update && apt-get install -y curl
apt-get install build-essential -y apt-get install build-essential -y
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
- name: Install cargo-deb - name: Install cargo-deb
run: cargo install cargo-deb run: cargo install cargo-deb

View File

@@ -14,105 +14,10 @@ on:
workflow_dispatch: workflow_dispatch:
jobs: jobs:
build: docker:
runs-on: ${{ matrix.runner }} runs-on: docker
strategy:
matrix:
platform: [amd64, arm64]
edition: [community, enterprise]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
- edition: community
registry: getmeili/meilisearch
feature-flag: ""
- edition: enterprise
registry: getmeili/meilisearch-enterprise
feature-flag: "--features enterprise"
permissions: {}
steps:
- uses: actions/checkout@v5
- name: Prepare
run: |
platform=linux/${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
platforms: linux/${{ matrix.platform }}
install: true
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ matrix.registry }}
# Prevent `latest` to be updated for each new tag pushed.
# We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
flavor: latest=false
tags: |
type=ref,event=tag
type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
- name: Build and push by digest
uses: docker/build-push-action@v6
id: build-and-push
with:
platforms: linux/${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ matrix.registry }}
outputs: type=image,push-by-digest=true,name-canonical=true,push=true
build-args: |
COMMIT_SHA=${{ github.sha }}
COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
GIT_TAG=${{ github.ref_name }}
EXTRA_ARGS=${{ matrix.feature-flag }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build-and-push.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ matrix.edition }}-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge:
runs-on: ubuntu-latest
strategy:
matrix:
edition: [community, enterprise]
include:
- edition: community
registry: getmeili/meilisearch
- edition: enterprise
registry: getmeili/meilisearch-enterprise
needs:
- build
permissions: permissions:
id-token: write # This is needed to use Cosign in keyless mode id-token: write # This is needed to use Cosign in keyless mode
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
@@ -153,15 +58,14 @@ jobs:
echo "date=$commit_date" >> $GITHUB_OUTPUT echo "date=$commit_date" >> $GITHUB_OUTPUT
- name: Install cosign - name: Set up QEMU
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0 uses: docker/setup-qemu-action@v3
- name: Download digests - name: Set up Docker Buildx
uses: actions/download-artifact@v4 uses: docker/setup-buildx-action@v3
with:
path: ${{ runner.temp }}/digests - name: Install cosign
pattern: digests-${{ matrix.edition }}-* uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # tag=v3.9.2
merge-multiple: true
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@v3 uses: docker/login-action@v3
@@ -169,14 +73,11 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }} username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Docker meta - name: Docker meta
id: meta id: meta
uses: docker/metadata-action@v5 uses: docker/metadata-action@v5
with: with:
images: ${{ matrix.registry }} images: getmeili/meilisearch
# Prevent `latest` to be updated for each new tag pushed. # Prevent `latest` to be updated for each new tag pushed.
# We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases. # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
flavor: latest=false flavor: latest=false
@@ -187,31 +88,33 @@ jobs:
type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }} type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }} type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
- name: Create manifest list and push - name: Build and push
working-directory: ${{ runner.temp }}/digests uses: docker/build-push-action@v6
run: | id: build-and-push
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ with:
$(printf '${{ matrix.registry }}@sha256:%s ' *) push: true
platforms: linux/amd64,linux/arm64
- name: Inspect image to fetch digest to sign tags: ${{ steps.meta.outputs.tags }}
run: | build-args: |
digest=$(docker buildx imagetools inspect --format='{{ json .Manifest }}' ${{ matrix.registry }}:${{ steps.meta.outputs.version }} | jq -r '.digest') COMMIT_SHA=${{ github.sha }}
echo "DIGEST=${digest}" >> $GITHUB_ENV COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
GIT_TAG=${{ github.ref_name }}
- name: Sign the images with GitHub OIDC Token - name: Sign the images with GitHub OIDC Token
env: env:
DIGEST: ${{ steps.build-and-push.outputs.digest }}
TAGS: ${{ steps.meta.outputs.tags }} TAGS: ${{ steps.meta.outputs.tags }}
run: | run: |
images="" images=""
for tag in ${TAGS}; do for tag in ${TAGS}; do
images+="${tag}@${{ env.DIGEST }} " images+="${tag}@${DIGEST} "
done done
cosign sign --yes ${images} cosign sign --yes ${images}
# /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel # /!\ Don't touch this without checking with Cloud team
- name: Notify meilisearch-cloud - name: Send CI information to Cloud team
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event) # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }} if: github.event_name == 'push'
uses: peter-evans/repository-dispatch@v3 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.MEILI_BOT_GH_PAT }} token: ${{ secrets.MEILI_BOT_GH_PAT }}
@@ -219,13 +122,21 @@ jobs:
event-type: cloud-docker-build event-type: cloud-docker-build
client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }' client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
# /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel # Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
- name: Notify meilisearch-kubernetes # - name: 'Setup jq'
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable # uses: dcarbone/install-jq-action
if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }} # - name: Send deployment to Swarmia
uses: peter-evans/repository-dispatch@v3 # if: github.event_name == 'push' && success()
with: # run: |
token: ${{ secrets.MEILI_BOT_GH_PAT }} # JSON_STRING=$( jq --null-input --compact-output \
repository: meilisearch/meilisearch-kubernetes # --arg version "${{ github.ref_name }}" \
event-type: meilisearch-release # --arg appName "meilisearch" \
client-payload: '{ "version": "${{ github.ref_name }}" }' # --arg environment "production" \
# --arg commitSha "${{ github.sha }}" \
# --arg repositoryFullName "${{ github.repository }}" \
# '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
# curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
# -H "Content-Type: application/json" \
# -d "$JSON_STRING" \
# https://hook.swarmia.com/deployments

View File

@@ -11,7 +11,7 @@ jobs:
check-version: check-version:
name: Check the version validity name: Check the version validity
runs-on: ubuntu-latest runs-on: ubuntu-latest
# No need to check the version for dry run (cron or workflow_dispatch) # No need to check the version for dry run (cron)
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
# Check if the tag has the v<nmumber>.<number>.<number> format. # Check if the tag has the v<nmumber>.<number>.<number> format.
@@ -32,66 +32,161 @@ jobs:
if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true' if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
run: bash .github/scripts/check-release.sh run: bash .github/scripts/check-release.sh
publish-binaries: publish-linux:
name: Publish binary for ${{ matrix.release }} ${{ matrix.edition }} edition name: Publish binary for Linux
runs-on: ${{ matrix.os }} runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
edition: [community, enterprise]
release:
[macos-amd64, macos-aarch64, windows, linux-amd64, linux-aarch64]
include:
- edition: "community"
feature-flag: ""
edition-suffix: ""
- edition: "enterprise"
feature-flag: "--features enterprise"
edition-suffix: "enterprise-"
- release: macos-amd64
os: macos-15-intel
binary_path: release/meilisearch
asset_name: macos-amd64
extra-args: ""
- release: macos-aarch64
os: macos-14
binary_path: aarch64-apple-darwin/release/meilisearch
asset_name: macos-apple-silicon
extra-args: "--target aarch64-apple-darwin"
- release: windows
os: windows-2022
binary_path: release/meilisearch.exe
asset_name: windows-amd64.exe
extra-args: ""
- release: linux-amd64
os: ubuntu-22.04
binary_path: x86_64-unknown-linux-gnu/release/meilisearch
asset_name: linux-amd64
extra-args: "--target x86_64-unknown-linux-gnu"
- release: linux-aarch64
os: ubuntu-22.04-arm
binary_path: aarch64-unknown-linux-gnu/release/meilisearch
asset_name: linux-aarch64
extra-args: "--target aarch64-unknown-linux-gnu"
needs: check-version needs: check-version
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
- name: Build - name: Build
run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }} run: cargo build --release --locked
# No need to upload binaries for dry run (cron or workflow_dispatch) # No need to upload binaries for dry run (cron)
- name: Upload binaries to release - name: Upload binaries to release
if: github.event_name == 'release' if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2 uses: svenstaro/upload-release-action@2.11.2
with: with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }} repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.binary_path }} file: target/release/meilisearch
asset_name: meilisearch-${{ matrix.edition-suffix }}${{ matrix.asset_name }} asset_name: meilisearch-linux-amd64
tag: ${{ github.ref }}
publish-macos-windows:
name: Publish binary for ${{ matrix.os }}
runs-on: ${{ matrix.os }}
needs: check-version
strategy:
fail-fast: false
matrix:
os: [macos-13, windows-2022]
include:
- os: macos-13
artifact_name: meilisearch
asset_name: meilisearch-macos-amd64
- os: windows-2022
artifact_name: meilisearch.exe
asset_name: meilisearch-windows-amd64.exe
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/release/${{ matrix.artifact_name }}
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
publish-macos-apple-silicon:
name: Publish binary for macOS silicon
runs-on: macos-13
needs: check-version
strategy:
matrix:
include:
- target: aarch64-apple-darwin
asset_name: meilisearch-macos-apple-silicon
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Installing Rust toolchain
uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
target: ${{ matrix.target }}
- name: Cargo build
uses: actions-rs/cargo@v1
with:
command: build
args: --release --target ${{ matrix.target }}
- name: Upload the binary to release
# No need to upload binaries for dry run (cron)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
publish-aarch64:
name: Publish binary for aarch64
runs-on: ubuntu-latest
needs: check-version
env:
DEBIAN_FRONTEND: noninteractive
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
strategy:
matrix:
include:
- target: aarch64-unknown-linux-gnu
asset_name: meilisearch-linux-aarch64
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Install needed dependencies
run: |
apt-get update -y && apt upgrade -y
apt-get install -y curl build-essential gcc-aarch64-linux-gnu
- name: Set up Docker for cross compilation
run: |
apt-get install -y curl apt-transport-https ca-certificates software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update -y && apt-get install -y docker-ce
- name: Installing Rust toolchain
uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
target: ${{ matrix.target }}
- name: Configure target aarch64 GNU
## Environment variable is not passed using env:
## LD gold won't work with MUSL
# env:
# JEMALLOC_SYS_WITH_LG_PAGE: 16
# RUSTFLAGS: '-Clink-arg=-fuse-ld=gold'
run: |
echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
- name: Install a default toolchain that will be used to build cargo cross
run: |
rustup default stable
- name: Cargo build
uses: actions-rs/cargo@v1
with:
command: build
use-cross: true
args: --release --target ${{ matrix.target }}
env:
CROSS_DOCKER_IN_DOCKER: true
- name: List target output files
run: ls -lR ./target
- name: Upload the binary to release
# No need to upload binaries for dry run (cron)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }} tag: ${{ github.ref }}
publish-openapi-file: publish-openapi-file:
name: Publish OpenAPI file name: Publish OpenAPI file
needs: check-version
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
@@ -106,7 +201,7 @@ jobs:
cd crates/openapi-generator cd crates/openapi-generator
cargo run --release -- --pretty --output ../../meilisearch.json cargo run --release -- --pretty --output ../../meilisearch.json
- name: Upload OpenAPI to Release - name: Upload OpenAPI to Release
# No need to upload for dry run (cron or workflow_dispatch) # No need to upload for dry run (cron)
if: github.event_name == 'release' if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2 uses: svenstaro/upload-release-action@2.11.2
with: with:

.github/workflows/release-drafter.yml (new file, 20 lines)
View File

@@ -0,0 +1,20 @@
name: Release Drafter
permissions:
contents: read
pull-requests: write
on:
push:
branches:
- main
jobs:
update_release_draft:
runs-on: ubuntu-latest
steps:
- uses: release-drafter/release-drafter@v6
with:
config-name: release-draft-template.yml
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_DRAFTER_TOKEN }}

View File

@@ -50,7 +50,7 @@ jobs:
with: with:
repository: meilisearch/meilisearch-dotnet repository: meilisearch/meilisearch-dotnet
- name: Setup .NET Core - name: Setup .NET Core
uses: actions/setup-dotnet@v5 uses: actions/setup-dotnet@v4
with: with:
dotnet-version: "8.0.x" dotnet-version: "8.0.x"
- name: Install dependencies - name: Install dependencies
@@ -68,7 +68,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -92,7 +92,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -100,7 +100,7 @@ jobs:
- '7700:7700' - '7700:7700'
steps: steps:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v6 uses: actions/setup-go@v5
with: with:
go-version: stable go-version: stable
- uses: actions/checkout@v5 - uses: actions/checkout@v5
@@ -122,7 +122,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -135,13 +135,13 @@ jobs:
- name: Set up Java - name: Set up Java
uses: actions/setup-java@v5 uses: actions/setup-java@v5
with: with:
java-version: 17 java-version: 8
distribution: 'temurin' distribution: 'zulu'
cache: gradle cache: gradle
- name: Grant execute permission for gradlew - name: Grant execute permission for gradlew
run: chmod +x gradlew run: chmod +x gradlew
- name: Build and run unit and integration tests - name: Build and run unit and integration tests
run: ./gradlew build integrationTest --info run: ./gradlew build integrationTest
meilisearch-js-tests: meilisearch-js-tests:
needs: define-docker-image needs: define-docker-image
@@ -149,7 +149,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -160,7 +160,7 @@ jobs:
with: with:
repository: meilisearch/meilisearch-js repository: meilisearch/meilisearch-js
- name: Setup node - name: Setup node
uses: actions/setup-node@v5 uses: actions/setup-node@v4
with: with:
cache: 'yarn' cache: 'yarn'
- name: Install dependencies - name: Install dependencies
@@ -184,7 +184,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -213,7 +213,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -224,7 +224,7 @@ jobs:
with: with:
repository: meilisearch/meilisearch-python repository: meilisearch/meilisearch-python
- name: Set up Python - name: Set up Python
uses: actions/setup-python@v6 uses: actions/setup-python@v5
- name: Install pipenv - name: Install pipenv
uses: dschep/install-pipenv-action@v1 uses: dschep/install-pipenv-action@v1
- name: Install dependencies - name: Install dependencies
@@ -238,7 +238,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -263,7 +263,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -284,7 +284,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -307,7 +307,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -318,7 +318,7 @@ jobs:
with: with:
repository: meilisearch/meilisearch-js-plugins repository: meilisearch/meilisearch-js-plugins
- name: Setup node - name: Setup node
uses: actions/setup-node@v5 uses: actions/setup-node@v4
with: with:
cache: yarn cache: yarn
- name: Install dependencies - name: Install dependencies
@@ -338,7 +338,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -370,7 +370,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}

View File

@@ -15,40 +15,31 @@ env:
jobs: jobs:
test-linux: test-linux:
name: Tests on Ubuntu name: Tests on ubuntu-22.04
runs-on: ${{ matrix.runner }} runs-on: ubuntu-latest
strategy: container:
matrix: # Use ubuntu-22.04 to compile with glibc 2.35
runner: [ubuntu-22.04, ubuntu-22.04-arm] image: ubuntu:22.04
features: ["", "--features enterprise"]
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: check free space before - name: Install needed dependencies
run: df -h
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update && apt-get install -y curl
sudo rm -rf "/usr/share/dotnet" || true apt-get install build-essential -y
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: check free space after
run: df -h
- name: Setup test with Rust stable - name: Setup test with Rust stable
uses: dtolnay/rust-toolchain@1.89 uses: dtolnay/rust-toolchain@1.89
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
with: - name: Run cargo check without any default features
key: ${{ matrix.features }}
- name: Run cargo build without any default features
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: build command: build
args: --locked --no-default-features --all args: --locked --release --no-default-features --all
- name: Run cargo test - name: Run cargo test
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: test command: test
args: --locked --all ${{ matrix.features }} args: --locked --release --all
test-others: test-others:
name: Tests on ${{ matrix.os }} name: Tests on ${{ matrix.os }}
@@ -56,58 +47,51 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
os: [macos-14, windows-2022] os: [macos-13, windows-2022]
features: ["", "--features enterprise"]
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
- name: Run cargo build without any default features - name: Run cargo check without any default features
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: build command: build
args: --locked --no-default-features --all args: --locked --release --no-default-features --all
- name: Run cargo test - name: Run cargo test
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: test command: test
args: --locked --all ${{ matrix.features }} args: --locked --release --all
test-all-features: test-all-features:
name: Tests almost all features name: Tests almost all features
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - name: Install needed dependencies
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update
sudo rm -rf "/usr/share/dotnet" || true apt-get install --assume-yes build-essential curl
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
- name: Run cargo build with almost all features - name: Run cargo build with almost all features
run: | run: |
cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)" cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
- name: Run cargo test with almost all features - name: Run cargo test with almost all features
run: | run: |
cargo test --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)" cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
ollama-ubuntu: ollama-ubuntu:
name: Test with Ollama name: Test with Ollama
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
env: env:
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434" MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: Install Ollama - name: Install Ollama
run: | run: |
curl -fsSL https://ollama.com/install.sh | sudo -E sh curl -fsSL https://ollama.com/install.sh | sudo -E sh
@@ -131,20 +115,20 @@ jobs:
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: test command: test
args: --locked -p meilisearch --features test-ollama ollama args: --locked --release --all --features test-ollama ollama
test-disabled-tokenization: test-disabled-tokenization:
name: Test disabled tokenization name: Test disabled tokenization
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
container:
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - name: Install needed dependencies
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update
sudo rm -rf "/usr/share/dotnet" || true apt-get install --assume-yes build-essential curl
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
- name: Run cargo tree without default features and check lindera is not present - name: Run cargo tree without default features and check lindera is not present
run: | run: |
@@ -156,39 +140,36 @@ jobs:
run: | run: |
cargo tree -f '{p} {f}' -e normal | grep lindera -qz cargo tree -f '{p} {f}' -e normal | grep lindera -qz
build: # We run tests in debug also, to make sure that the debug_assertions are hit
name: Build in release test-debug:
runs-on: ubuntu-22.04 name: Run tests in debug
runs-on: ubuntu-latest
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - name: Install needed dependencies
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update && apt-get install -y curl
sudo rm -rf "/usr/share/dotnet" || true apt-get install build-essential -y
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
- name: Build - name: Run tests in debug
run: cargo build --release --locked --target x86_64-unknown-linux-gnu uses: actions-rs/cargo@v1
with:
command: test
args: --locked --all
clippy: clippy:
name: Run Clippy name: Run Clippy
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
strategy:
matrix:
features: ["", "--features enterprise"]
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
with: with:
profile: minimal
components: clippy components: clippy
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
@@ -196,21 +177,18 @@ jobs:
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: clippy command: clippy
args: --all-targets ${{ matrix.features }} -- --deny warnings args: --all-targets -- --deny warnings
fmt: fmt:
name: Run Rustfmt name: Run Rustfmt
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
with: with:
profile: minimal
toolchain: nightly-2024-07-09
override: true
components: rustfmt components: rustfmt
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
@@ -221,23 +199,3 @@ jobs:
run: | run: |
echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
cargo fmt --all -- --check cargo fmt --all -- --check
declarative-tests:
name: Run declarative tests
runs-on: ubuntu-22.04-arm
permissions:
contents: read
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- name: Run declarative tests
run: |
cargo xtask test workloads/tests/*.json

View File

@@ -18,13 +18,9 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89 - uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
- name: Install sd - name: Install sd
run: cargo install sd run: cargo install sd
- name: Update Cargo.toml file - name: Update Cargo.toml file

View File

@@ -124,7 +124,6 @@ They are JSON files with the following structure (comments are not actually supp
{ {
// Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard. // Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
"name": "hackernews.ndjson_1M,no-threads", "name": "hackernews.ndjson_1M,no-threads",
"type": "bench",
// Number of consecutive runs of the commands that should be performed. // Number of consecutive runs of the commands that should be performed.
// Each run uses a fresh instance of Meilisearch and a fresh database. // Each run uses a fresh instance of Meilisearch and a fresh database.
// Each run produces its own report file. // Each run produces its own report file.

Cargo.lock (generated): 1561 lines changed

File diff suppressed because it is too large.

View File

@@ -23,7 +23,7 @@ members = [
] ]
[workspace.package] [workspace.package]
version = "1.29.0" version = "1.21.0"
authors = [ authors = [
"Quentin de Quelen <quentin@dequelen.me>", "Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>", "Clément Renault <clement@meilisearch.com>",
@@ -50,5 +50,3 @@ opt-level = 3
opt-level = 3 opt-level = 3
[profile.dev.package.roaring] [profile.dev.package.roaring]
opt-level = 3 opt-level = 3
[profile.dev.package.gemm-f16]
opt-level = 3

Cross.toml (new file, 7 lines)
View File

@@ -0,0 +1,7 @@
[build.env]
passthrough = [
"RUST_BACKTRACE",
"CARGO_TERM_COLOR",
"RUSTFLAGS",
"JEMALLOC_SYS_WITH_LG_PAGE"
]
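
This passthrough list lets `cross` forward the listed variables into its build container. A minimal local sketch mirroring the aarch64 release job above (assuming `cross` is installed; values taken from that workflow, not an official script):

```bash
# Hypothetical local equivalent of the aarch64 release build; the variables listed
# in Cross.toml's `passthrough` are forwarded into the cross build container.
export JEMALLOC_SYS_WITH_LG_PAGE=16     # 64 KiB jemalloc pages on aarch64, as in the CI job above
export CROSS_DOCKER_IN_DOCKER=true      # only needed when the build itself runs inside a container
cross build --release --target aarch64-unknown-linux-gnu
```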

View File

@@ -1,5 +1,5 @@
# Compile # Compile
FROM rust:1.89-alpine3.22 AS compiler FROM rust:1.89-alpine3.20 AS compiler
RUN apk add -q --no-cache build-base openssl-dev RUN apk add -q --no-cache build-base openssl-dev
@@ -8,17 +8,19 @@ WORKDIR /
ARG COMMIT_SHA ARG COMMIT_SHA
ARG COMMIT_DATE ARG COMMIT_DATE
ARG GIT_TAG ARG GIT_TAG
ARG EXTRA_ARGS
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG} ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG}
ENV RUSTFLAGS="-C target-feature=-crt-static" ENV RUSTFLAGS="-C target-feature=-crt-static"
COPY . . COPY . .
RUN set -eux; \ RUN set -eux; \
apkArch="$(apk --print-arch)"; \ apkArch="$(apk --print-arch)"; \
cargo build --release -p meilisearch -p meilitool ${EXTRA_ARGS} if [ "$apkArch" = "aarch64" ]; then \
export JEMALLOC_SYS_WITH_LG_PAGE=16; \
fi && \
cargo build --release -p meilisearch -p meilitool
# Run # Run
FROM alpine:3.22 FROM alpine:3.20
LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch" LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch"
ENV MEILI_HTTP_ADDR 0.0.0.0:7700 ENV MEILI_HTTP_ADDR 0.0.0.0:7700

LICENSE (28 lines changed)
View File

@@ -1,9 +1,29 @@
# License MIT License
Copyright (c) 2019-2025 Meili SAS Copyright (c) 2019-2025 Meili SAS
Part of this work fall under the Meilisearch Enterprise Edition (EE) and are licensed under the Business Source License 1.1, please refer to [LICENSE-EE](./LICENSE-EE) for details. Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The other parts of this work are licensed under the [MIT license](./LICENSE-MIT). The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
`SPDX-License-Identifier: MIT AND BUSL-1.1` THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---
🔒 Meilisearch Enterprise Edition (EE)
Certain parts of this codebase are not licensed under the MIT license and governed by the Business Source License 1.1.
See the LICENSE-EE file for details.

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2019-2025 Meili SAS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -39,7 +39,6 @@
## 🖥 Examples ## 🖥 Examples
- [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos). - [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
- [**Flickr**](https://flickr.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — Search and explore one hundred million Flickr images with semantic search.
- [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination. - [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
- [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs. - [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
- [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application. - [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.
@@ -122,7 +121,7 @@ If you want to know more about the kind of data we collect and what we use it fo
Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact) Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
🗞 [Subscribe to our newsletter](https://share-eu1.hsforms.com/1LN5N0x_GQgq7ss7tXmSykwfg3aq) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months. 🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us: 💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:

View File

@@ -1,326 +0,0 @@
# Declarative tests
Declarative tests ensure that Meilisearch features remain stable across versions.
While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.
Declarative tests instead **simulate the lifetime of a database**: they chain together commands (API requests) and binary changes, verifying that database state and API responses remain consistent across versions.
## Basic example
```jsonc
{
"type": "test",
"name": "api-keys",
"binary": { // the first command will run on the binary following this specification.
"source": "release", // get the binary as a release from GitHub
"version": "1.19.0", // version to fetch
"edition": "community" // edition to fetch
},
"commands": []
}
```
This example defines a no-op test (it does nothing).
If the file is saved at `workloads/tests/example.json`, you can run it with:
```bash
cargo xtask test workloads/tests/example.json
```
## Commands
Commands represent API requests sent to Meilisearch endpoints during a test.
They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
}
}
```
This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.
### Using assets in commands
To keep tests concise and reusable, you can define **assets** at the root of the workload file.
Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0",
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
}
]
}
```
In this example:
- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.
This makes the test much cleaner than inlining a large dataset directly into the command.
For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).
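For reference, the `sha256` value of a new local asset can be computed with standard tools before adding it (illustrative commands, not a project-specific helper):

```bash
# Compute the checksum to paste into the asset's "sha256" field.
sha256sum movies.json          # Linux (coreutils)
shasum -a 256 movies.json      # macOS equivalent
```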
### Asserting responses
Commands can specify both the **expected status code** and the **expected response body**.
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
},
"expectedStatus": 202,
"expectedResponse": {
"enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
"indexUid": "movies",
"status": "enqueued",
"taskUid": 1,
"type": "documentAdditionOrUpdate"
},
"synchronous": "WaitForTask"
}
```
Manually writing `expectedResponse` fields can be tedious.
Instead, you can let the test runner populate them automatically:
```bash
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
cargo xtask test workloads/tests/example.json --add-missing-responses
# OR
# Run the workload to populate expected fields. Updates all fields including existing ones
cargo xtask test workloads/tests/example.json --update-responses
```
This workflow is recommended:
1. Write the test without expected fields.
2. Run it with `--add-missing-responses` to capture the actual responses.
3. Review and commit the generated expectations.
## Changing the binary
During a test, you can insert an instruction that switches the current Meilisearch instance from one binary specification to another.
When executed, such an instruction will:
1. Stop the current Meilisearch instance.
2. Fetch the binary specified by the instruction.
3. Restart the server with the specified binary on the same database.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0", // start with version v1.19.0
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
// setup some data
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
},
// switch binary to v1.24.0
{
"binary": {
"source": "release",
"version": "1.24.0",
"edition": "community"
}
}
]
}
```
### Typical usage
In most cases, a binary-change instruction is used to upgrade a database:
- **Set up** some data using commands on an older version.
- **Upgrade** to the latest version.
- **Assert** that the data and API behavior remain correct after the upgrade.
To properly test the dumpless upgrade, one should typically:
1. Open the database without processing the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments.
2. Check that the search, stats and task queue still work.
3. Open the database and process the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish.
4. Check that the indexing, search, stats, and task queue still work.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.12.0",
"edition": "community"
},
"commands": [
// 0. Run commands to populate the database
{
// ..
},
// 1. Open the database with the new Meilisearch without processing the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade", // allows to open with a newer MS
"--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
]
}
},
// 2. Check the search etc.
{
// ..
},
// 3. Open the database with the new Meilisearch and process the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade" // allows to open with a newer MS
// no `--experimental-max-number-of-batched-tasks=0`
]
}
},
// 4. Check the indexing, search, etc.
{
// ..
}
]
}
```
This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.
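The `health` command mentioned in step 3 is not shown above. A minimal sketch could look like the following; the exact response body and `synchronous` value are assumptions and may need adjusting:
```jsonc
{
  "route": "health",
  "method": "GET",
  "expectedStatus": 200,
  "expectedResponse": {
    "status": "available" // Meilisearch answers once it is ready to serve requests again
  },
  "synchronous": "WaitForResponse"
}
```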
## Variables
Sometimes a command needs to use a value returned by a **previous response**.
These values can be captured and reused using the `register` field.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
},
"expectedResponse": {
"key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
// ...
},
"register": {
"key": "/key"
},
"synchronous": "WaitForResponse"
}
```
The `register` field captures the value at the JSON path `/key` from the response.
Paths follow the **JSON Pointer (RFC 6901)** format.
Registered variables are available to all subsequent commands and can be referenced by wrapping their names in double curly braces:
In the route/path:
```jsonc
{
"route": "tasks/{{ task_id }}",
"method": "GET"
}
```
In the request body:
```jsonc
{
"route": "indexes/movies/documents",
"method": "PATCH",
"body": {
"inline": {
"id": "{{ document_id }}",
"overview": "Shazam turns evil and the world is in danger.",
}
}
}
```
Or they can be referenced by their name (**without curly braces**) as an API key:
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": { /* ... */ },
"apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
}
```
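Putting it together, a hypothetical two-command sequence (field values are illustrative) can register the `taskUid` of an enqueued operation and reuse it in a later route:
```jsonc
[
  {
    "route": "indexes/movies/documents",
    "method": "POST",
    "body": { "asset": "movies.json" },
    "register": {
      "task_id": "/taskUid" // JSON pointer into the enqueue response
    },
    "synchronous": "WaitForTask"
  },
  {
    "route": "tasks/{{ task_id }}", // the captured value is substituted into the path
    "method": "GET",
    "expectedStatus": 200
  }
]
```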

View File

@@ -11,27 +11,27 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
bumpalo = "3.19.0" bumpalo = "3.18.1"
csv = "1.4.0" csv = "1.3.1"
memmap2 = "0.9.9" memmap2 = "0.9.7"
milli = { path = "../milli" } milli = { path = "../milli" }
mimalloc = { version = "0.1.48", default-features = false } mimalloc = { version = "0.1.47", default-features = false }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tempfile = "3.23.0" tempfile = "3.20.0"
[dev-dependencies] [dev-dependencies]
criterion = { version = "0.7.0", features = ["html_reports"] } criterion = { version = "0.6.0", features = ["html_reports"] }
rand = "0.8.5" rand = "0.8.5"
rand_chacha = "0.3.1" rand_chacha = "0.3.1"
roaring = "0.10.12" roaring = "0.10.12"
[build-dependencies] [build-dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
bytes = "1.11.0" bytes = "1.10.1"
convert_case = "0.9.0" convert_case = "0.8.0"
flate2 = "1.1.5" flate2 = "1.1.2"
reqwest = { version = "0.12.24", features = ["blocking", "rustls-tls"], default-features = false } reqwest = { version = "0.12.20", features = ["blocking", "rustls-tls"], default-features = false }
[features] [features]
default = ["milli/all-tokenizations"] default = ["milli/all-tokenizations"]

View File

@@ -21,10 +21,6 @@ use roaring::RoaringBitmap;
#[global_allocator] #[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc; static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
fn no_cancel() -> bool {
false
}
const BENCHMARK_ITERATION: usize = 10; const BENCHMARK_ITERATION: usize = 10;
fn setup_dir(path: impl AsRef<Path>) { fn setup_dir(path: impl AsRef<Path>) {
@@ -69,7 +65,7 @@ fn setup_settings<'t>(
let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect(); let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
builder.set_sortable_fields(sortable_fields); builder.set_sortable_fields(sortable_fields);
builder.execute(&no_cancel, &Progress::default(), Default::default()).unwrap(); builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
} }
fn setup_index_with_settings( fn setup_index_with_settings(
@@ -156,7 +152,7 @@ fn indexing_songs_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -172,7 +168,7 @@ fn indexing_songs_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -224,7 +220,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -240,7 +236,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -270,7 +266,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -286,7 +282,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -340,7 +336,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -356,7 +352,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -418,7 +414,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -434,7 +430,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -464,7 +460,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -480,7 +476,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -506,7 +502,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -522,7 +518,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -575,7 +571,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -591,7 +587,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -643,7 +639,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -659,7 +655,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -711,7 +707,7 @@ fn indexing_wiki(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -727,7 +723,7 @@ fn indexing_wiki(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -778,7 +774,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -794,7 +790,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -824,7 +820,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -840,7 +836,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -893,7 +889,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -909,7 +905,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -971,7 +967,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -987,7 +983,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1018,7 +1014,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1034,7 +1030,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1061,7 +1057,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1077,7 +1073,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1129,7 +1125,7 @@ fn indexing_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1145,7 +1141,7 @@ fn indexing_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1196,7 +1192,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1212,7 +1208,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1242,7 +1238,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1258,7 +1254,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1311,7 +1307,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1327,7 +1323,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1376,7 +1372,7 @@ fn delete_documents_from_ids(index: Index, document_ids_to_delete: Vec<RoaringBi
Some(primary_key), Some(primary_key),
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1426,7 +1422,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1442,7 +1438,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1472,7 +1468,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1488,7 +1484,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1514,7 +1510,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1530,7 +1526,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1605,7 +1601,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1621,7 +1617,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1697,7 +1693,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1713,7 +1709,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1781,7 +1777,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1797,7 +1793,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1849,7 +1845,7 @@ fn indexing_geo(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1865,7 +1861,7 @@ fn indexing_geo(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1916,7 +1912,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1932,7 +1928,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1962,7 +1958,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1978,7 +1974,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -2031,7 +2027,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -2047,7 +2043,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )

View File

@@ -11,8 +11,8 @@ license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
time = { version = "0.3.44", features = ["parsing"] } time = { version = "0.3.41", features = ["parsing"] }
[build-dependencies] [build-dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
vergen-gitcl = "1.0.8" vergen-git2 = "1.0.7"

View File

@@ -15,7 +15,7 @@ fn emit_git_variables() -> anyhow::Result<()> {
// Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them // Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
// in the corresponding GitHub workflow (publish_docker.yml). // in the corresponding GitHub workflow (publish_docker.yml).
// This is due to the Dockerfile building the binary outside of the git directory. // This is due to the Dockerfile building the binary outside of the git directory.
let mut builder = vergen_gitcl::GitclBuilder::default(); let mut builder = vergen_git2::Git2Builder::default();
builder.branch(true); builder.branch(true);
builder.commit_timestamp(true); builder.commit_timestamp(true);
@@ -25,5 +25,5 @@ fn emit_git_variables() -> anyhow::Result<()> {
let git2 = builder.build()?; let git2 = builder.build()?;
vergen_gitcl::Emitter::default().fail_on_error().add_instructions(&git2)?.emit() vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
} }

View File

@@ -1,6 +0,0 @@
use build_info::BuildInfo;
fn main() {
let info = BuildInfo::from_build();
dbg!(info);
}

View File

@@ -11,27 +11,24 @@ readme.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
flate2 = "1.1.5" flate2 = "1.1.2"
http = "1.3.1" http = "1.3.1"
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
once_cell = "1.21.3" once_cell = "1.21.3"
regex = "1.12.2" regex = "1.11.1"
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44" tar = "0.4.44"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] } time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tracing = "0.1.41" tracing = "0.1.41"
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }
[dev-dependencies] [dev-dependencies]
big_s = "1.0.2" big_s = "1.0.2"
maplit = "1.0.2" maplit = "1.0.2"
meili-snap = { path = "../meili-snap" } meili-snap = { path = "../meili-snap" }
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
[features]
enterprise = ["meilisearch-types/enterprise"]

View File

@@ -96,8 +96,6 @@ pub struct TaskDump {
pub finished_at: Option<OffsetDateTime>, pub finished_at: Option<OffsetDateTime>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<TaskNetwork>, pub network: Option<TaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
} }
// A `Kind` specific version made for the dump. If modified you may break the dump. // A `Kind` specific version made for the dump. If modified you may break the dump.
@@ -160,8 +158,9 @@ pub enum KindDump {
UpgradeDatabase { UpgradeDatabase {
from: (u32, u32, u32), from: (u32, u32, u32),
}, },
IndexCompaction { NetworkTopologyChange {
index_uid: String, network: Option<meilisearch_types::enterprise_edition::network::Network>,
origin: Option<meilisearch_types::tasks::Origin>,
}, },
} }
@@ -180,7 +179,6 @@ impl From<Task> for TaskDump {
started_at: task.started_at, started_at: task.started_at,
finished_at: task.finished_at, finished_at: task.finished_at,
network: task.network, network: task.network,
custom_metadata: task.custom_metadata,
} }
} }
} }
@@ -246,8 +244,8 @@ impl From<KindWithContent> for KindDump {
KindWithContent::UpgradeDatabase { from: version } => { KindWithContent::UpgradeDatabase { from: version } => {
KindDump::UpgradeDatabase { from: version } KindDump::UpgradeDatabase { from: version }
} }
KindWithContent::IndexCompaction { index_uid } => { KindWithContent::NetworkTopologyChange { network, origin } => {
KindDump::IndexCompaction { index_uid } KindDump::NetworkTopologyChange { network, origin }
} }
} }
} }
@@ -262,13 +260,13 @@ pub(crate) mod test {
use big_s::S; use big_s::S;
use maplit::{btreemap, btreeset}; use maplit::{btreemap, btreeset};
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats}; use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
use meilisearch_types::facet_values_sort::FacetValuesSort; use meilisearch_types::facet_values_sort::FacetValuesSort;
use meilisearch_types::features::RuntimeTogglableFeatures; use meilisearch_types::features::RuntimeTogglableFeatures;
use meilisearch_types::index_uid_pattern::IndexUidPattern; use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::keys::{Action, Key}; use meilisearch_types::keys::{Action, Key};
use meilisearch_types::milli::update::Setting; use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::{self, FilterableAttributesRule}; use meilisearch_types::milli::{self, FilterableAttributesRule};
use meilisearch_types::network::{Network, Remote};
use meilisearch_types::settings::{Checked, FacetingSettings, Settings}; use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
use meilisearch_types::task_view::DetailsView; use meilisearch_types::task_view::DetailsView;
use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status}; use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
@@ -399,7 +397,6 @@ pub(crate) mod test {
started_at: Some(datetime!(2022-11-20 0:00 UTC)), started_at: Some(datetime!(2022-11-20 0:00 UTC)),
finished_at: Some(datetime!(2022-11-21 0:00 UTC)), finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
network: None, network: None,
custom_metadata: None,
}, },
None, None,
), ),
@@ -425,7 +422,6 @@ pub(crate) mod test {
started_at: None, started_at: None,
finished_at: None, finished_at: None,
network: None, network: None,
custom_metadata: None,
}, },
Some(vec![ Some(vec![
json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(), json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
@@ -446,7 +442,6 @@ pub(crate) mod test {
started_at: None, started_at: None,
finished_at: None, finished_at: None,
network: None, network: None,
custom_metadata: None,
}, },
None, None,
), ),
@@ -556,10 +551,10 @@ pub(crate) mod test {
RuntimeTogglableFeatures::default() RuntimeTogglableFeatures::default()
} }
fn create_test_network() -> Network { fn create_test_network() -> DbNetwork {
Network { DbNetwork {
local: Some("myself".to_string()), local: Some("myself".to_string()),
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }}, remotes: maplit::btreemap! {"other".to_string() => DbRemote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
sharding: false, sharding: false,
} }
} }

View File

@@ -164,7 +164,6 @@ impl CompatV5ToV6 {
started_at: task_view.started_at, started_at: task_view.started_at,
finished_at: task_view.finished_at, finished_at: task_view.finished_at,
network: None, network: None,
custom_metadata: None,
}; };
(task, content_file) (task, content_file)

View File

@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
pub type Key = meilisearch_types::keys::Key; pub type Key = meilisearch_types::keys::Key;
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings; pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures; pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
pub type Network = meilisearch_types::network::Network; pub type Network = meilisearch_types::enterprise_edition::network::DbNetwork;
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView; pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
// ===== Other types to clarify the code of the compat module // ===== Other types to clarify the code of the compat module

View File

@@ -5,9 +5,9 @@ use std::path::PathBuf;
use flate2::write::GzEncoder; use flate2::write::GzEncoder;
use flate2::Compression; use flate2::Compression;
use meilisearch_types::batches::Batch; use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::DbNetwork;
use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures}; use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
use meilisearch_types::keys::Key; use meilisearch_types::keys::Key;
use meilisearch_types::network::Network;
use meilisearch_types::settings::{Checked, Settings}; use meilisearch_types::settings::{Checked, Settings};
use meilisearch_types::webhooks::WebhooksDumpView; use meilisearch_types::webhooks::WebhooksDumpView;
use serde_json::{Map, Value}; use serde_json::{Map, Value};
@@ -72,7 +72,7 @@ impl DumpWriter {
)?) )?)
} }
pub fn create_network(&self, network: Network) -> Result<()> { pub fn create_network(&self, network: DbNetwork) -> Result<()> {
Ok(std::fs::write(self.dir.path().join("network.json"), serde_json::to_string(&network)?)?) Ok(std::fs::write(self.dir.path().join("network.json"), serde_json::to_string(&network)?)?)
} }

View File

@@ -11,7 +11,7 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
tracing = "0.1.41" tracing = "0.1.41"
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }

View File

@@ -60,7 +60,7 @@ impl FileStore {
/// Returns the file corresponding to the requested uuid. /// Returns the file corresponding to the requested uuid.
pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> { pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> {
let path = self.update_path(uuid); let path = self.get_update_path(uuid);
let file = match StdFile::open(path) { let file = match StdFile::open(path) {
Ok(file) => file, Ok(file) => file,
Err(e) => { Err(e) => {
@@ -72,7 +72,7 @@ impl FileStore {
} }
/// Returns the path that correspond to this uuid, the path could not exists. /// Returns the path that correspond to this uuid, the path could not exists.
pub fn update_path(&self, uuid: Uuid) -> PathBuf { pub fn get_update_path(&self, uuid: Uuid) -> PathBuf {
self.path.join(uuid.to_string()) self.path.join(uuid.to_string())
} }

View File

@@ -16,7 +16,7 @@ license.workspace = true
serde_json = "1.0" serde_json = "1.0"
[dev-dependencies] [dev-dependencies]
criterion = { version = "0.7.0", features = ["html_reports"] } criterion = { version = "0.6.0", features = ["html_reports"] }
[[bench]] [[bench]]
name = "benchmarks" name = "benchmarks"

View File

@@ -11,12 +11,12 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
arbitrary = { version = "1.4.2", features = ["derive"] } arbitrary = { version = "1.4.1", features = ["derive"] }
bumpalo = "3.19.0" bumpalo = "3.18.1"
clap = { version = "4.5.52", features = ["derive"] } clap = { version = "4.5.40", features = ["derive"] }
either = "1.15.0" either = "1.15.0"
fastrand = "2.3.0" fastrand = "2.3.0"
milli = { path = "../milli" } milli = { path = "../milli" }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tempfile = "3.23.0" tempfile = "3.20.0"

View File

@@ -11,33 +11,32 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
bincode = "1.3.3" bincode = "1.3.3"
byte-unit = "5.1.6" byte-unit = "5.1.6"
bytes = "1.11.0" bumpalo = "3.18.1"
bumpalo = "3.19.0"
bumparaw-collections = "0.1.4" bumparaw-collections = "0.1.4"
convert_case = "0.9.0" convert_case = "0.8.0"
csv = "1.4.0" csv = "1.3.1"
derive_builder = "0.20.2" derive_builder = "0.20.2"
dump = { path = "../dump" } dump = { path = "../dump" }
enum-iterator = "2.3.0" enum-iterator = "2.1.0"
file-store = { path = "../file-store" } file-store = { path = "../file-store" }
flate2 = "1.1.5" flate2 = "1.1.2"
indexmap = "2.12.0" hashbrown = "0.15.4"
indexmap = "2.9.0"
meilisearch-auth = { path = "../meilisearch-auth" } meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
memmap2 = "0.9.9" memmap2 = "0.9.7"
page_size = "0.6.0" page_size = "0.6.0"
rayon = "1.11.0" rayon = "1.10.0"
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
synchronoise = "1.0.1" synchronoise = "1.0.1"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = [ time = { version = "0.3.41", features = [
"serde-well-known", "serde-well-known",
"formatting", "formatting",
"parsing", "parsing",
@@ -45,11 +44,10 @@ time = { version = "0.3.44", features = [
] } ] }
tracing = "0.1.41" tracing = "0.1.41"
ureq = "2.12.1" ureq = "2.12.1"
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }
backoff = "0.4.0" backoff = "0.4.0"
reqwest = { version = "0.12.24", features = ["rustls-tls", "http2"], default-features = false } itertools = "0.14.0"
rusty-s3 = "0.8.1" tokio = { version = "1.47.1", features = ["full"] }
tokio = { version = "1.48.0", features = ["full"] }
[dev-dependencies] [dev-dependencies]
big_s = "1.0.2" big_s = "1.0.2"

View File

@@ -150,7 +150,6 @@ impl<'a> Dump<'a> {
details: task.details, details: task.details,
status: task.status, status: task.status,
network: task.network, network: task.network,
custom_metadata: task.custom_metadata,
kind: match task.kind { kind: match task.kind {
KindDump::DocumentImport { KindDump::DocumentImport {
primary_key, primary_key,
@@ -235,8 +234,8 @@ impl<'a> Dump<'a> {
} }
} }
KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from }, KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
KindDump::IndexCompaction { index_uid } => { KindDump::NetworkTopologyChange { network: new_network, origin } => {
KindWithContent::IndexCompaction { index_uid } KindWithContent::NetworkTopologyChange { network: new_network, origin }
} }
}, },
}; };

View File

@@ -5,7 +5,6 @@ use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::milli::index::RollbackOutcome; use meilisearch_types::milli::index::RollbackOutcome;
use meilisearch_types::tasks::{Kind, Status}; use meilisearch_types::tasks::{Kind, Status};
use meilisearch_types::{heed, milli}; use meilisearch_types::{heed, milli};
use reqwest::StatusCode;
use thiserror::Error; use thiserror::Error;
use crate::TaskId; use crate::TaskId;
@@ -128,14 +127,6 @@ pub enum Error {
#[error("Aborted task")] #[error("Aborted task")]
AbortedTask, AbortedTask,
#[error("S3 error: status: {status}, body: {body}")]
S3Error { status: StatusCode, body: String },
#[error("S3 HTTP error: {0}")]
S3HttpError(reqwest::Error),
#[error("S3 XML error: {0}")]
S3XmlError(Box<dyn std::error::Error + Send + Sync>),
#[error("S3 bucket error: {0}")]
S3BucketError(rusty_s3::BucketError),
#[error(transparent)] #[error(transparent)]
Dump(#[from] dump::Error), Dump(#[from] dump::Error),
#[error(transparent)] #[error(transparent)]
@@ -235,10 +226,6 @@ impl Error {
| Error::TaskCancelationWithEmptyQuery | Error::TaskCancelationWithEmptyQuery
| Error::FromRemoteWhenExporting { .. } | Error::FromRemoteWhenExporting { .. }
| Error::AbortedTask | Error::AbortedTask
| Error::S3Error { .. }
| Error::S3HttpError(_)
| Error::S3XmlError(_)
| Error::S3BucketError(_)
| Error::Dump(_) | Error::Dump(_)
| Error::Heed(_) | Error::Heed(_)
| Error::Milli { .. } | Error::Milli { .. }
@@ -306,14 +293,8 @@ impl ErrorCode for Error {
Error::BatchNotFound(_) => Code::BatchNotFound, Error::BatchNotFound(_) => Code::BatchNotFound,
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters, Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters, Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
// TODO: not sure of the Code to use
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice, Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
Error::S3Error { status, .. } if status.is_client_error() => {
Code::InvalidS3SnapshotRequest
}
Error::S3Error { .. } => Code::S3SnapshotServerError,
Error::S3HttpError(_) => Code::S3SnapshotServerError,
Error::S3XmlError(_) => Code::S3SnapshotServerError,
Error::S3BucketError(_) => Code::InvalidS3SnapshotParameters,
Error::Dump(e) => e.error_code(), Error::Dump(e) => e.error_code(),
Error::Milli { error, .. } => error.error_code(), Error::Milli { error, .. } => error.error_code(),
Error::ProcessBatchPanicked(_) => Code::Internal, Error::ProcessBatchPanicked(_) => Code::Internal,

View File

@@ -1,9 +1,9 @@
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use meilisearch_types::enterprise_edition::network::DbNetwork;
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures}; use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
use meilisearch_types::heed::types::{SerdeJson, Str}; use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls}; use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
use meilisearch_types::network::Network;
use crate::error::FeatureNotEnabledError; use crate::error::FeatureNotEnabledError;
use crate::Result; use crate::Result;
@@ -24,7 +24,7 @@ mod db_keys {
pub(crate) struct FeatureData { pub(crate) struct FeatureData {
persisted: Database<Str, SerdeJson<RuntimeTogglableFeatures>>, persisted: Database<Str, SerdeJson<RuntimeTogglableFeatures>>,
runtime: Arc<RwLock<RuntimeTogglableFeatures>>, runtime: Arc<RwLock<RuntimeTogglableFeatures>>,
network: Arc<RwLock<Network>>, network: Arc<RwLock<DbNetwork>>,
} }
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
@@ -197,8 +197,8 @@ impl FeatureData {
})); }));
// Once this is stabilized, network should be stored along with webhooks in index-scheduler's persisted database // Once this is stabilized, network should be stored along with webhooks in index-scheduler's persisted database
let network_db = runtime_features_db.remap_data_type::<SerdeJson<Network>>(); let network_db = runtime_features_db.remap_data_type::<SerdeJson<DbNetwork>>();
let network: Network = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default(); let network: DbNetwork = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();
Ok(Self { Ok(Self {
persisted: runtime_features_db, persisted: runtime_features_db,
@@ -234,8 +234,8 @@ impl FeatureData {
RoFeatures::new(self) RoFeatures::new(self)
} }
pub fn put_network(&self, mut wtxn: RwTxn, new_network: Network) -> Result<()> { pub fn put_network(&self, mut wtxn: RwTxn, new_network: DbNetwork) -> Result<()> {
self.persisted.remap_data_type::<SerdeJson<Network>>().put( self.persisted.remap_data_type::<SerdeJson<DbNetwork>>().put(
&mut wtxn, &mut wtxn,
db_keys::NETWORK, db_keys::NETWORK,
&new_network, &new_network,
@@ -247,7 +247,7 @@ impl FeatureData {
Ok(()) Ok(())
} }
pub fn network(&self) -> Network { pub fn network(&self) -> DbNetwork {
Network::clone(&*self.network.read().unwrap()) DbNetwork::clone(&*self.network.read().unwrap())
} }
} }

View File

@@ -199,7 +199,7 @@ impl IndexMapper {
let uuid = Uuid::new_v4(); let uuid = Uuid::new_v4();
self.index_mapping.put(&mut wtxn, name, &uuid)?; self.index_mapping.put(&mut wtxn, name, &uuid)?;
let index_path = self.index_path(uuid); let index_path = self.base_path.join(uuid.to_string());
fs::create_dir_all(&index_path)?; fs::create_dir_all(&index_path)?;
// Error if the UUIDv4 somehow already exists in the map, since it should be fresh. // Error if the UUIDv4 somehow already exists in the map, since it should be fresh.
@@ -286,7 +286,7 @@ impl IndexMapper {
}; };
let index_map = self.index_map.clone(); let index_map = self.index_map.clone();
let index_path = self.index_path(uuid); let index_path = self.base_path.join(uuid.to_string());
let index_name = name.to_string(); let index_name = name.to_string();
thread::Builder::new() thread::Builder::new()
.name(String::from("index_deleter")) .name(String::from("index_deleter"))
@@ -341,26 +341,6 @@ impl IndexMapper {
Ok(()) Ok(())
} }
/// Closes the specified index.
///
/// This operation involves closing the underlying environment and so can take a long time to complete.
///
/// # Panics
///
/// - If the Index corresponding to the passed name is concurrently being deleted/resized or cannot be found in the
/// in memory hash map.
pub fn close_index(&self, rtxn: &RoTxn, name: &str) -> Result<()> {
let uuid = self
.index_mapping
.get(rtxn, name)?
.ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
// We remove the index from the in-memory index map.
self.index_map.write().unwrap().close_for_resize(&uuid, self.enable_mdb_writemap, 0);
Ok(())
}
/// Return an index, may open it if it wasn't already opened. /// Return an index, may open it if it wasn't already opened.
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> { pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
if let Some((current_name, current_index)) = if let Some((current_name, current_index)) =
@@ -408,7 +388,7 @@ impl IndexMapper {
} else { } else {
continue; continue;
}; };
let index_path = self.index_path(uuid); let index_path = self.base_path.join(uuid.to_string());
// take the lock to reopen the environment. // take the lock to reopen the environment.
reopen reopen
.reopen(&mut self.index_map.write().unwrap(), &index_path) .reopen(&mut self.index_map.write().unwrap(), &index_path)
@@ -425,7 +405,7 @@ impl IndexMapper {
// if it's not already there. // if it's not already there.
match index_map.get(&uuid) { match index_map.get(&uuid) {
Missing => { Missing => {
let index_path = self.index_path(uuid); let index_path = self.base_path.join(uuid.to_string());
break index_map break index_map
.create( .create(
@@ -452,14 +432,6 @@ impl IndexMapper {
Ok(index) Ok(index)
} }
/// Returns the path of the index.
///
/// The folder located at this path is containing the data.mdb,
/// the lock.mdb and an optional data.mdb.cpy file.
pub fn index_path(&self, uuid: Uuid) -> PathBuf {
self.base_path.join(uuid.to_string())
}
pub fn rollback_index( pub fn rollback_index(
&self, &self,
rtxn: &RoTxn, rtxn: &RoTxn,
@@ -500,7 +472,7 @@ impl IndexMapper {
}; };
} }
let index_path = self.index_path(uuid); let index_path = self.base_path.join(uuid.to_string());
Index::rollback(milli::heed::EnvOpenOptions::new().read_txn_without_tls(), index_path, to) Index::rollback(milli::heed::EnvOpenOptions::new().read_txn_without_tls(), index_path, to)
.map_err(|err| crate::Error::from_milli(err, Some(name.to_string()))) .map_err(|err| crate::Error::from_milli(err, Some(name.to_string())))
} }

View File

@@ -6,7 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{Database, RoTxn}; use meilisearch_types::heed::{Database, RoTxn};
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32}; use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::{Details, Kind, Status, Task}; use meilisearch_types::tasks::{Details, Kind, Status, Task};
use meilisearch_types::versioning::{self, VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH}; use meilisearch_types::versioning;
use roaring::RoaringBitmap; use roaring::RoaringBitmap;
use crate::index_mapper::IndexMapper; use crate::index_mapper::IndexMapper;
@@ -232,7 +232,6 @@ pub fn snapshot_task(task: &Task) -> String {
status, status,
kind, kind,
network, network,
custom_metadata,
} = task; } = task;
snap.push('{'); snap.push('{');
snap.push_str(&format!("uid: {uid}, ")); snap.push_str(&format!("uid: {uid}, "));
@@ -253,9 +252,6 @@ pub fn snapshot_task(task: &Task) -> String {
if let Some(network) = network { if let Some(network) = network {
snap.push_str(&format!("network: {network:?}, ")) snap.push_str(&format!("network: {network:?}, "))
} }
if let Some(custom_metadata) = custom_metadata {
snap.push_str(&format!("custom_metadata: {custom_metadata:?}"))
}
snap.push('}'); snap.push('}');
snap snap
@@ -320,14 +316,10 @@ fn snapshot_details(d: &Details) -> String {
format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}") format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
} }
Details::UpgradeDatabase { from, to } => { Details::UpgradeDatabase { from, to } => {
if to == &(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) {
format!("{{ from: {from:?}, to: [current version] }}")
} else {
format!("{{ from: {from:?}, to: {to:?} }}") format!("{{ from: {from:?}, to: {to:?} }}")
} }
} Details::NetworkTopologyChange { network: new_network } => {
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => { format!("{{ new_network: {new_network:?} }}")
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
} }
} }
} }
@@ -404,21 +396,7 @@ pub fn snapshot_batch(batch: &Batch) -> String {
snap.push('{'); snap.push('{');
snap.push_str(&format!("uid: {uid}, ")); snap.push_str(&format!("uid: {uid}, "));
let details = if let Some(upgrade_to) = &details.upgrade_to { snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
if upgrade_to.as_str()
== format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}").as_str()
{
let mut details = details.clone();
details.upgrade_to = Some("[current version]".into());
serde_json::to_string(&details).unwrap()
} else {
serde_json::to_string(details).unwrap()
}
} else {
serde_json::to_string(details).unwrap()
};
snap.push_str(&format!("details: {details}, "));
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap())); snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
if !embedder_stats.skip_serializing() { if !embedder_stats.skip_serializing() {
snap.push_str(&format!( snap.push_str(&format!(

View File

@@ -54,6 +54,7 @@ pub use features::RoFeatures;
use flate2::bufread::GzEncoder; use flate2::bufread::GzEncoder;
use flate2::Compression; use flate2::Compression;
use meilisearch_types::batches::Batch; use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::DbNetwork;
use meilisearch_types::features::{ use meilisearch_types::features::{
ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures, ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
}; };
@@ -66,7 +67,6 @@ use meilisearch_types::milli::vector::{
Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment, Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment,
}; };
use meilisearch_types::milli::{self, Index}; use meilisearch_types::milli::{self, Index};
use meilisearch_types::network::Network;
use meilisearch_types::task_view::TaskView; use meilisearch_types::task_view::TaskView;
use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork}; use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView}; use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
@@ -217,7 +217,6 @@ pub struct IndexScheduler {
#[cfg(test)] #[cfg(test)]
run_loop_iteration: Arc<RwLock<usize>>, run_loop_iteration: Arc<RwLock<usize>>,
/// The tokio runtime used for asynchronous tasks.
runtime: Option<tokio::runtime::Handle>, runtime: Option<tokio::runtime::Handle>,
} }
@@ -259,23 +258,14 @@ impl IndexScheduler {
} }
/// Create an index scheduler and start its run loop. /// Create an index scheduler and start its run loop.
#[allow(private_interfaces)] // because test_utils is private
pub fn new( pub fn new(
options: IndexSchedulerOptions, options: IndexSchedulerOptions,
auth_env: Env<WithoutTls>, auth_env: Env<WithoutTls>,
from_db_version: (u32, u32, u32), from_db_version: (u32, u32, u32),
runtime: Option<tokio::runtime::Handle>, runtime: Option<tokio::runtime::Handle>,
) -> Result<Self> { #[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
let this = Self::new_without_run(options, auth_env, from_db_version, runtime)?; #[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
this.run();
Ok(this)
}
fn new_without_run(
options: IndexSchedulerOptions,
auth_env: Env<WithoutTls>,
from_db_version: (u32, u32, u32),
runtime: Option<tokio::runtime::Handle>,
) -> Result<Self> { ) -> Result<Self> {
std::fs::create_dir_all(&options.tasks_path)?; std::fs::create_dir_all(&options.tasks_path)?;
std::fs::create_dir_all(&options.update_file_path)?; std::fs::create_dir_all(&options.update_file_path)?;
@@ -330,7 +320,8 @@ impl IndexScheduler {
wtxn.commit()?; wtxn.commit()?;
Ok(Self { // allow unreachable_code to get rids of the warning in the case of a test build.
let this = Self {
processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())), processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
version, version,
queue, queue,
@@ -346,32 +337,16 @@ impl IndexScheduler {
webhooks: Arc::new(webhooks), webhooks: Arc::new(webhooks),
embedders: Default::default(), embedders: Default::default(),
#[cfg(test)] // Will be replaced in `new_tests` in test environments #[cfg(test)]
test_breakpoint_sdr: crossbeam_channel::bounded(0).0, test_breakpoint_sdr,
#[cfg(test)] // Will be replaced in `new_tests` in test environments #[cfg(test)]
planned_failures: Default::default(), planned_failures,
#[cfg(test)] #[cfg(test)]
run_loop_iteration: Arc::new(RwLock::new(0)), run_loop_iteration: Arc::new(RwLock::new(0)),
features, features,
chat_settings, chat_settings,
runtime, runtime,
}) };
}
/// Create an index scheduler and start its run loop.
#[cfg(test)]
fn new_test(
options: IndexSchedulerOptions,
auth_env: Env<WithoutTls>,
from_db_version: (u32, u32, u32),
runtime: Option<tokio::runtime::Handle>,
test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
planned_failures: Vec<(usize, test_utils::FailureLocation)>,
) -> Result<Self> {
let mut this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
this.test_breakpoint_sdr = test_breakpoint_sdr;
this.planned_failures = planned_failures;
this.run(); this.run();
Ok(this) Ok(this)
@@ -756,19 +731,6 @@ impl IndexScheduler {
kind: KindWithContent, kind: KindWithContent,
task_id: Option<TaskId>, task_id: Option<TaskId>,
dry_run: bool, dry_run: bool,
) -> Result<Task> {
self.register_with_custom_metadata(kind, task_id, None, dry_run)
}
/// Register a new task in the scheduler, with metadata.
///
/// If it fails and data was associated with the task, it tries to delete the associated data.
pub fn register_with_custom_metadata(
&self,
kind: KindWithContent,
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
) -> Result<Task> { ) -> Result<Task> {
// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task // if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty()) if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
@@ -779,7 +741,7 @@ impl IndexScheduler {
} }
let mut wtxn = self.env.write_txn()?; let mut wtxn = self.env.write_txn()?;
let task = self.queue.register(&mut wtxn, &kind, task_id, custom_metadata, dry_run)?; let task = self.queue.register(&mut wtxn, &kind, task_id, dry_run)?;
// If the registered task is a task cancelation // If the registered task is a task cancelation
// we inform the processing tasks to stop (if necessary). // we inform the processing tasks to stop (if necessary).
@@ -935,13 +897,13 @@ impl IndexScheduler {
Ok(()) Ok(())
} }
pub fn put_network(&self, network: Network) -> Result<()> { pub fn put_network(&self, network: DbNetwork) -> Result<()> {
let wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?; let wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
self.features.put_network(wtxn, network)?; self.features.put_network(wtxn, network)?;
Ok(()) Ok(())
} }
pub fn network(&self) -> Network { pub fn network(&self) -> DbNetwork {
self.features.network() self.features.network()
} }
@@ -970,9 +932,10 @@ impl IndexScheduler {
pub fn embedders( pub fn embedders(
&self, &self,
index_uid: String, index_uid: &str,
embedding_configs: Vec<IndexEmbeddingConfig>, embedding_configs: Vec<IndexEmbeddingConfig>,
) -> Result<RuntimeEmbedders> { ) -> Result<RuntimeEmbedders> {
let err = |err| Error::from_milli(err, Some(index_uid.to_owned()));
let res: Result<_> = embedding_configs let res: Result<_> = embedding_configs
.into_iter() .into_iter()
.map( .map(
@@ -985,7 +948,7 @@ impl IndexScheduler {
let document_template = prompt let document_template = prompt
.try_into() .try_into()
.map_err(meilisearch_types::milli::Error::from) .map_err(meilisearch_types::milli::Error::from)
.map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?; .map_err(err)?;
let fragments = fragments let fragments = fragments
.into_inner() .into_inner()
@@ -1015,9 +978,8 @@ impl IndexScheduler {
let embedder = Arc::new( let embedder = Arc::new(
Embedder::new(embedder_options.clone(), self.scheduler.embedding_cache_cap) Embedder::new(embedder_options.clone(), self.scheduler.embedding_cache_cap)
.map_err(meilisearch_types::milli::vector::Error::from) .map_err(meilisearch_types::milli::vector::Error::from)
.map_err(|err| { .map_err(milli::Error::from)
Error::from_milli(err.into(), Some(index_uid.clone())) .map_err(err)?,
})?,
); );
{ {
let mut embedders = self.embedders.write().unwrap(); let mut embedders = self.embedders.write().unwrap();

View File

@@ -75,7 +75,6 @@ make_enum_progress! {
pub enum TaskCancelationProgress { pub enum TaskCancelationProgress {
RetrievingTasks, RetrievingTasks,
CancelingUpgrade, CancelingUpgrade,
CleaningCompactionLeftover,
UpdatingTasks, UpdatingTasks,
} }
} }
@@ -139,17 +138,6 @@ make_enum_progress! {
} }
} }
make_enum_progress! {
pub enum IndexCompaction {
RetrieveTheIndex,
CreateTemporaryFile,
CopyAndCompactTheIndex,
PersistTheCompactedIndex,
CloseTheIndex,
ReopenTheIndex,
}
}
make_enum_progress! { make_enum_progress! {
pub enum InnerSwappingTwoIndexes { pub enum InnerSwappingTwoIndexes {
RetrieveTheTasks, RetrieveTheTasks,

View File

@@ -257,7 +257,6 @@ impl Queue {
wtxn: &mut RwTxn, wtxn: &mut RwTxn,
kind: &KindWithContent, kind: &KindWithContent,
task_id: Option<TaskId>, task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool, dry_run: bool,
) -> Result<Task> { ) -> Result<Task> {
let next_task_id = self.tasks.next_task_id(wtxn)?; let next_task_id = self.tasks.next_task_id(wtxn)?;
@@ -281,7 +280,6 @@ impl Queue {
status: Status::Enqueued, status: Status::Enqueued,
kind: kind.clone(), kind: kind.clone(),
network: None, network: None,
custom_metadata,
}; };
// For deletion and cancelation tasks, we want to make extra sure that they // For deletion and cancelation tasks, we want to make extra sure that they
// don't attempt to delete/cancel tasks that are newer than themselves. // don't attempt to delete/cancel tasks that are newer than themselves.
@@ -312,8 +310,7 @@ impl Queue {
| self.tasks.status.get(wtxn, &Status::Failed)?.unwrap_or_default() | self.tasks.status.get(wtxn, &Status::Failed)?.unwrap_or_default()
| self.tasks.status.get(wtxn, &Status::Canceled)?.unwrap_or_default(); | self.tasks.status.get(wtxn, &Status::Canceled)?.unwrap_or_default();
let to_delete = let to_delete = RoaringBitmap::from_iter(finished.into_iter().rev().take(100_000));
RoaringBitmap::from_sorted_iter(finished.into_iter().take(100_000)).unwrap();
// /!\ the len must be at least 2 or else we might enter an infinite loop where we only delete // /!\ the len must be at least 2 or else we might enter an infinite loop where we only delete
// the deletion tasks we enqueued ourselves. // the deletion tasks we enqueued ourselves.
@@ -346,7 +343,6 @@ impl Queue {
tasks: to_delete, tasks: to_delete,
}, },
None, None,
None,
false, false,
)?; )?;

View File

@@ -68,14 +68,14 @@ impl From<KindWithContent> for AutobatchKind {
KindWithContent::IndexCreation { .. } => AutobatchKind::IndexCreation, KindWithContent::IndexCreation { .. } => AutobatchKind::IndexCreation,
KindWithContent::IndexUpdate { .. } => AutobatchKind::IndexUpdate, KindWithContent::IndexUpdate { .. } => AutobatchKind::IndexUpdate,
KindWithContent::IndexSwap { .. } => AutobatchKind::IndexSwap, KindWithContent::IndexSwap { .. } => AutobatchKind::IndexSwap,
KindWithContent::IndexCompaction { .. } KindWithContent::TaskCancelation { .. }
| KindWithContent::TaskCancelation { .. }
| KindWithContent::TaskDeletion { .. } | KindWithContent::TaskDeletion { .. }
| KindWithContent::DumpCreation { .. } | KindWithContent::DumpCreation { .. }
| KindWithContent::Export { .. } | KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. } | KindWithContent::UpgradeDatabase { .. }
| KindWithContent::NetworkTopologyChange { .. }
| KindWithContent::SnapshotCreation => { | KindWithContent::SnapshotCreation => {
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.") panic!("The autobatcher should never be called with tasks that don't apply to an index.")
} }
} }
} }
@@ -289,9 +289,7 @@ impl BatchKind {
match (self, autobatch_kind) { match (self, autobatch_kind) {
// We don't batch any of these operations // We don't batch any of these operations
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => { (this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break((this, BatchStopReason::TaskCannotBeBatched { kind, id })),
Break((this, BatchStopReason::TaskCannotBeBatched { kind, id }))
},
// We must not batch tasks that don't have the same index creation rights if the index doesn't already exists. // We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
(this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => { (this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
Break((this, BatchStopReason::IndexCreationMismatch { id })) Break((this, BatchStopReason::IndexCreationMismatch { id }))

View File

@@ -55,9 +55,8 @@ pub(crate) enum Batch {
UpgradeDatabase { UpgradeDatabase {
tasks: Vec<Task>, tasks: Vec<Task>,
}, },
IndexCompaction { NetworkTopologyChanges {
index_uid: String, tasks: Vec<Task>,
task: Task,
}, },
} }
@@ -114,14 +113,14 @@ impl Batch {
| Batch::Dump(task) | Batch::Dump(task)
| Batch::IndexCreation { task, .. } | Batch::IndexCreation { task, .. }
| Batch::Export { task } | Batch::Export { task }
| Batch::IndexUpdate { task, .. } | Batch::IndexUpdate { task, .. } => {
| Batch::IndexCompaction { task, .. } => {
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap() RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
} }
Batch::SnapshotCreation(tasks) Batch::SnapshotCreation(tasks)
| Batch::TaskDeletions(tasks) | Batch::TaskDeletions(tasks)
| Batch::UpgradeDatabase { tasks } | Batch::UpgradeDatabase { tasks }
| Batch::IndexDeletion { tasks, .. } => { | Batch::IndexDeletion { tasks, .. }
| Batch::NetworkTopologyChanges { tasks } => {
RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid)) RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
} }
Batch::IndexOperation { op, .. } => match op { Batch::IndexOperation { op, .. } => match op {
@@ -156,12 +155,12 @@ impl Batch {
| Dump(_) | Dump(_)
| Export { .. } | Export { .. }
| UpgradeDatabase { .. } | UpgradeDatabase { .. }
| NetworkTopologyChanges { .. }
| IndexSwap { .. } => None, | IndexSwap { .. } => None,
IndexOperation { op, .. } => Some(op.index_uid()), IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. } IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. } | IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid, .. } | IndexDeletion { index_uid, .. } => Some(index_uid),
| IndexCompaction { index_uid, .. } => Some(index_uid),
} }
} }
} }
@@ -181,8 +180,8 @@ impl fmt::Display for Batch {
Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?, Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?, Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
Batch::IndexSwap { .. } => f.write_str("IndexSwap")?, Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
Batch::Export { .. } => f.write_str("Export")?, Batch::Export { .. } => f.write_str("Export")?,
Batch::NetworkTopologyChanges { .. } => f.write_str("NetworkTopologyChange")?,
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?, Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
}; };
match index_uid { match index_uid {
@@ -519,33 +518,17 @@ impl IndexScheduler {
return Ok(Some((Batch::TaskDeletions(tasks), current_batch))); return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
} }
// 3. we get the next task to compact // 3. we batch the export.
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
if let Some(task_id) = to_compact.min() {
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
current_batch.processing(Some(&mut task));
current_batch.reason(BatchStopReason::TaskCannotBeBatched {
kind: Kind::IndexCompaction,
id: task_id,
});
let index_uid =
task.index_uid().expect("Compaction task must have an index uid").to_owned();
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
}
// 4. we batch the export.
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued; let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
if !to_export.is_empty() { if !to_export.is_empty() {
let task_id = to_export.iter().next().expect("There must be at least one export task"); let task_id = to_export.iter().next().expect("There must be at least one export task");
let mut task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap(); let mut task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
current_batch.processing([&mut task]); current_batch.processing([&mut task]);
current_batch current_batch.reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::Export });
.reason(BatchStopReason::TaskCannotBeBatched { kind: Kind::Export, id: task_id });
return Ok(Some((Batch::Export { task }, current_batch))); return Ok(Some((Batch::Export { task }, current_batch)));
} }
// 5. we batch the snapshot. // 4. we batch the snapshot.
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued; let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
if !to_snapshot.is_empty() { if !to_snapshot.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?; let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
@@ -555,7 +538,7 @@ impl IndexScheduler {
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch))); return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
} }
// 6. we batch the dumps. // 5. we batch the dumps.
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued; let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
if let Some(to_dump) = to_dump.min() { if let Some(to_dump) = to_dump.min() {
let mut task = let mut task =
@@ -568,6 +551,17 @@ impl IndexScheduler {
return Ok(Some((Batch::Dump(task), current_batch))); return Ok(Some((Batch::Dump(task), current_batch)));
} }
// 6. We batch the network changes.
let to_network = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)? & enqueued;
if !to_network.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_network)?;
current_batch.processing(&mut tasks);
current_batch.reason(BatchStopReason::TaskKindCannotBeBatched {
kind: Kind::NetworkTopologyChange,
});
return Ok(Some((Batch::NetworkTopologyChanges { tasks }, current_batch)));
}
// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task. // 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) }; let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
let mut task = let mut task =

View File

@@ -0,0 +1,6 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
mod process_network;

View File

@@ -0,0 +1,362 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use std::collections::BTreeMap;
use std::time::Duration;
use bumpalo::Bump;
use itertools::{EitherOrBoth, Itertools};
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote, Network, Remote};
use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::progress::{EmbedderStats, Progress};
use meilisearch_types::milli::update::new::indexer;
use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::{self};
use meilisearch_types::tasks::{KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
use crate::{Error, IndexScheduler};
impl IndexScheduler {
pub(crate) fn process_network_changes(
&self,
progress: Progress,
mut tasks: Vec<Task>,
) -> crate::Result<Vec<Task>> {
let old_network = self.network();
let mut current_network = Some(old_network.clone());
for task in &tasks {
let KindWithContent::NetworkTopologyChange { network, origin } = &task.kind else {
continue;
};
current_network = match (current_network, network) {
(None, None) => None,
(None, Some(network)) => Some(accumulate(DbNetwork::default(), network.clone())?),
(Some(current_network), None) => Some(current_network),
(Some(current_network), Some(new_network)) => {
Some(accumulate(current_network, new_network.clone())?)
}
};
}
'network: {
let mut new_network = current_network.unwrap_or_default();
if old_network == new_network {
// no change, exit
break 'network;
}
// TODO: only do this if the task originates with an end-user
let must_replicate = old_network.sharding || new_network.sharding;
if !must_replicate {
self.put_network(new_network)?;
break 'network;
}
let must_stop_processing = &self.scheduler.must_stop_processing;
// FIXME: make it mandatory for `self` to be part of the network
let old_this = old_network.local.as_deref();
// FIXME: error here
let new_this = new_network.local.unwrap();
// in network replication, we need to tell old nodes that they are no longer part of the network.
// This is made difficult by "node aliasing": Meilisearch has no way of knowing if two nodes with different names
// or even different URLs actually refer to the same machine in two different versions of the network.
//
// This implementation ignores aliasing: a node is the same when it has the same name.
//
// To defeat aliasing, we iterate a first time to collect all deletions and additions, then we make sure to process the deletions
// first, rather than processing the tasks in the lexicographical order of remotes.
let mut node_deletions = Vec::new();
let mut node_additions = Vec::new();
for eob in old_network
.remotes
.iter()
.merge_join_by(new_network.remotes.iter(), |(left, _), (right, _)| left.cmp(right))
{
match eob {
EitherOrBoth::Both((to_update_name, _), (_, new_node)) => {
if to_update_name.as_str() == new_this {
continue; // skip `self`
}
node_additions.push((to_update_name, new_node));
}
EitherOrBoth::Left((to_delete_name, to_delete_node)) => {
if Some(to_delete_name.as_str()) == old_this {
continue; // skip `self`
}
node_deletions.push((to_delete_name, to_delete_node));
}
EitherOrBoth::Right((to_add_name, to_add_node)) => {
if to_add_name.as_str() == new_this {
continue; // skip `self`
}
node_additions.push((to_add_name, to_add_node));
}
}
}
let runtime = self.runtime.clone().unwrap();
let mut in_flight = Vec::new();
// process deletions
for (to_delete_name, to_delete) in node_deletions {
// set `self` to None so that this node is forgotten about
new_network.local = None;
in_flight.push(proxy_network(&runtime, to_delete.url.as_str(), &new_network)?);
}
runtime.block_on(async {
for task in in_flight.drain(..) {
// TODO: log and ignore errors during deletion
let res = task.await;
}
});
// process additions
for (to_add_name, to_add) in node_additions {
new_network.local = Some(to_add_name.clone());
in_flight.push(proxy_network(&runtime, to_add.url.as_str(), &new_network)?);
}
runtime.block_on(async {
for task in in_flight.drain(..) {
// TODO: handle errors during addition
let res = task.await;
}
});
// balance documents
new_network.local = Some(new_this);
self.balance_documents(&new_network, &progress, &must_stop_processing)?;
self.put_network(new_network)?;
}
for task in &mut tasks {
task.status = Status::Succeeded;
}
Ok(tasks)
}
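To make the deletion-first handling above concrete, here is a minimal, self-contained sketch of how `merge_join_by` over two name-sorted remote lists classifies entries into deletions, additions and updates; the remote names are purely illustrative and this is not the scheduler's actual data model.

```rust
use itertools::{EitherOrBoth, Itertools};

fn main() {
    // names of remotes in the old and new network, both iterated in sorted order
    let old = ["ms-paris", "ms-tokyo"];
    let new = ["ms-berlin", "ms-tokyo"];

    let mut deletions = Vec::new();
    let mut additions_or_updates = Vec::new();
    for eob in old.into_iter().merge_join_by(new.into_iter(), |l, r| l.cmp(r)) {
        match eob {
            // only in the old network: must be told it no longer belongs to it
            EitherOrBoth::Left(name) => deletions.push(name),
            // only in the new network, or present in both: receives the new topology
            EitherOrBoth::Right(name) | EitherOrBoth::Both(_, name) => {
                additions_or_updates.push(name)
            }
        }
    }

    assert_eq!(deletions, ["ms-paris"]);
    assert_eq!(additions_or_updates, ["ms-berlin", "ms-tokyo"]);
}
```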
fn balance_documents(
&self,
new_network: &DbNetwork,
progress: &Progress,
must_stop_processing: &crate::scheduler::MustStopProcessing,
) -> crate::Result<()> {
// FIXME unwrap
let new_shards = new_network.shards().unwrap();
// TECHDEBT: this spawns a `ureq` agent in addition to `reqwest`. We probably want to harmonize all of this.
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let mut indexer_alloc = Bump::new();
// process by batches of 20MiB. Allow for compression? Don't forget about embeddings
let _: Vec<()> = self.try_for_each_index(|index_uid, index| -> crate::Result<()> {
indexer_alloc.reset();
let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
let index_rtxn = index.read_txn()?;
let all_docids = index.external_documents_ids();
let mut documents_to_move_to: hashbrown::HashMap<String, RoaringBitmap> =
hashbrown::HashMap::new();
let mut documents_to_delete = RoaringBitmap::new();
for res in all_docids.iter(&index_rtxn)? {
let (external_docid, docid) = res?;
match new_shards.processing_shard(external_docid) {
Some(shard) if shard.is_own => continue,
Some(shard) => {
documents_to_move_to
.entry_ref(shard.name.as_str())
.or_default()
.insert(docid);
}
None => {
documents_to_delete.insert(docid);
}
}
}
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
for (remote, documents_to_move) in documents_to_move_to {
// TODO: justify the unwrap
let remote = new_network.remotes.get(&remote).unwrap();
let target = TargetInstance {
base_url: &remote.url,
api_key: remote.write_api_key.as_deref(),
};
let options = ExportOptions {
index_uid,
payload_size: None,
override_settings: false,
extra_headers: &Default::default(),
};
let ctx = ExportContext {
index,
index_rtxn: &index_rtxn,
universe: &documents_to_move,
progress,
agent: &agent,
must_stop_processing,
};
self.export_one_index(target, options, ctx)?;
documents_to_delete |= documents_to_move;
}
if documents_to_delete.is_empty() {
return Ok(());
}
let mut new_fields_ids_map = fields_ids_map.clone();
// candidates not empty => index not empty => a primary key is set
let primary_key = index.primary_key(&index_rtxn)?.unwrap();
let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
.map_err(milli::Error::from)
.map_err(err)?;
let mut index_wtxn = index.write_txn()?;
let mut indexer = indexer::DocumentDeletion::new();
indexer.delete_documents_by_docids(documents_to_delete);
let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
let embedders = index
.embedding_configs()
.embedding_configs(&index_wtxn)
.map_err(milli::Error::from)
.map_err(err)?;
let embedders = self.embedders(index_uid, embedders)?;
let indexer_config = self.index_mapper.indexer_config();
let pool = &indexer_config.thread_pool;
indexer::index(
&mut index_wtxn,
index,
pool,
indexer_config.grenad_parameters(),
&fields_ids_map,
new_fields_ids_map,
None, // document deletion never changes primary key
&document_changes,
embedders,
&|| must_stop_processing.get(),
&progress,
&EmbedderStats::default(),
)
.map_err(err)?;
index_wtxn.commit()?;
Ok(())
})?;
Ok(())
}
}
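The balancing pass above relies on `Shards::processing_shard` to assign every external document id to exactly one shard: documents owned by another shard are exported to that remote and then deleted locally, and documents owned by no shard are deleted. The hash-based assignment below is only a hypothetical illustration of that contract, not the actual `Shards` implementation.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Shard {
    name: String,
    is_own: bool,
}

// Hypothetical routing: hash the external document id onto one of the shards.
fn processing_shard<'a>(shards: &'a [Shard], external_docid: &str) -> Option<&'a Shard> {
    if shards.is_empty() {
        return None; // no shard owns the document
    }
    let mut hasher = DefaultHasher::new();
    external_docid.hash(&mut hasher);
    let idx = (hasher.finish() % shards.len() as u64) as usize;
    shards.get(idx)
}

fn main() {
    let shards = vec![
        Shard { name: "ms-0".into(), is_own: true },
        Shard { name: "ms-1".into(), is_own: false },
    ];
    match processing_shard(&shards, "movie-42") {
        Some(shard) if shard.is_own => println!("keep the document locally"),
        Some(shard) => println!("export to {} then delete locally", shard.name),
        None => println!("delete the document locally"),
    }
}
```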
fn proxy_network(
runtime: &tokio::runtime::Handle,
url: &str,
network: &DbNetwork,
) -> crate::Result<tokio::task::JoinHandle<()>> {
todo!()
}
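One possible shape for the `todo!()` above, assuming the remote exposes the same `PATCH /network` route used locally and that the payload is the serialized `DbNetwork`; the route, authentication and error handling here are assumptions, not the final implementation.

```rust
// Sketch only: spawns the HTTP call on the tokio runtime so the caller can await it later.
fn proxy_network_sketch(
    runtime: &tokio::runtime::Handle,
    url: &str,
    network: &DbNetwork,
) -> crate::Result<tokio::task::JoinHandle<()>> {
    let url = format!("{url}/network");
    // assumes DbNetwork implements serde::Serialize
    let body = serde_json::to_value(network).expect("DbNetwork is serializable");
    Ok(runtime.spawn(async move {
        let client = reqwest::Client::new();
        // errors are deliberately ignored here; the caller decides how to handle them
        let _ = client.patch(&url).json(&body).send().await;
    }))
}
```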
fn accumulate(old_network: DbNetwork, new_network: Network) -> crate::Result<DbNetwork> {
let err = |err| Err(Error::from_milli(milli::Error::UserError(err), None));
let merged_local = match new_network.local {
Setting::Set(new_self) => Some(new_self),
Setting::Reset => None,
Setting::NotSet => old_network.local,
};
let merged_sharding = match new_network.sharding {
Setting::Set(new_sharding) => new_sharding,
Setting::Reset => false,
Setting::NotSet => old_network.sharding,
};
if merged_sharding && merged_local.is_none() {
return err(milli::UserError::NetworkShardingWithoutSelf);
}
let merged_remotes = match new_network.remotes {
Setting::Set(new_remotes) => {
let mut merged_remotes = BTreeMap::new();
for either_or_both in old_network
.remotes
.into_iter()
.merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
{
match either_or_both {
EitherOrBoth::Both((name, old), (_, Some(new))) => {
let DbRemote {
url: old_url,
search_api_key: old_search_api_key,
write_api_key: old_write_api_key,
} = old;
let Remote {
url: new_url,
search_api_key: new_search_api_key,
write_api_key: new_write_api_key,
} = new;
let merged = DbRemote {
url: match new_url {
Setting::Set(new_url) => new_url,
Setting::Reset => {
return err(milli::UserError::NetworkMissingUrl(name))
}
Setting::NotSet => old_url,
},
search_api_key: match new_search_api_key {
Setting::Set(new_search_api_key) => Some(new_search_api_key),
Setting::Reset => None,
Setting::NotSet => old_search_api_key,
},
write_api_key: match new_write_api_key {
Setting::Set(new_write_api_key) => Some(new_write_api_key),
Setting::Reset => None,
Setting::NotSet => old_write_api_key,
},
};
merged_remotes.insert(name, merged);
}
EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
EitherOrBoth::Left((name, node)) => {
merged_remotes.insert(name, node);
}
EitherOrBoth::Right((name, Some(node))) => {
let Some(url) = node.url.set() else {
return err(milli::UserError::NetworkMissingUrl(name));
};
let node = DbRemote {
url,
search_api_key: node.search_api_key.set(),
write_api_key: node.write_api_key.set(),
};
merged_remotes.insert(name, node);
}
}
}
merged_remotes
}
Setting::Reset => BTreeMap::new(),
Setting::NotSet => old_network.remotes,
};
Ok(DbNetwork { local: merged_local, remotes: merged_remotes, sharding: merged_sharding })
}
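The merge in `accumulate` follows the usual `Setting` semantics: `Set` overrides the stored value, `Reset` clears it, and `NotSet` keeps it. Below is a self-contained sketch of that three-valued merge, using a local stand-in for the milli `Setting` type.

```rust
// Stand-in for meilisearch_types::milli::update::Setting, for illustration only.
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

fn merge<T>(old: Option<T>, new: Setting<T>) -> Option<T> {
    match new {
        Setting::Set(value) => Some(value), // the update overrides the stored value
        Setting::Reset => None,             // the update clears it
        Setting::NotSet => old,             // the update leaves it untouched
    }
}

fn main() {
    // `local` keeps its previous value when the update does not mention it...
    assert_eq!(merge(Some("ms-0".to_string()), Setting::NotSet), Some("ms-0".to_string()));
    // ...and is cleared when the update resets it.
    assert_eq!(merge(Some("ms-0".to_string()), Setting::Reset), None);
}
```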

View File

@@ -2,6 +2,7 @@ mod autobatcher;
#[cfg(test)] #[cfg(test)]
mod autobatcher_test; mod autobatcher_test;
mod create_batch; mod create_batch;
mod enterprise_edition;
mod process_batch; mod process_batch;
mod process_dump_creation; mod process_dump_creation;
mod process_export; mod process_export;
@@ -25,7 +26,6 @@ use convert_case::{Case, Casing as _};
use meilisearch_types::error::ResponseError; use meilisearch_types::error::ResponseError;
use meilisearch_types::heed::{Env, WithoutTls}; use meilisearch_types::heed::{Env, WithoutTls};
use meilisearch_types::milli; use meilisearch_types::milli;
use meilisearch_types::milli::update::S3SnapshotOptions;
use meilisearch_types::tasks::Status; use meilisearch_types::tasks::Status;
use process_batch::ProcessBatchInfo; use process_batch::ProcessBatchInfo;
use rayon::current_num_threads; use rayon::current_num_threads;
@@ -88,14 +88,11 @@ pub struct Scheduler {
/// Snapshot compaction status. /// Snapshot compaction status.
pub(crate) experimental_no_snapshot_compaction: bool, pub(crate) experimental_no_snapshot_compaction: bool,
/// S3 Snapshot options.
pub(crate) s3_snapshot_options: Option<S3SnapshotOptions>,
} }
impl Scheduler { impl Scheduler {
pub(crate) fn private_clone(&self) -> Self { pub(crate) fn private_clone(&self) -> Scheduler {
Self { Scheduler {
must_stop_processing: self.must_stop_processing.clone(), must_stop_processing: self.must_stop_processing.clone(),
wake_up: self.wake_up.clone(), wake_up: self.wake_up.clone(),
autobatching_enabled: self.autobatching_enabled, autobatching_enabled: self.autobatching_enabled,
@@ -107,52 +104,23 @@ impl Scheduler {
version_file_path: self.version_file_path.clone(), version_file_path: self.version_file_path.clone(),
embedding_cache_cap: self.embedding_cache_cap, embedding_cache_cap: self.embedding_cache_cap,
experimental_no_snapshot_compaction: self.experimental_no_snapshot_compaction, experimental_no_snapshot_compaction: self.experimental_no_snapshot_compaction,
s3_snapshot_options: self.s3_snapshot_options.clone(),
} }
} }
pub fn new(options: &IndexSchedulerOptions, auth_env: Env<WithoutTls>) -> Scheduler { pub fn new(options: &IndexSchedulerOptions, auth_env: Env<WithoutTls>) -> Scheduler {
let IndexSchedulerOptions {
version_file_path,
auth_path: _,
tasks_path: _,
update_file_path: _,
indexes_path: _,
snapshots_path,
dumps_path,
cli_webhook_url: _,
cli_webhook_authorization: _,
task_db_size: _,
index_base_map_size: _,
enable_mdb_writemap: _,
index_growth_amount: _,
index_count: _,
indexer_config,
autobatching_enabled,
cleanup_enabled: _,
max_number_of_tasks: _,
max_number_of_batched_tasks,
batched_tasks_size_limit,
instance_features: _,
auto_upgrade: _,
embedding_cache_cap,
experimental_no_snapshot_compaction,
} = options;
Scheduler { Scheduler {
must_stop_processing: MustStopProcessing::default(), must_stop_processing: MustStopProcessing::default(),
// we want to start the loop right away in case meilisearch was ctrl+Ced while processing things // we want to start the loop right away in case meilisearch was ctrl+Ced while processing things
wake_up: Arc::new(SignalEvent::auto(true)), wake_up: Arc::new(SignalEvent::auto(true)),
autobatching_enabled: *autobatching_enabled, autobatching_enabled: options.autobatching_enabled,
max_number_of_batched_tasks: *max_number_of_batched_tasks, max_number_of_batched_tasks: options.max_number_of_batched_tasks,
batched_tasks_size_limit: *batched_tasks_size_limit, batched_tasks_size_limit: options.batched_tasks_size_limit,
dumps_path: dumps_path.clone(), dumps_path: options.dumps_path.clone(),
snapshots_path: snapshots_path.clone(), snapshots_path: options.snapshots_path.clone(),
auth_env, auth_env,
version_file_path: version_file_path.clone(), version_file_path: options.version_file_path.clone(),
embedding_cache_cap: *embedding_cache_cap, embedding_cache_cap: options.embedding_cache_cap,
experimental_no_snapshot_compaction: *experimental_no_snapshot_compaction, experimental_no_snapshot_compaction: options.experimental_no_snapshot_compaction,
s3_snapshot_options: indexer_config.s3_snapshot_options.clone(),
} }
} }
} }

View File

@@ -1,27 +1,22 @@
use std::collections::{BTreeSet, HashMap, HashSet}; use std::collections::{BTreeSet, HashMap, HashSet};
use std::fs::{remove_file, File};
use std::io::{ErrorKind, Seek, SeekFrom};
use std::panic::{catch_unwind, AssertUnwindSafe}; use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use byte_unit::Byte;
use meilisearch_types::batches::{BatchEnqueuedAt, BatchId}; use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
use meilisearch_types::heed::{RoTxn, RwTxn}; use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::progress::{Progress, VariableNameStep}; use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::{self, ChannelCongestion}; use meilisearch_types::milli::{self, ChannelCongestion};
use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task}; use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH}; use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use milli::update::Settings as MilliSettings; use milli::update::Settings as MilliSettings;
use roaring::RoaringBitmap; use roaring::RoaringBitmap;
use tempfile::{PersistError, TempPath};
use time::OffsetDateTime; use time::OffsetDateTime;
use super::create_batch::Batch; use super::create_batch::Batch;
use crate::processing::{ use crate::processing::{
AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep, AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
IndexCompaction, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
TaskDeletionProgress, UpdateIndexProgress, UpdateIndexProgress,
}; };
use crate::utils::{ use crate::utils::{
self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task, self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
@@ -29,9 +24,6 @@ use crate::utils::{
}; };
use crate::{Error, IndexScheduler, Result, TaskId}; use crate::{Error, IndexScheduler, Result, TaskId};
/// The name of the copy of the data.mdb file used during compaction.
const DATA_MDB_COPY_NAME: &str = "data.mdb.cpy";
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct ProcessBatchInfo { pub struct ProcessBatchInfo {
/// The write channel congestion. None when unavailable: settings update. /// The write channel congestion. None when unavailable: settings update.
@@ -143,6 +135,9 @@ impl IndexScheduler {
Batch::Dump(task) => self Batch::Dump(task) => self
.process_dump_creation(progress, task) .process_dump_creation(progress, task)
.map(|tasks| (tasks, ProcessBatchInfo::default())), .map(|tasks| (tasks, ProcessBatchInfo::default())),
Batch::NetworkTopologyChanges { tasks } => self
.process_network_changes(progress, tasks)
.map(|tasks| (tasks, ProcessBatchInfo::default())),
Batch::IndexOperation { op, must_create_index } => { Batch::IndexOperation { op, must_create_index } => {
let index_uid = op.index_uid().to_string(); let index_uid = op.index_uid().to_string();
let index = if must_create_index { let index = if must_create_index {
@@ -426,47 +421,6 @@ impl IndexScheduler {
task.status = Status::Succeeded; task.status = Status::Succeeded;
Ok((vec![task], ProcessBatchInfo::default())) Ok((vec![task], ProcessBatchInfo::default()))
} }
Batch::IndexCompaction { index_uid: _, mut task } => {
let KindWithContent::IndexCompaction { index_uid } = &task.kind else {
unreachable!()
};
let rtxn = self.env.read_txn()?;
let ret = catch_unwind(AssertUnwindSafe(|| {
self.apply_compaction(&rtxn, &progress, index_uid)
}));
let (pre_size, post_size) = match ret {
Ok(Ok(stats)) => stats,
Ok(Err(Error::AbortedTask)) => return Err(Error::AbortedTask),
Ok(Err(e)) => return Err(e),
Err(e) => {
let msg = match e.downcast_ref::<&'static str>() {
Some(s) => *s,
None => match e.downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
return Err(Error::Export(Box::new(Error::ProcessBatchPanicked(
msg.to_string(),
))));
}
};
task.status = Status::Succeeded;
if let Some(Details::IndexCompaction {
index_uid: _,
pre_compaction_size,
post_compaction_size,
}) = task.details.as_mut()
{
*pre_compaction_size = Some(Byte::from_u64(pre_size));
*post_compaction_size = Some(Byte::from_u64(post_size));
}
Ok((vec![task], ProcessBatchInfo::default()))
}
Batch::Export { mut task } => { Batch::Export { mut task } => {
let KindWithContent::Export { url, api_key, payload_size, indexes } = &task.kind let KindWithContent::Export { url, api_key, payload_size, indexes } = &task.kind
else { else {
@@ -542,92 +496,6 @@ impl IndexScheduler {
} }
} }
fn apply_compaction(
&self,
rtxn: &RoTxn,
progress: &Progress,
index_uid: &str,
) -> Result<(u64, u64)> {
// 1. Verify that the index exists
if !self.index_mapper.index_exists(rtxn, index_uid)? {
return Err(Error::IndexNotFound(index_uid.to_owned()));
}
// 2. We retrieve the index and create a temporary file in the index directory
progress.update_progress(IndexCompaction::RetrieveTheIndex);
let index = self.index_mapper.index(rtxn, index_uid)?;
// the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
self.index_mapper
.set_currently_updating_index(Some((index_uid.to_string(), index.clone())));
progress.update_progress(IndexCompaction::CreateTemporaryFile);
let src_path = index.path().join("data.mdb");
let pre_size = std::fs::metadata(&src_path)?.len();
let dst_path = TempPath::from_path(index.path().join(DATA_MDB_COPY_NAME));
let file = File::create(&dst_path)?;
let mut file = tempfile::NamedTempFile::from_parts(file, dst_path);
// 3. We copy the index data to the temporary file
progress.update_progress(IndexCompaction::CopyAndCompactTheIndex);
index
.copy_to_file(file.as_file_mut(), CompactionOption::Enabled)
.map_err(|error| Error::Milli { error, index_uid: Some(index_uid.to_string()) })?;
// ...and reset the file position as specified in the documentation
file.seek(SeekFrom::Start(0))?;
// 4. We replace the index data file with the temporary file
progress.update_progress(IndexCompaction::PersistTheCompactedIndex);
match file.persist(src_path) {
Ok(file) => file.sync_all()?,
// TODO see if we have a _resource busy_ error and probably handle this by:
// 1. closing the index, 2. replacing and 3. reopening it
Err(PersistError { error, file: _ }) => return Err(Error::IoError(error)),
};
// 5. Prepare to close the index
progress.update_progress(IndexCompaction::CloseTheIndex);
// unmark that the index is the processing one so we don't keep a handle to it, preventing its closing
self.index_mapper.set_currently_updating_index(None);
self.index_mapper.close_index(rtxn, index_uid)?;
drop(index);
progress.update_progress(IndexCompaction::ReopenTheIndex);
// 6. Reopen the index
// The index will use the compacted data file when being reopened
let index = self.index_mapper.index(rtxn, index_uid)?;
// if the update processed successfully, we're going to store the new
// stats of the index. Since the tasks have already been processed and
// this is a non-critical operation. If it fails, we should not fail
// the entire batch.
let res = || -> Result<_> {
let mut wtxn = self.env.write_txn()?;
let index_rtxn = index.read_txn()?;
let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
.map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
self.index_mapper.store_stats_of(&mut wtxn, index_uid, &stats)?;
wtxn.commit()?;
Ok(stats.database_size)
}();
let post_size = match res {
Ok(post_size) => post_size,
Err(e) => {
tracing::error!(
error = &e as &dyn std::error::Error,
"Could not write the stats of the index"
);
0
}
};
Ok((pre_size, post_size))
}
/// Swap the index `lhs` with the index `rhs`. /// Swap the index `lhs` with the index `rhs`.
fn apply_index_swap( fn apply_index_swap(
&self, &self,
@@ -915,10 +783,9 @@ impl IndexScheduler {
let enqueued_tasks = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?; let enqueued_tasks = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
// 0. Check if any upgrade or compaction tasks were matched. // 0. Check if any upgrade task was matched.
// If so, we cancel all the failed or enqueued upgrade tasks. // If so, we cancel all the failed or enqueued upgrade tasks.
let upgrade_tasks = &self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)?; let upgrade_tasks = &self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)?;
let compaction_tasks = &self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)?;
let is_canceling_upgrade = !matched_tasks.is_disjoint(upgrade_tasks); let is_canceling_upgrade = !matched_tasks.is_disjoint(upgrade_tasks);
if is_canceling_upgrade { if is_canceling_upgrade {
let failed_tasks = self.queue.tasks.get_status(rtxn, Status::Failed)?; let failed_tasks = self.queue.tasks.get_status(rtxn, Status::Failed)?;
@@ -983,33 +850,7 @@ impl IndexScheduler {
} }
} }
// 3. If we are cancelling a compaction task, remove the tempfiles after incomplete compactions // 3. We now have a list of tasks to cancel, cancel them
for compaction_task in &tasks_to_cancel & compaction_tasks {
progress.update_progress(TaskCancelationProgress::CleaningCompactionLeftover);
let task = self.queue.tasks.get_task(rtxn, compaction_task)?.unwrap();
let Some(Details::IndexCompaction {
index_uid,
pre_compaction_size: _,
post_compaction_size: _,
}) = task.details
else {
unreachable!("wrong details for compaction task {compaction_task}")
};
let index_path = match self.index_mapper.index_mapping.get(rtxn, &index_uid)? {
Some(index_uuid) => self.index_mapper.index_path(index_uuid),
None => continue,
};
if let Err(e) = remove_file(index_path.join(DATA_MDB_COPY_NAME)) {
match e.kind() {
ErrorKind::NotFound => (),
_ => return Err(Error::IoError(e)),
}
}
}
// 4. We now have a list of tasks to cancel, cancel them
let (task_progress, progress_obj) = AtomicTaskStep::new(tasks_to_cancel.len() as u32); let (task_progress, progress_obj) = AtomicTaskStep::new(tasks_to_cancel.len() as u32);
progress.update_progress(progress_obj); progress.update_progress(progress_obj);

View File

@@ -16,6 +16,7 @@ use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOr
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError}; use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
use meilisearch_types::settings::{self, SecretPolicy}; use meilisearch_types::settings::{self, SecretPolicy};
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings}; use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
use roaring::RoaringBitmap;
use serde::Deserialize; use serde::Deserialize;
use ureq::{json, Response}; use ureq::{json, Response};
@@ -50,6 +51,7 @@ impl IndexScheduler {
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build(); let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let must_stop_processing = self.scheduler.must_stop_processing.clone(); let must_stop_processing = self.scheduler.must_stop_processing.clone();
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() { for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
let err = |err| Error::from_milli(err, Some(uid.to_string()));
if must_stop_processing.get() { if must_stop_processing.get() {
return Err(Error::AbortedTask); return Err(Error::AbortedTask);
} }
@@ -61,104 +63,31 @@ impl IndexScheduler {
)); ));
let ExportIndexSettings { filter, override_settings } = export_settings; let ExportIndexSettings { filter, override_settings } = export_settings;
let index = self.index(uid)?; let index = self.index(uid)?;
let index_rtxn = index.read_txn()?; let index_rtxn = index.read_txn()?;
let bearer = api_key.map(|api_key| format!("Bearer {api_key}")); let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
let filter_universe =
// First, check if the index already exists filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
let url = format!("{base_url}/indexes/{uid}"); let whole_universe =
let response = retry(&must_stop_processing, || { index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
let mut request = agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
false
}
Err(e) => return Err(e),
};
let primary_key = index
.primary_key(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
// Create the index
if !index_exists {
let url = format!("{base_url}/indexes");
retry(&must_stop_processing, || {
let mut request = agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
// Patch the index primary key
if index_exists && *override_settings {
let url = format!("{base_url}/indexes/{uid}");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
// Send the index settings
if !index_exists || *override_settings {
let mut settings =
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!("{base_url}/indexes/{uid}/settings");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}
let filter = filter
.as_ref()
.map(Filter::from_json)
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
.flatten();
let filter_universe = filter
.map(|f| f.evaluate(&index_rtxn, &index))
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let whole_universe = index
.documents_ids(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let universe = filter_universe.unwrap_or(whole_universe); let universe = filter_universe.unwrap_or(whole_universe);
let target = TargetInstance { base_url, api_key };
let fields_ids_map = index.fields_ids_map(&index_rtxn)?; let ctx = ExportContext {
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect(); index: &index,
index_rtxn: &index_rtxn,
// We don't need to keep this one alive as we will universe: &universe,
// spawn many threads to process the documents progress: &progress,
drop(index_rtxn); agent: &agent,
must_stop_processing: &must_stop_processing,
let total_documents = universe.len() as u32; };
let (step, progress_step) = AtomicDocumentStep::new(total_documents); let options = ExportOptions {
progress.update_progress(progress_step); index_uid: uid,
payload_size,
override_settings: *override_settings,
extra_headers: &Default::default(),
};
let total_documents = self.export_one_index(target, options, ctx)?;
output.insert( output.insert(
IndexUidPattern::new_unchecked(uid.clone()), IndexUidPattern::new_unchecked(uid.clone()),
@@ -167,36 +96,116 @@ impl IndexScheduler {
matched_documents: Some(total_documents as u64), matched_documents: Some(total_documents as u64),
}, },
); );
}
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB Ok(output)
let documents_url = format!("{base_url}/indexes/{uid}/documents"); }
pub(super) fn export_one_index(
&self,
target: TargetInstance<'_>,
options: ExportOptions<'_>,
ctx: ExportContext<'_>,
) -> Result<u64, Error> {
let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
let url = format!(
"{base_url}/indexes/{index_uid}",
base_url = target.base_url,
index_uid = options.index_uid
);
let response = retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => false,
Err(e) => return Err(e),
};
let primary_key =
ctx.index.primary_key(&ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
if !index_exists {
let url = format!("{base_url}/indexes", base_url = target.base_url);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if index_exists && options.override_settings {
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if !index_exists || options.override_settings {
let mut settings =
settings::settings(&ctx.index, &ctx.index_rtxn, SecretPolicy::RevealSecrets)
.map_err(err)?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!(
"{base_url}/indexes/{index_uid}/settings",
base_url = target.base_url,
index_uid = options.index_uid
);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}
let fields_ids_map = ctx.index.fields_ids_map(&ctx.index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
let total_documents = ctx.universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
ctx.progress.update_progress(progress_step);
let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
let documents_url = format!(
"{base_url}/indexes/{index_uid}/documents",
base_url = target.base_url,
index_uid = options.index_uid
);
let results = request_threads() let results = request_threads()
.broadcast(|ctx| { .broadcast(|broadcast| {
let index_rtxn = index let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;
.read_txn()
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let mut buffer = Vec::new(); let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new(); let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new(); let mut compressed_buffer = Vec::new();
for (i, docid) in universe.iter().enumerate() { for (i, docid) in ctx.universe.iter().enumerate() {
if i % ctx.num_threads() != ctx.index() { if i % broadcast.num_threads() != broadcast.index() {
continue; continue;
} }
let document = index let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;
.document(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let mut document = obkv_to_json(&all_fields, &fields_ids_map, document) let mut document =
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?; obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;
// TODO definitely factorize this code // TODO definitely factorize this code
'inject_vectors: { 'inject_vectors: {
let embeddings = index let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;
.embeddings(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
if embeddings.is_empty() { if embeddings.is_empty() {
break 'inject_vectors; break 'inject_vectors;
@@ -207,15 +216,12 @@ impl IndexScheduler {
.or_insert(serde_json::Value::Object(Default::default())); .or_insert(serde_json::Value::Object(Default::default()));
let serde_json::Value::Object(vectors) = vectors else { let serde_json::Value::Object(vectors) = vectors else {
return Err(Error::from_milli( return Err(err(milli::Error::UserError(
milli::Error::UserError(
milli::UserError::InvalidVectorsMapType { milli::UserError::InvalidVectorsMapType {
document_id: { document_id: {
if let Ok(Some(Ok(index))) = index if let Ok(Some(Ok(index))) = ctx
.external_id_of( .index
&index_rtxn, .external_id_of(&index_rtxn, std::iter::once(docid))
std::iter::once(docid),
)
.map(|it| it.into_iter().next()) .map(|it| it.into_iter().next())
{ {
index index
@@ -225,9 +231,7 @@ impl IndexScheduler {
}, },
value: vectors.clone(), value: vectors.clone(),
}, },
), )));
Some(uid.to_string()),
));
}; };
for ( for (
@@ -236,9 +240,9 @@ impl IndexScheduler {
) in embeddings ) in embeddings
{ {
let embeddings = ExplicitVectors { let embeddings = ExplicitVectors {
embeddings: Some( embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
VectorOrArrayOfVectors::from_array_of_vectors(embeddings), embeddings,
), )),
regenerate: regenerate && regenerate: regenerate &&
// Meilisearch does not handle well dumps with fragments, because as the fragments // Meilisearch does not handle well dumps with fragments, because as the fragments
// are marked as user-provided, // are marked as user-provided,
@@ -246,17 +250,16 @@ impl IndexScheduler {
// To prevent this, we mark embeddings as non-regenerate in this case. // To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments, !has_fragments,
}; };
vectors.insert( vectors
embedder_name, .insert(embedder_name, serde_json::to_value(embeddings).unwrap());
serde_json::to_value(embeddings).unwrap(),
);
} }
} }
tmp_buffer.clear(); tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document) serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from) .map_err(milli::InternalError::from)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?; .map_err(milli::Error::from)
.map_err(err)?;
// Make sure we put at least one document in the buffer even // Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending // though we might go above the buffer limit before sending
@@ -264,15 +267,11 @@ impl IndexScheduler {
// We compress the documents before sending them // We compress the documents before sending them
let mut encoder = let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default()); GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder encoder.write_all(&buffer).map_err(milli::Error::from).map_err(err)?;
.write_all(&buffer) encoder.finish().map_err(milli::Error::from).map_err(err)?;
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
encoder
.finish()
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
retry(&must_stop_processing, || { retry(ctx.must_stop_processing, || {
let mut request = agent.post(&documents_url); let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson"); request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip"); request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer { if let Some(bearer) = &bearer {
@@ -290,8 +289,8 @@ impl IndexScheduler {
} }
} }
retry(&must_stop_processing, || { retry(ctx.must_stop_processing, || {
let mut request = agent.post(&documents_url); let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson"); request = request.set("Content-Type", "application/x-ndjson");
if let Some(bearer) = &bearer { if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer); request = request.set("Authorization", bearer);
@@ -301,20 +300,12 @@ impl IndexScheduler {
Ok(()) Ok(())
}) })
.map_err(|e| { .map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
Error::from_milli(
milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
Some(uid.to_string()),
)
})?;
for result in results { for result in results {
result?; result?;
} }
step.store(total_documents, atomic::Ordering::Relaxed); step.store(total_documents, atomic::Ordering::Relaxed);
} Ok(total_documents as u64)
Ok(output)
} }
} }
@@ -374,4 +365,27 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
} }
} }
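The `retry(...)` / `into_backoff_error` calls above follow a retry-with-exponential-backoff pattern for transient HTTP failures. The stand-in below sketches that pattern in plain std; the real helper also honours `must_stop_processing` and Retry-After hints, which are omitted here.

```rust
use std::thread::sleep;
use std::time::Duration;

fn retry_with_backoff<T, E>(
    mut attempts: u32,
    mut op: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut delay = Duration::from_millis(100);
    loop {
        match op() {
            Ok(value) => return Ok(value),
            Err(err) if attempts <= 1 => return Err(err),
            Err(_) => {
                sleep(delay);
                delay *= 2; // exponential backoff between attempts
                attempts -= 1;
            }
        }
    }
}

fn main() {
    let mut calls = 0;
    let result = retry_with_backoff(3, || {
        calls += 1;
        if calls < 3 { Err("transient failure") } else { Ok("sent") }
    });
    assert_eq!(result, Ok("sent"));
}
```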
// export_one_index arguments
pub(super) struct TargetInstance<'a> {
pub(super) base_url: &'a str,
pub(super) api_key: Option<&'a str>,
}
pub(super) struct ExportOptions<'a> {
pub(super) index_uid: &'a str,
pub(super) payload_size: Option<&'a Byte>,
pub(super) override_settings: bool,
pub(super) extra_headers: &'a hashbrown::HashMap<String, String>,
}
pub(super) struct ExportContext<'a> {
pub(super) index: &'a meilisearch_types::milli::Index,
pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
pub(super) universe: &'a RoaringBitmap,
pub(super) progress: &'a Progress,
pub(super) agent: &'a ureq::Agent,
pub(super) must_stop_processing: &'a MustStopProcessing,
}
// progress related
enum ExportIndex {} enum ExportIndex {}
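For reference, a self-contained sketch of the payload construction used by the export path above: documents are serialized one JSON object per line (NDJSON), then gzip-compressed before being POSTed with `Content-Encoding: gzip`. It uses the same `serde_json` and `flate2` crates as the code above; the document fields are illustrative.

```rust
use std::io::Write;

use flate2::write::GzEncoder;
use flate2::Compression;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let documents = [
        serde_json::json!({ "id": 1, "title": "Carol" }),
        serde_json::json!({ "id": 2, "title": "Wonder Woman" }),
    ];

    // one JSON document per line (NDJSON)
    let mut buffer = Vec::new();
    for document in &documents {
        serde_json::to_writer(&mut buffer, document)?;
        buffer.push(b'\n');
    }

    // gzip the whole payload before sending it with `Content-Encoding: gzip`
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(&buffer)?;
    let compressed = encoder.finish()?;
    assert!(!compressed.is_empty());
    Ok(())
}
```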

View File

@@ -97,7 +97,7 @@ impl IndexScheduler {
.embedding_configs() .embedding_configs()
.embedding_configs(index_wtxn) .embedding_configs(index_wtxn)
.map_err(|e| Error::from_milli(e.into(), Some(index_uid.clone())))?; .map_err(|e| Error::from_milli(e.into(), Some(index_uid.clone())))?;
let embedders = self.embedders(index_uid.clone(), embedders)?; let embedders = self.embedders(&index_uid, embedders)?;
for operation in operations { for operation in operations {
match operation { match operation {
DocumentOperation::Replace(_content_uuid) => { DocumentOperation::Replace(_content_uuid) => {
@@ -284,7 +284,7 @@ impl IndexScheduler {
.embedding_configs() .embedding_configs()
.embedding_configs(index_wtxn) .embedding_configs(index_wtxn)
.map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?; .map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
let embedders = self.embedders(index_uid.clone(), embedders)?; let embedders = self.embedders(&index_uid, embedders)?;
progress.update_progress(DocumentEditionProgress::Indexing); progress.update_progress(DocumentEditionProgress::Indexing);
congestion = Some( congestion = Some(
@@ -434,7 +434,7 @@ impl IndexScheduler {
.embedding_configs() .embedding_configs()
.embedding_configs(index_wtxn) .embedding_configs(index_wtxn)
.map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?; .map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
let embedders = self.embedders(index_uid.clone(), embedders)?; let embedders = self.embedders(&index_uid, embedders)?;
progress.update_progress(DocumentDeletionProgress::Indexing); progress.update_progress(DocumentDeletionProgress::Indexing);
congestion = Some( congestion = Some(

View File

@@ -12,8 +12,6 @@ use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress};
use crate::queue::TaskQueue; use crate::queue::TaskQueue;
use crate::{Error, IndexScheduler, Result}; use crate::{Error, IndexScheduler, Result};
const UPDATE_FILES_DIR_NAME: &str = "update_files";
/// # Safety /// # Safety
/// ///
/// See [`EnvOpenOptions::open`]. /// See [`EnvOpenOptions::open`].
@@ -80,32 +78,10 @@ impl IndexScheduler {
pub(super) fn process_snapshot( pub(super) fn process_snapshot(
&self, &self,
progress: Progress, progress: Progress,
tasks: Vec<Task>, mut tasks: Vec<Task>,
) -> Result<Vec<Task>> { ) -> Result<Vec<Task>> {
progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation); progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation);
match self.scheduler.s3_snapshot_options.clone() {
Some(options) => {
#[cfg(not(unix))]
{
let _ = options;
panic!("Non-unix platforms do not support S3 snapshotting");
}
#[cfg(unix)]
self.runtime
.as_ref()
.expect("Runtime not initialized")
.block_on(self.process_snapshot_to_s3(progress, options, tasks))
}
None => self.process_snapshots_to_disk(progress, tasks),
}
}
fn process_snapshots_to_disk(
&self,
progress: Progress,
mut tasks: Vec<Task>,
) -> Result<Vec<Task>, Error> {
fs::create_dir_all(&self.scheduler.snapshots_path)?; fs::create_dir_all(&self.scheduler.snapshots_path)?;
let temp_snapshot_dir = tempfile::tempdir()?; let temp_snapshot_dir = tempfile::tempdir()?;
@@ -152,7 +128,7 @@ impl IndexScheduler {
let rtxn = self.env.read_txn()?; let rtxn = self.env.read_txn()?;
// 2.4 Create the update files directory // 2.4 Create the update files directory
let update_files_dir = temp_snapshot_dir.path().join(UPDATE_FILES_DIR_NAME); let update_files_dir = temp_snapshot_dir.path().join("update_files");
fs::create_dir_all(&update_files_dir)?; fs::create_dir_all(&update_files_dir)?;
// 2.5 Only copy the update files of the enqueued tasks // 2.5 Only copy the update files of the enqueued tasks
@@ -164,7 +140,7 @@ impl IndexScheduler {
let task = let task =
self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?; self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
if let Some(content_uuid) = task.content_uuid() { if let Some(content_uuid) = task.content_uuid() {
let src = self.queue.file_store.update_path(content_uuid); let src = self.queue.file_store.get_update_path(content_uuid);
let dst = update_files_dir.join(content_uuid.to_string()); let dst = update_files_dir.join(content_uuid.to_string());
fs::copy(src, dst)?; fs::copy(src, dst)?;
} }
@@ -230,407 +206,4 @@ impl IndexScheduler {
Ok(tasks) Ok(tasks)
} }
#[cfg(unix)]
pub(super) async fn process_snapshot_to_s3(
&self,
progress: Progress,
opts: meilisearch_types::milli::update::S3SnapshotOptions,
mut tasks: Vec<Task>,
) -> Result<Vec<Task>> {
use meilisearch_types::milli::update::S3SnapshotOptions;
let S3SnapshotOptions {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts,
s3_compression_level: level,
s3_signature_duration,
s3_multipart_part_size,
} = opts;
let must_stop_processing = self.scheduler.must_stop_processing.clone();
let retry_backoff = backoff::ExponentialBackoff::default();
let db_name = {
let mut base_path = self.env.path().to_owned();
base_path.pop();
base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms").to_string()
};
let (reader, writer) = std::io::pipe()?;
let uploader_task = tokio::spawn(multipart_stream_to_s3(
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts,
s3_signature_duration,
s3_multipart_part_size,
must_stop_processing,
retry_backoff,
db_name,
reader,
));
let index_scheduler = IndexScheduler::private_clone(self);
let builder_task = tokio::task::spawn_blocking(move || {
stream_tarball_into_pipe(progress, level, writer, index_scheduler)
});
let (uploader_result, builder_result) = tokio::join!(uploader_task, builder_task);
// Check uploader result first to early return on task abortion.
// safety: JoinHandle can return an error if the task was aborted, cancelled, or panicked.
uploader_result.unwrap()?;
builder_result.unwrap()?;
for task in &mut tasks {
task.status = Status::Succeeded;
}
Ok(tasks)
}
}
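
The snapshot-to-S3 path above couples a blocking tarball builder to an async uploader through an anonymous pipe. A minimal, self-contained sketch of that pattern follows; it mirrors the `std::io::pipe` / `tokio::net::unix::pipe::Receiver` wiring shown here but uses placeholder data instead of the real tarball and S3 upload. It assumes a Unix target, a Rust toolchain with the stable `std::io::pipe`, and tokio with the `net`, `rt-multi-thread`, and `macros` features.

```rust
// Sketch: a blocking producer writes into an anonymous pipe while an async
// consumer drains it through tokio's non-blocking unix-pipe receiver.
use std::io::Write as _;
use std::os::fd::OwnedFd;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (reader, mut writer) = std::io::pipe()?;

    // Blocking side: runs on the blocking thread pool, like the tarball builder above.
    let producer = tokio::task::spawn_blocking(move || -> std::io::Result<()> {
        for chunk in 0..4u32 {
            writer.write_all(&chunk.to_le_bytes())?;
        }
        Ok(()) // dropping `writer` closes the pipe and signals end-of-stream
    });

    // Async side: wrap the read end into a non-blocking receiver and drain it.
    let reader = tokio::net::unix::pipe::Receiver::from_owned_fd(OwnedFd::from(reader))?;
    let mut buf = Vec::with_capacity(64);
    loop {
        reader.readable().await?;
        match reader.try_read_buf(&mut buf) {
            Ok(0) => break, // writer dropped: end of stream
            Ok(_) => continue,
            // The readiness event can be a false positive.
            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
    producer.await.expect("producer panicked")?;
    println!("received {} bytes", buf.len());
    Ok(())
}
```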
/// Streams a tarball of the database content into a pipe.
#[cfg(unix)]
fn stream_tarball_into_pipe(
progress: Progress,
level: u32,
writer: std::io::PipeWriter,
index_scheduler: IndexScheduler,
) -> std::result::Result<(), Error> {
use std::io::Write as _;
use std::path::Path;
let writer = flate2::write::GzEncoder::new(writer, flate2::Compression::new(level));
let mut tarball = tar::Builder::new(writer);
// 1. Snapshot the version file
tarball
.append_path_with_name(&index_scheduler.scheduler.version_file_path, VERSION_FILE_NAME)?;
// 2. Snapshot the index scheduler LMDB env
progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
let tasks_env_file = index_scheduler.env.try_clone_inner_file()?;
let path = Path::new("tasks").join("data.mdb");
append_file_to_tarball(&mut tarball, path, tasks_env_file)?;
// 2.3 Create a read transaction on the index-scheduler
let rtxn = index_scheduler.env.read_txn()?;
// 2.4 Create the update files directory
// And only copy the update files of the enqueued tasks
progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
let enqueued = index_scheduler.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);
progress.update_progress(update_file_progress);
// We create the update_files directory so that it
// always exists even if there are no update files
let update_files_dir = Path::new(UPDATE_FILES_DIR_NAME);
let src_update_files_dir = {
let mut path = index_scheduler.env.path().to_path_buf();
path.pop();
path.join(UPDATE_FILES_DIR_NAME)
};
tarball.append_dir(update_files_dir, src_update_files_dir)?;
for task_id in enqueued {
let task = index_scheduler
.queue
.tasks
.get_task(&rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;
if let Some(content_uuid) = task.content_uuid() {
use std::fs::File;
let src = index_scheduler.queue.file_store.update_path(content_uuid);
let mut update_file = File::open(src)?;
let path = update_files_dir.join(content_uuid.to_string());
tarball.append_file(path, &mut update_file)?;
}
atomic.fetch_add(1, Ordering::Relaxed);
}
// 3. Snapshot every index
progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexes);
let index_mapping = index_scheduler.index_mapper.index_mapping;
let nb_indexes = index_mapping.len(&rtxn)? as u32;
let indexes_dir = Path::new("indexes");
let indexes_references: Vec<_> = index_scheduler
.index_mapper
.index_mapping
.iter(&rtxn)?
.map(|res| res.map_err(Error::from).map(|(name, uuid)| (name.to_string(), uuid)))
.collect::<Result<_, Error>>()?;
// It's prettier to use a for loop instead of the IndexMapper::try_for_each_index
// method, especially when we need to access the UUID, local path and index number.
for (i, (name, uuid)) in indexes_references.into_iter().enumerate() {
progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
&name, i as u32, nb_indexes,
));
let path = indexes_dir.join(uuid.to_string()).join("data.mdb");
let index = index_scheduler.index_mapper.index(&rtxn, &name)?;
let index_file = index.try_clone_inner_file()?;
tracing::trace!("Appending index file for {name} in {}", path.display());
append_file_to_tarball(&mut tarball, path, index_file)?;
}
drop(rtxn);
// 4. Snapshot the auth LMDB env
progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
let auth_env_file = index_scheduler.scheduler.auth_env.try_clone_inner_file()?;
let path = Path::new("auth").join("data.mdb");
append_file_to_tarball(&mut tarball, path, auth_env_file)?;
let mut gzencoder = tarball.into_inner()?;
gzencoder.flush()?;
gzencoder.try_finish()?;
let mut writer = gzencoder.finish()?;
writer.flush()?;
Result::<_, Error>::Ok(())
}
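
`stream_tarball_into_pipe` layers a `tar::Builder` on top of a `flate2` gzip encoder and then unwinds both layers in order (`into_inner`, flush, finish). A self-contained sketch of that layering follows; the archive path and contents are placeholders, and writing to a `Vec` stands in for the pipe writer used above.

```rust
// Sketch: build a .tar.gz stream into any `Write` destination.
use std::io::Write as _;

fn write_tar_gz<W: std::io::Write>(dest: W, level: u32) -> std::io::Result<()> {
    let encoder = flate2::write::GzEncoder::new(dest, flate2::Compression::new(level));
    let mut tarball = tar::Builder::new(encoder);

    // Append an in-memory blob under a chosen path inside the archive.
    let data = b"hello from the snapshot sketch";
    let mut header = tar::Header::new_gnu();
    header.set_size(data.len() as u64);
    header.set_mode(0o644);
    header.set_cksum();
    tarball.append_data(&mut header, "tasks/data.mdb", &data[..])?;

    // Unwind: finish the tar stream, then flush and finish the gzip stream.
    let mut encoder = tarball.into_inner()?;
    encoder.flush()?;
    let mut dest = encoder.finish()?;
    dest.flush()?;
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut out = Vec::new();
    write_tar_gz(&mut out, 6)?;
    println!("archive size: {} bytes", out.len());
    Ok(())
}
```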
#[cfg(unix)]
fn append_file_to_tarball<W, P>(
tarball: &mut tar::Builder<W>,
path: P,
mut auth_env_file: fs::File,
) -> Result<(), Error>
where
W: std::io::Write,
P: AsRef<std::path::Path>,
{
use std::io::{Seek as _, SeekFrom};
// Note: A previous snapshot operation may have left the cursor
// at the end of the file so we need to seek to the start.
auth_env_file.seek(SeekFrom::Start(0))?;
tarball.append_file(path, &mut auth_env_file)?;
Ok(())
}
/// Streams the content read from the given reader to S3.
#[cfg(unix)]
#[allow(clippy::too_many_arguments)]
async fn multipart_stream_to_s3(
s3_bucket_url: String,
s3_bucket_region: String,
s3_bucket_name: String,
s3_snapshot_prefix: String,
s3_access_key: String,
s3_secret_key: String,
s3_max_in_flight_parts: std::num::NonZero<usize>,
s3_signature_duration: std::time::Duration,
s3_multipart_part_size: u64,
must_stop_processing: super::MustStopProcessing,
retry_backoff: backoff::exponential::ExponentialBackoff<backoff::SystemClock>,
db_name: String,
reader: std::io::PipeReader,
) -> Result<(), Error> {
use std::collections::VecDeque;
use std::io;
use std::os::fd::OwnedFd;
use std::path::PathBuf;
use bytes::{Bytes, BytesMut};
use reqwest::{Client, Response};
use rusty_s3::actions::CreateMultipartUpload;
use rusty_s3::{Bucket, BucketError, Credentials, S3Action as _, UrlStyle};
use tokio::task::JoinHandle;
let reader = OwnedFd::from(reader);
let reader = tokio::net::unix::pipe::Receiver::from_owned_fd(reader)?;
let s3_snapshot_prefix = PathBuf::from(s3_snapshot_prefix);
let url =
s3_bucket_url.parse().map_err(BucketError::ParseError).map_err(Error::S3BucketError)?;
let bucket = Bucket::new(url, UrlStyle::Path, s3_bucket_name, s3_bucket_region)
.map_err(Error::S3BucketError)?;
let credential = Credentials::new(s3_access_key, s3_secret_key);
// Note for the future (rust 1.91+): use with_added_extension, it's prettier
let object_path = s3_snapshot_prefix.join(format!("{db_name}.snapshot"));
// Note: It doesn't work on Windows and if a port to this platform is needed,
// use the slash-path crate or similar to get the correct path separator.
let object = object_path.display().to_string();
let action = bucket.create_multipart_upload(Some(&credential), &object);
let url = action.sign(s3_signature_duration);
let client = Client::new();
let resp = client.post(url).send().await.map_err(Error::S3HttpError)?;
let status = resp.status();
let body = match resp.error_for_status_ref() {
Ok(_) => resp.text().await.map_err(Error::S3HttpError)?,
Err(_) => {
return Err(Error::S3Error { status, body: resp.text().await.unwrap_or_default() })
}
};
let multipart =
CreateMultipartUpload::parse_response(&body).map_err(|e| Error::S3XmlError(Box::new(e)))?;
tracing::debug!("Starting the upload of the snapshot to {object}");
// We use this bumpalo for etags strings.
let bump = bumpalo::Bump::new();
let mut etags = Vec::<&str>::new();
let mut in_flight = VecDeque::<(JoinHandle<reqwest::Result<Response>>, Bytes)>::with_capacity(
s3_max_in_flight_parts.get(),
);
// Part numbers start at 1 and cannot be larger than 10k
for part_number in 1u16.. {
if must_stop_processing.get() {
return Err(Error::AbortedTask);
}
let part_upload =
bucket.upload_part(Some(&credential), &object, part_number, multipart.upload_id());
let url = part_upload.sign(s3_signature_duration);
// Wait for a buffer to be ready if there are in-flight parts that landed
let mut buffer = if in_flight.len() >= s3_max_in_flight_parts.get() {
let (handle, buffer) = in_flight.pop_front().expect("At least one in flight request");
let resp = join_and_map_error(handle).await?;
extract_and_append_etag(&bump, &mut etags, resp.headers())?;
let mut buffer = match buffer.try_into_mut() {
Ok(buffer) => buffer,
Err(_) => unreachable!("All bytes references were consumed in the task"),
};
buffer.clear();
buffer
} else {
BytesMut::with_capacity(s3_multipart_part_size as usize)
};
// If we successfully read enough bytes,
// we can continue and send the buffer/part
while buffer.len() < (s3_multipart_part_size as usize / 2) {
// Wait for the pipe to be readable
reader.readable().await?;
match reader.try_read_buf(&mut buffer) {
Ok(0) => break,
// We read some bytes but maybe not enough
Ok(_) => continue,
// The readiness event is a false positive.
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
Err(e) => return Err(e.into()),
}
}
if buffer.is_empty() {
// Break the loop if the buffer is
// empty after we tried to read bytes
break;
}
let body = buffer.freeze();
tracing::trace!("Sending part {part_number}");
let task = tokio::spawn({
let client = client.clone();
let body = body.clone();
backoff::future::retry(retry_backoff.clone(), move || {
let client = client.clone();
let url = url.clone();
let body = body.clone();
async move {
match client.put(url).body(body).send().await {
Ok(resp) if resp.status().is_client_error() => {
resp.error_for_status().map_err(backoff::Error::Permanent)
}
Ok(resp) => Ok(resp),
Err(e) => Err(backoff::Error::transient(e)),
}
}
})
});
in_flight.push_back((task, body));
}
for (handle, _buffer) in in_flight {
let resp = join_and_map_error(handle).await?;
extract_and_append_etag(&bump, &mut etags, resp.headers())?;
}
tracing::debug!("Finalizing the multipart upload");
let action = bucket.complete_multipart_upload(
Some(&credential),
&object,
multipart.upload_id(),
etags.iter().map(AsRef::as_ref),
);
let url = action.sign(s3_signature_duration);
let body = action.body();
let resp = backoff::future::retry(retry_backoff, move || {
let client = client.clone();
let url = url.clone();
let body = body.clone();
async move {
match client.post(url).body(body).send().await {
Ok(resp) if resp.status().is_client_error() => {
Err(backoff::Error::Permanent(Error::S3Error {
status: resp.status(),
body: resp.text().await.unwrap_or_default(),
}))
}
Ok(resp) => Ok(resp),
Err(e) => Err(backoff::Error::transient(Error::S3HttpError(e))),
}
}
})
.await?;
let status = resp.status();
let body = resp.text().await.map_err(|e| Error::S3Error { status, body: e.to_string() })?;
if status.is_success() {
Ok(())
} else {
Err(Error::S3Error { status, body })
}
}
#[cfg(unix)]
async fn join_and_map_error(
join_handle: tokio::task::JoinHandle<Result<reqwest::Response, reqwest::Error>>,
) -> Result<reqwest::Response> {
// safety: Panic happens if the task (JoinHandle) was aborted, cancelled, or panicked
let request = join_handle.await.unwrap();
let resp = request.map_err(Error::S3HttpError)?;
match resp.error_for_status_ref() {
Ok(_) => Ok(resp),
Err(_) => Err(Error::S3Error {
status: resp.status(),
body: resp.text().await.unwrap_or_default(),
}),
}
}
#[cfg(unix)]
fn extract_and_append_etag<'b>(
bump: &'b bumpalo::Bump,
etags: &mut Vec<&'b str>,
headers: &reqwest::header::HeaderMap,
) -> Result<()> {
use reqwest::header::ETAG;
let etag = headers.get(ETAG).ok_or_else(|| Error::S3XmlError("Missing ETag header".into()))?;
let etag = etag.to_str().map_err(|e| Error::S3XmlError(Box::new(e)))?;
etags.push(bump.alloc_str(etag));
Ok(())
} }
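
Each part upload above is wrapped in `backoff::future::retry`, treating HTTP 4xx responses as permanent (retrying will not help) and transport errors as transient. The same policy, condensed into a standalone helper for readability; the function name is illustrative, but the logic mirrors the loop above and uses only the `backoff`, `reqwest`, and `bytes` APIs already in play.

```rust
// Sketch: retry a PUT with exponential backoff; give up immediately on 4xx.
async fn put_with_retries(
    client: reqwest::Client,
    url: reqwest::Url,
    body: bytes::Bytes,
) -> Result<reqwest::Response, reqwest::Error> {
    backoff::future::retry(backoff::ExponentialBackoff::default(), move || {
        let client = client.clone();
        let url = url.clone();
        let body = body.clone();
        async move {
            match client.put(url).body(body).send().await {
                // 4xx: permanent, stop retrying.
                Ok(resp) if resp.status().is_client_error() => {
                    resp.error_for_status().map_err(backoff::Error::Permanent)
                }
                Ok(resp) => Ok(resp),
                // Connection/transport errors are worth retrying.
                Err(e) => Err(backoff::Error::transient(e)),
            }
        }
    })
    .await
}
```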

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,] [timestamp] [4,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", } 1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", } 2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", } 3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Status: ### Status:
enqueued [0,] enqueued [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Status: ### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Status: ### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,] [timestamp] [0,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Batch to tasks mapping: ### Batch to tasks mapping:
0 [0,] 0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,] [timestamp] [0,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Batch to tasks mapping: ### Batch to tasks mapping:
0 [0,] 0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,] [timestamp] [0,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Batch to tasks mapping: ### Batch to tasks mapping:
0 [0,] 0 [0,]

View File

@@ -742,11 +742,11 @@ fn basic_get_stats() {
"documentEdition": 0, "documentEdition": 0,
"dumpCreation": 0, "dumpCreation": 0,
"export": 0, "export": 0,
"indexCompaction": 0,
"indexCreation": 3, "indexCreation": 3,
"indexDeletion": 0, "indexDeletion": 0,
"indexSwap": 0, "indexSwap": 0,
"indexUpdate": 0, "indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0, "settingsUpdate": 0,
"snapshotCreation": 0, "snapshotCreation": 0,
"taskCancelation": 0, "taskCancelation": 0,
@@ -757,7 +757,7 @@ fn basic_get_stats() {
"###); "###);
handle.advance_till([Start, BatchCreated]); handle.advance_till([Start, BatchCreated]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###" snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
{ {
"indexes": { "indexes": {
"catto": 1, "catto": 1,
@@ -777,7 +777,6 @@ fn basic_get_stats() {
"documentEdition": 0, "documentEdition": 0,
"dumpCreation": 0, "dumpCreation": 0,
"export": 0, "export": 0,
"indexCompaction": 0,
"indexCreation": 3, "indexCreation": 3,
"indexDeletion": 0, "indexDeletion": 0,
"indexSwap": 0, "indexSwap": 0,
@@ -789,7 +788,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0 "upgradeDatabase": 0
} }
} }
"###); "#);
handle.advance_till([ handle.advance_till([
InsideProcessBatch, InsideProcessBatch,
@@ -799,7 +798,7 @@ fn basic_get_stats() {
Start, Start,
BatchCreated, BatchCreated,
]); ]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###" snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
{ {
"indexes": { "indexes": {
"catto": 1, "catto": 1,
@@ -819,7 +818,6 @@ fn basic_get_stats() {
"documentEdition": 0, "documentEdition": 0,
"dumpCreation": 0, "dumpCreation": 0,
"export": 0, "export": 0,
"indexCompaction": 0,
"indexCreation": 3, "indexCreation": 3,
"indexDeletion": 0, "indexDeletion": 0,
"indexSwap": 0, "indexSwap": 0,
@@ -831,7 +829,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0 "upgradeDatabase": 0
} }
} }
"###); "#);
// now we make one more batch, the started_at field of the new tasks will be past `second_start_time` // now we make one more batch, the started_at field of the new tasks will be past `second_start_time`
handle.advance_till([ handle.advance_till([
@@ -842,7 +840,7 @@ fn basic_get_stats() {
Start, Start,
BatchCreated, BatchCreated,
]); ]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###" snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
{ {
"indexes": { "indexes": {
"catto": 1, "catto": 1,
@@ -862,7 +860,6 @@ fn basic_get_stats() {
"documentEdition": 0, "documentEdition": 0,
"dumpCreation": 0, "dumpCreation": 0,
"export": 0, "export": 0,
"indexCompaction": 0,
"indexCreation": 3, "indexCreation": 3,
"indexDeletion": 0, "indexDeletion": 0,
"indexSwap": 0, "indexSwap": 0,
@@ -874,7 +871,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0 "upgradeDatabase": 0
} }
} }
"###); "#);
} }
#[test] #[test]

View File

@@ -121,7 +121,7 @@ fn import_vectors() {
insta::assert_json_snapshot!(simple_hf_config.embedder_options); insta::assert_json_snapshot!(simple_hf_config.embedder_options);
let simple_hf_name = name.clone(); let simple_hf_name = name.clone();
let configs = index_scheduler.embedders("doggos".to_string(), configs).unwrap(); let configs = index_scheduler.embedders("doggos", configs).unwrap();
let hf_runtime = configs.get(&simple_hf_name).unwrap(); let hf_runtime = configs.get(&simple_hf_name).unwrap();
let hf_embedder = &hf_runtime.embedder; let hf_embedder = &hf_runtime.embedder;
let beagle_embed = hf_embedder let beagle_embed = hf_embedder

View File

@@ -126,7 +126,7 @@ impl IndexScheduler {
std::fs::create_dir_all(&options.auth_path).unwrap(); std::fs::create_dir_all(&options.auth_path).unwrap();
let auth_env = open_auth_store_env(&options.auth_path).unwrap(); let auth_env = open_auth_store_env(&options.auth_path).unwrap();
let index_scheduler = let index_scheduler =
Self::new_test(options, auth_env, version, None, sender, planned_failures).unwrap(); Self::new(options, auth_env, version, None, sender, planned_failures).unwrap();
// To be 100% consistent between all test we're going to start the scheduler right now // To be 100% consistent between all test we're going to start the scheduler right now
// and ensure it's in the expected starting state. // and ensure it's in the expected starting state.

View File

@@ -1,7 +1,7 @@
use anyhow::bail; use anyhow::bail;
use meilisearch_types::heed::{Env, RwTxn, WithoutTls}; use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task}; use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
use meilisearch_types::versioning; use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use time::OffsetDateTime; use time::OffsetDateTime;
use tracing::info; use tracing::info;
@@ -9,82 +9,76 @@ use crate::queue::TaskQueue;
use crate::versioning::Versioning; use crate::versioning::Versioning;
trait UpgradeIndexScheduler { trait UpgradeIndexScheduler {
fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()>; fn upgrade(
/// Whether the migration should be applied, depending on the initial version of the index scheduler before &self,
/// any migration was applied env: &Env<WithoutTls>,
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool; wtxn: &mut RwTxn,
/// A progress-centric description of the migration original: (u32, u32, u32),
fn description(&self) -> &'static str; ) -> anyhow::Result<()>;
fn target_version(&self) -> (u32, u32, u32);
} }
/// Upgrade the index scheduler to the binary version.
///
/// # Warning
///
/// The current implementation uses a single wtxn to the index scheduler for the whole duration of the upgrade.
/// If migrations start taking a long time, it might prevent tasks from being registered.
/// If this issue manifests, then it can be mitigated by adding a `fn target_version` to `UpgradeIndexScheduler`,
/// to be able to write intermediate versions and drop the wtxn between applying migrations.
pub fn upgrade_index_scheduler( pub fn upgrade_index_scheduler(
env: &Env<WithoutTls>, env: &Env<WithoutTls>,
versioning: &Versioning, versioning: &Versioning,
initial_version: (u32, u32, u32), from: (u32, u32, u32),
to: (u32, u32, u32),
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let target_major: u32 = versioning::VERSION_MAJOR; let current_major = to.0;
let target_minor: u32 = versioning::VERSION_MINOR; let current_minor = to.1;
let target_patch: u32 = versioning::VERSION_PATCH; let current_patch = to.2;
let target_version = (target_major, target_minor, target_patch);
if initial_version == target_version {
return Ok(());
}
let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[ let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
// List all upgrade functions to apply in order here. // This is the last upgrade function, it will be called when the index is up to date.
// any other upgrade function should be added before this one.
&ToCurrentNoOp {},
]; ];
let (initial_major, initial_minor, initial_patch) = initial_version; let start = match from {
(1, 12, _) => 0,
if initial_version > target_version { (1, 13, _) => 0,
(1, 14, _) => 0,
(1, 15, _) => 0,
(1, 16, _) => 0,
(1, 17, _) => 0,
(1, 18, _) => 0,
(1, 19, _) => 0,
(1, 20, _) => 0,
(1, 21, _) => 0,
(major, minor, patch) => {
if major > current_major
|| (major == current_major && minor > current_minor)
|| (major == current_major && minor == current_minor && patch > current_patch)
{
bail!( bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is higher than the Meilisearch version {target_major}.{target_minor}.{target_patch}. Downgrade is not supported", "Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
); );
} } else if major < 1 || (major == current_major && minor < 12) {
if initial_version < (1, 12, 0) {
bail!( bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{initial_major}.{initial_minor}.{initial_patch} and import it in the v{target_major}.{target_minor}.{target_patch}", "Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
); );
} else {
bail!("Unknown database version: v{major}.{minor}.{patch}");
} }
}
};
info!("Upgrading the task queue"); info!("Upgrading the task queue");
let mut wtxn = env.write_txn()?; let mut local_from = from;
let migration_count = upgrade_functions.len(); for upgrade in upgrade_functions[start..].iter() {
for (migration_index, upgrade) in upgrade_functions.iter().enumerate() { let target = upgrade.target_version();
if upgrade.must_upgrade(initial_version) {
info!( info!(
"[{migration_index}/{migration_count}]Applying migration: {}", "Upgrading from v{}.{}.{} to v{}.{}.{}",
upgrade.description() local_from.0, local_from.1, local_from.2, target.0, target.1, target.2
); );
let mut wtxn = env.write_txn()?;
upgrade.upgrade(env, &mut wtxn)?; upgrade.upgrade(env, &mut wtxn, local_from)?;
versioning.set_version(&mut wtxn, target)?;
info!( wtxn.commit()?;
"[{}/{migration_count}]Migration applied: {}", local_from = target;
migration_index + 1,
upgrade.description()
)
} else {
info!(
"[{migration_index}/{migration_count}]Skipping unnecessary migration: {}",
upgrade.description()
)
}
} }
versioning.set_version(&mut wtxn, target_version)?; let mut wtxn = env.write_txn()?;
info!("Task queue upgraded, spawning the upgrade database task");
let queue = TaskQueue::new(env, &mut wtxn)?; let queue = TaskQueue::new(env, &mut wtxn)?;
let uid = queue.next_task_id(&wtxn)?; let uid = queue.next_task_id(&wtxn)?;
queue.register( queue.register(
@@ -97,14 +91,31 @@ pub fn upgrade_index_scheduler(
finished_at: None, finished_at: None,
error: None, error: None,
canceled_by: None, canceled_by: None,
details: Some(Details::UpgradeDatabase { from: initial_version, to: target_version }), details: Some(Details::UpgradeDatabase { from, to }),
status: Status::Enqueued, status: Status::Enqueued,
kind: KindWithContent::UpgradeDatabase { from: initial_version }, kind: KindWithContent::UpgradeDatabase { from },
network: None, network: None,
custom_metadata: None,
}, },
)?; )?;
wtxn.commit()?; wtxn.commit()?;
Ok(()) Ok(())
} }
#[allow(non_camel_case_types)]
struct ToCurrentNoOp {}
impl UpgradeIndexScheduler for ToCurrentNoOp {
fn upgrade(
&self,
_env: &Env<WithoutTls>,
_wtxn: &mut RwTxn,
_original: (u32, u32, u32),
) -> anyhow::Result<()> {
Ok(())
}
fn target_version(&self) -> (u32, u32, u32) {
(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
}
}
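
One side of this hunk drives migrations through a `must_upgrade`/`description` trait and applies only the ones relevant to the initial on-disk version. A self-contained sketch of that dispatch idea follows, with a stripped-down trait (no LMDB env or transactions) and a placeholder migration; none of the names below are from the diff.

```rust
// Sketch: version-gated migration dispatch, minus the database plumbing.
trait Migration {
    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;
    fn description(&self) -> &'static str;
    fn upgrade(&self) -> Result<(), String>;
}

struct NoOp;
impl Migration for NoOp {
    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
        // Only apply when the database predates the version this migration targets.
        initial_version < (1, 21, 0)
    }
    fn description(&self) -> &'static str {
        "no-op placeholder migration"
    }
    fn upgrade(&self) -> Result<(), String> {
        Ok(())
    }
}

fn main() -> Result<(), String> {
    let initial_version = (1, 12, 0);
    let migrations: &[&dyn Migration] = &[&NoOp];
    for (i, migration) in migrations.iter().enumerate() {
        if migration.must_upgrade(initial_version) {
            println!("[{}/{}] applying: {}", i + 1, migrations.len(), migration.description());
            migration.upgrade()?;
        } else {
            println!("[{}/{}] skipping: {}", i + 1, migrations.len(), migration.description());
        }
    }
    Ok(())
}
```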

View File

@@ -256,15 +256,14 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
use KindWithContent as K; use KindWithContent as K;
let mut index_uids = vec![]; let mut index_uids = vec![];
match &mut task.kind { match &mut task.kind {
K::DocumentAdditionOrUpdate { index_uid, .. } K::DocumentAdditionOrUpdate { index_uid, .. } => index_uids.push(index_uid),
| K::DocumentEdition { index_uid, .. } K::DocumentEdition { index_uid, .. } => index_uids.push(index_uid),
| K::DocumentDeletion { index_uid, .. } K::DocumentDeletion { index_uid, .. } => index_uids.push(index_uid),
| K::DocumentDeletionByFilter { index_uid, .. } K::DocumentDeletionByFilter { index_uid, .. } => index_uids.push(index_uid),
| K::DocumentClear { index_uid } K::DocumentClear { index_uid } => index_uids.push(index_uid),
| K::SettingsUpdate { index_uid, .. } K::SettingsUpdate { index_uid, .. } => index_uids.push(index_uid),
| K::IndexDeletion { index_uid } K::IndexDeletion { index_uid } => index_uids.push(index_uid),
| K::IndexCreation { index_uid, .. } K::IndexCreation { index_uid, .. } => index_uids.push(index_uid),
| K::IndexCompaction { index_uid, .. } => index_uids.push(index_uid),
K::IndexUpdate { index_uid, new_index_uid, .. } => { K::IndexUpdate { index_uid, new_index_uid, .. } => {
index_uids.push(index_uid); index_uids.push(index_uid);
if let Some(new_uid) = new_index_uid { if let Some(new_uid) = new_index_uid {
@@ -286,6 +285,7 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
| K::DumpCreation { .. } | K::DumpCreation { .. }
| K::Export { .. } | K::Export { .. }
| K::UpgradeDatabase { .. } | K::UpgradeDatabase { .. }
| K::NetworkTopologyChange { .. }
| K::SnapshotCreation => (), | K::SnapshotCreation => (),
}; };
if let Some(Details::IndexSwap { swaps }) = &mut task.details { if let Some(Details::IndexSwap { swaps }) = &mut task.details {
@@ -379,7 +379,6 @@ impl crate::IndexScheduler {
status, status,
kind, kind,
network: _, network: _,
custom_metadata: _,
} = task; } = task;
assert_eq!(uid, task.uid); assert_eq!(uid, task.uid);
if task.status != Status::Enqueued { if task.status != Status::Enqueued {
@@ -620,12 +619,8 @@ impl crate::IndexScheduler {
Details::UpgradeDatabase { from: _, to: _ } => { Details::UpgradeDatabase { from: _, to: _ } => {
assert_eq!(kind.as_kind(), Kind::UpgradeDatabase); assert_eq!(kind.as_kind(), Kind::UpgradeDatabase);
} }
Details::IndexCompaction { Details::NetworkTopologyChange { .. } => {
index_uid: _, assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
pre_compaction_size: _,
post_compaction_size: _,
} => {
assert_eq!(kind.as_kind(), Kind::IndexCompaction);
} }
} }
} }
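
The earlier hunk in this file consolidates many `swap_index_uid_in_task` match arms into a single or-pattern that binds `index_uid` once. A self-contained illustration of that pattern, using toy variants rather than the real `KindWithContent` enum:

```rust
// Sketch: several struct variants sharing a field can share one match arm.
#[allow(dead_code)]
enum Kind {
    DocumentClear { index_uid: String },
    IndexDeletion { index_uid: String },
    IndexSwap { swaps: Vec<(String, String)> },
}

fn index_uid(kind: &mut Kind) -> Option<&mut String> {
    match kind {
        Kind::DocumentClear { index_uid } | Kind::IndexDeletion { index_uid } => Some(index_uid),
        Kind::IndexSwap { .. } => None,
    }
}

fn main() {
    let mut kind = Kind::DocumentClear { index_uid: "catto".to_string() };
    if let Some(uid) = index_uid(&mut kind) {
        *uid = "doggo".to_string();
    }
    assert!(matches!(kind, Kind::DocumentClear { index_uid } if index_uid == "doggo"));
}
```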

View File

@@ -64,7 +64,14 @@ impl Versioning {
}; };
wtxn.commit()?; wtxn.commit()?;
upgrade_index_scheduler(env, &this, from)?; let bin_major: u32 = versioning::VERSION_MAJOR;
let bin_minor: u32 = versioning::VERSION_MINOR;
let bin_patch: u32 = versioning::VERSION_PATCH;
let to = (bin_major, bin_minor, bin_patch);
if from != to {
upgrade_index_scheduler(env, &this, from, to)?;
}
// Once we reach this point it means the upgrade process, if there was one is entirely finished // Once we reach this point it means the upgrade process, if there was one is entirely finished
// we can safely say we reached the latest version of the index scheduler // we can safely say we reached the latest version of the index scheduler

View File

@@ -15,7 +15,7 @@ license.workspace = true
serde_json = "1.0" serde_json = "1.0"
[dev-dependencies] [dev-dependencies]
criterion = "0.7.0" criterion = "0.6.0"
[[bench]] [[bench]]
name = "depth" name = "depth"

View File

@@ -13,7 +13,7 @@ license.workspace = true
[dependencies] [dependencies]
# fixed version due to format breakages in v1.40 # fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["json", "redactions"] } insta = { version = "=1.39.0", features = ["json", "redactions"] }
md5 = "0.8.0" md5 = "0.7.0"
once_cell = "1.21" once_cell = "1.21"
regex-lite = "0.1.8" regex-lite = "0.1.6"
uuid = { version = "1.18.1", features = ["v4"] } uuid = { version = "1.17.0", features = ["v4"] }

View File

@@ -12,15 +12,15 @@ license.workspace = true
[dependencies] [dependencies]
base64 = "0.22.1" base64 = "0.22.1"
enum-iterator = "2.3.0" enum-iterator = "2.1.0"
hmac = "0.12.1" hmac = "0.12.1"
maplit = "1.0.2" maplit = "1.0.2"
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
rand = "0.8.5" rand = "0.8.5"
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9" sha2 = "0.10.9"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] } time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }

View File

@@ -109,7 +109,6 @@ impl HeedAuthStore {
Action::IndexesGet, Action::IndexesGet,
Action::IndexesUpdate, Action::IndexesUpdate,
Action::IndexesSwap, Action::IndexesSwap,
Action::IndexesCompact,
] ]
.iter(), .iter(),
); );

View File

@@ -11,38 +11,38 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
actix-web = { version = "4.12.0", default-features = false } actix-web = { version = "4.11.0", default-features = false }
anyhow = "1.0.100" anyhow = "1.0.98"
bumpalo = "3.19.0" bumpalo = "3.18.1"
bumparaw-collections = "0.1.4" bumparaw-collections = "0.1.4"
byte-unit = { version = "5.1.6", features = ["serde"] } byte-unit = { version = "5.1.6", features = ["serde"] }
convert_case = "0.9.0" convert_case = "0.8.0"
csv = "1.4.0" csv = "1.3.1"
deserr = { version = "0.6.4", features = ["actix-web"] } deserr = { version = "0.6.3", features = ["actix-web"] }
either = { version = "1.15.0", features = ["serde"] } either = { version = "1.15.0", features = ["serde"] }
enum-iterator = "2.3.0" enum-iterator = "2.1.0"
file-store = { path = "../file-store" } file-store = { path = "../file-store" }
flate2 = "1.1.5" flate2 = "1.1.2"
fst = "0.4.7" fst = "0.4.7"
memmap2 = "0.9.9" memmap2 = "0.9.7"
milli = { path = "../milli" } milli = { path = "../milli" }
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
rustc-hash = "2.1.1" rustc-hash = "2.1.1"
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde-cs = "0.2.4" serde-cs = "0.2.4"
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44" tar = "0.4.44"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = [ time = { version = "0.3.41", features = [
"serde-well-known", "serde-well-known",
"formatting", "formatting",
"parsing", "parsing",
"macros", "macros",
] } ] }
tokio = "1.48" tokio = "1.45"
utoipa = { version = "5.4.0", features = ["macros"] } utoipa = { version = "5.4.0", features = ["macros"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }
[dev-dependencies] [dev-dependencies]
# fixed version due to format breakages in v1.40 # fixed version due to format breakages in v1.40
@@ -56,9 +56,6 @@ all-tokenizations = ["milli/all-tokenizations"]
# chinese specialized tokenization # chinese specialized tokenization
chinese = ["milli/chinese"] chinese = ["milli/chinese"]
chinese-pinyin = ["milli/chinese-pinyin"] chinese-pinyin = ["milli/chinese-pinyin"]
enterprise = ["milli/enterprise"]
# hebrew specialized tokenization # hebrew specialized tokenization
hebrew = ["milli/hebrew"] hebrew = ["milli/hebrew"]
# japanese specialized tokenization # japanese specialized tokenization

View File

@@ -1,16 +0,0 @@
pub mod network {
use milli::update::new::indexer::current_edition::sharding::Shards;
use crate::network::Network;
impl Network {
pub fn shards(&self) -> Option<Shards> {
None
}
pub fn sharding(&self) -> bool {
// always false in CE
false
}
}
}

View File

@@ -3,27 +3,99 @@
// Use of this source code is governed by the Business Source License 1.1, // Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11> // as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use milli::update::new::indexer::enterprise_edition::sharding::Shards; use std::collections::BTreeMap;
use crate::network::Network; use deserr::Deserr;
use milli::update::new::indexer::enterprise_edition::sharding::{Shard, Shards};
use milli::update::Setting;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
impl Network { use crate::deserr::DeserrJsonError;
use crate::error::deserr_codes::{
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
};
#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Network {
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
#[serde(default)]
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
#[schema(value_type = Option<String>, example = json!("ms-00"), rename = "self")]
#[serde(default, rename = "self")]
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
pub local: Setting<String>,
#[schema(value_type = Option<bool>, example = json!(true))]
#[serde(default)]
#[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
pub sharding: Setting<bool>,
}
#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Remote {
#[schema(value_type = Option<String>, example = json!({
"ms-0": Remote { url: Setting::Set("http://localhost:7700".into()), search_api_key: Setting::Reset, write_api_key: Setting::Reset },
"ms-1": Remote { url: Setting::Set("http://localhost:7701".into()), search_api_key: Setting::Set("foo".into()), write_api_key: Setting::Set("bar".into()) },
"ms-2": Remote { url: Setting::Set("http://localhost:7702".into()), search_api_key: Setting::Set("bar".into()), write_api_key: Setting::Set("foo".into()) },
}))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkUrl>)]
#[serde(default)]
pub url: Setting<String>,
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkSearchApiKey>)]
#[serde(default)]
pub search_api_key: Setting<String>,
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkWriteApiKey>)]
#[serde(default)]
pub write_api_key: Setting<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct DbNetwork {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, DbRemote>,
#[serde(default)]
pub sharding: bool,
}
impl DbNetwork {
pub fn shards(&self) -> Option<Shards> { pub fn shards(&self) -> Option<Shards> {
if self.sharding { if self.sharding {
let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`"); let this = self.local.as_deref();
let others = self
.remotes Some(Shards(
self.remotes
.keys() .keys()
.filter(|name| name.as_str() != this) .map(|name| Shard {
.map(|name| name.to_owned()) is_own: Some(name.as_str()) == this,
.collect(); name: name.to_owned(),
Some(Shards { own: vec![this.to_owned()], others }) })
.collect(),
))
} else { } else {
None None
} }
} }
}
pub fn sharding(&self) -> bool {
self.sharding #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
} #[serde(rename_all = "camelCase")]
pub struct DbRemote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
} }
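
Both versions of `DbNetwork::shards` shown above partition the remote names around the local node when `sharding` is enabled. Below is a self-contained sketch of that split, with a hypothetical free function standing in for the method; the real code also carries API keys, and the two variants differ in how they represent ownership (`own`/`others` vs. per-shard `is_own`).

```rust
// Sketch: split the remote list into the local shard and its peers.
use std::collections::BTreeMap;

fn split_shards(local: &str, remotes: &BTreeMap<String, String>) -> (Vec<String>, Vec<String>) {
    let own = vec![local.to_owned()];
    let others = remotes.keys().filter(|name| name.as_str() != local).cloned().collect();
    (own, others)
}

fn main() {
    let remotes: BTreeMap<_, _> = [
        ("ms-00".to_owned(), "http://localhost:7700".to_owned()),
        ("ms-01".to_owned(), "http://localhost:7701".to_owned()),
    ]
    .into_iter()
    .collect();
    let (own, others) = split_shards("ms-00", &remotes);
    assert_eq!(own, ["ms-00"]);
    assert_eq!(others, ["ms-01"]);
}
```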

View File

@@ -254,12 +254,10 @@ InvalidSearchHybridQuery , InvalidRequest , BAD_REQU
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ; InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ; InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ; InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
InvalidIndexCustomMetadata , InvalidRequest , BAD_REQUEST ;
InvalidIndexUid , InvalidRequest , BAD_REQUEST ; InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacets , InvalidRequest , BAD_REQUEST ; InvalidMultiSearchFacets , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacetsByIndex , InvalidRequest , BAD_REQUEST ; InvalidMultiSearchFacetsByIndex , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacetOrder , InvalidRequest , BAD_REQUEST ; InvalidMultiSearchFacetOrder , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchQueryPersonalization , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFederated , InvalidRequest , BAD_REQUEST ; InvalidMultiSearchFederated , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFederationOptions , InvalidRequest , BAD_REQUEST ; InvalidMultiSearchFederationOptions , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchMaxValuesPerFacet , InvalidRequest , BAD_REQUEST ; InvalidMultiSearchMaxValuesPerFacet , InvalidRequest , BAD_REQUEST ;
@@ -317,8 +315,6 @@ InvalidSearchShowRankingScoreDetails , InvalidRequest , BAD_REQU
InvalidSimilarShowRankingScoreDetails , InvalidRequest , BAD_REQUEST ; InvalidSimilarShowRankingScoreDetails , InvalidRequest , BAD_REQUEST ;
InvalidSearchSort , InvalidRequest , BAD_REQUEST ; InvalidSearchSort , InvalidRequest , BAD_REQUEST ;
InvalidSearchDistinct , InvalidRequest , BAD_REQUEST ; InvalidSearchDistinct , InvalidRequest , BAD_REQUEST ;
InvalidSearchPersonalize , InvalidRequest , BAD_REQUEST ;
InvalidSearchPersonalizeUserContext , InvalidRequest , BAD_REQUEST ;
InvalidSearchMediaAndVector , InvalidRequest , BAD_REQUEST ; InvalidSearchMediaAndVector , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ; InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ; InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
@@ -394,9 +390,6 @@ TooManyVectors , InvalidRequest , BAD_REQU
UnretrievableDocument , Internal , BAD_REQUEST ; UnretrievableDocument , Internal , BAD_REQUEST ;
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ; UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ; UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
InvalidS3SnapshotRequest , Internal , BAD_REQUEST ;
InvalidS3SnapshotParameters , Internal , BAD_REQUEST ;
S3SnapshotServerError , Internal , BAD_GATEWAY ;
// Experimental features // Experimental features
VectorEmbeddingError , InvalidRequest , BAD_REQUEST ; VectorEmbeddingError , InvalidRequest , BAD_REQUEST ;
@@ -433,7 +426,6 @@ InvalidChatCompletionSearchQueryParamPrompt , InvalidRequest , BAD_REQU
InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ; InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ; InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ; InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ;
RequiresEnterpriseEdition , InvalidRequest , UNAVAILABLE_FOR_LEGAL_REASONS ;
// Webhooks // Webhooks
InvalidWebhooks , InvalidRequest , BAD_REQUEST ; InvalidWebhooks , InvalidRequest , BAD_REQUEST ;
InvalidWebhookUrl , InvalidRequest , BAD_REQUEST ; InvalidWebhookUrl , InvalidRequest , BAD_REQUEST ;
@@ -537,6 +529,8 @@ impl ErrorCode for milli::Error {
| UserError::DocumentEditionCompilationError(_) => { | UserError::DocumentEditionCompilationError(_) => {
Code::EditDocumentsByFunctionError Code::EditDocumentsByFunctionError
} }
UserError::NetworkShardingWithoutSelf => Code::InvalidNetworkSharding,
UserError::NetworkMissingUrl(_) => Code::MissingNetworkUrl,
UserError::CelluliteError(err) => match err { UserError::CelluliteError(err) => match err {
cellulite::Error::BuildCanceled cellulite::Error::BuildCanceled
| cellulite::Error::VersionMismatchOnBuild(_) | cellulite::Error::VersionMismatchOnBuild(_)
@@ -687,18 +681,6 @@ impl fmt::Display for deserr_codes::InvalidNetworkSearchApiKey {
} }
} }
impl fmt::Display for deserr_codes::InvalidSearchPersonalize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "the value of `personalize` is invalid, expected a JSON object with `userContext` string.")
}
}
impl fmt::Display for deserr_codes::InvalidSearchPersonalizeUserContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "the value of `userContext` is invalid, expected a string.")
}
}
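Reviewer note: the `Name , ErrorType , STATUS ;` rows at the top of this file are rows of a declarative macro that generates the `Code` enum together with its error-type and HTTP-status lookups. That macro is not part of this diff, so the snippet below is only a minimal, self-contained sketch of the pattern with hypothetical names (`make_error_codes`, simplified `ErrorType`/`StatusCode` enums), not the actual Meilisearch implementation:

#[derive(Debug, Clone, Copy)]
enum ErrorType { InvalidRequest, Internal }

#[derive(Debug, Clone, Copy)]
enum StatusCode { BadRequest }

// Each `Name, Type, Status;` row becomes one enum variant plus one arm
// in the two lookup methods, so a row can never get out of sync.
macro_rules! make_error_codes {
    ($($name:ident, $ty:ident, $status:ident;)*) => {
        #[derive(Debug, Clone, Copy)]
        enum Code { $($name,)* }

        impl Code {
            fn error_type(&self) -> ErrorType {
                match self { $(Code::$name => ErrorType::$ty,)* }
            }
            fn status(&self) -> StatusCode {
                match self { $(Code::$name => StatusCode::$status,)* }
            }
        }
    };
}

make_error_codes! {
    InvalidSearchSort, InvalidRequest, BadRequest;
    UnretrievableDocument, Internal, BadRequest;
}

fn main() {
    for code in [Code::InvalidSearchSort, Code::UnretrievableDocument] {
        println!("{code:?} -> {:?} / {:?}", code.error_type(), code.status());
    }
}

Adding or removing a row, as this diff does for the personalization and S3 snapshot codes, therefore only touches the table.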
#[macro_export] #[macro_export]
macro_rules! internal_error { macro_rules! internal_error {
($target:ty : $($other:path), *) => { ($target:ty : $($other:path), *) => {


@@ -380,9 +380,6 @@ pub enum Action {
#[serde(rename = "webhooks.*")] #[serde(rename = "webhooks.*")]
#[deserr(rename = "webhooks.*")] #[deserr(rename = "webhooks.*")]
WebhooksAll, WebhooksAll,
#[serde(rename = "indexes.compact")]
#[deserr(rename = "indexes.compact")]
IndexesCompact,
} }
impl Action { impl Action {
@@ -401,7 +398,6 @@ impl Action {
INDEXES_UPDATE => Some(Self::IndexesUpdate), INDEXES_UPDATE => Some(Self::IndexesUpdate),
INDEXES_DELETE => Some(Self::IndexesDelete), INDEXES_DELETE => Some(Self::IndexesDelete),
INDEXES_SWAP => Some(Self::IndexesSwap), INDEXES_SWAP => Some(Self::IndexesSwap),
INDEXES_COMPACT => Some(Self::IndexesCompact),
TASKS_ALL => Some(Self::TasksAll), TASKS_ALL => Some(Self::TasksAll),
TASKS_CANCEL => Some(Self::TasksCancel), TASKS_CANCEL => Some(Self::TasksCancel),
TASKS_DELETE => Some(Self::TasksDelete), TASKS_DELETE => Some(Self::TasksDelete),
@@ -466,7 +462,6 @@ impl Action {
IndexesUpdate => false, IndexesUpdate => false,
IndexesDelete => false, IndexesDelete => false,
IndexesSwap => false, IndexesSwap => false,
IndexesCompact => false,
TasksCancel => false, TasksCancel => false,
TasksDelete => false, TasksDelete => false,
TasksGet => true, TasksGet => true,
@@ -518,7 +513,6 @@ pub mod actions {
pub const INDEXES_UPDATE: u8 = IndexesUpdate.repr(); pub const INDEXES_UPDATE: u8 = IndexesUpdate.repr();
pub const INDEXES_DELETE: u8 = IndexesDelete.repr(); pub const INDEXES_DELETE: u8 = IndexesDelete.repr();
pub const INDEXES_SWAP: u8 = IndexesSwap.repr(); pub const INDEXES_SWAP: u8 = IndexesSwap.repr();
pub const INDEXES_COMPACT: u8 = IndexesCompact.repr();
pub const TASKS_ALL: u8 = TasksAll.repr(); pub const TASKS_ALL: u8 = TasksAll.repr();
pub const TASKS_CANCEL: u8 = TasksCancel.repr(); pub const TASKS_CANCEL: u8 = TasksCancel.repr();
pub const TASKS_DELETE: u8 = TasksDelete.repr(); pub const TASKS_DELETE: u8 = TasksDelete.repr();
@@ -620,7 +614,6 @@ pub(crate) mod test {
assert!(WebhooksDelete.repr() == 47 && WEBHOOKS_DELETE == 47); assert!(WebhooksDelete.repr() == 47 && WEBHOOKS_DELETE == 47);
assert!(WebhooksCreate.repr() == 48 && WEBHOOKS_CREATE == 48); assert!(WebhooksCreate.repr() == 48 && WEBHOOKS_CREATE == 48);
assert!(WebhooksAll.repr() == 49 && WEBHOOKS_ALL == 49); assert!(WebhooksAll.repr() == 49 && WEBHOOKS_ALL == 49);
assert!(IndexesCompact.repr() == 50 && INDEXES_COMPACT == 50);
} }
#[test] #[test]
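Reviewer note on the assertions just above: each `Action` carries a stable `u8` discriminant mirrored by a `pub const`, and `from_repr` maps the raw byte back to the variant, which is why the `IndexesCompact` lines change together: the variant, its `INDEXES_COMPACT` const, the `from_repr` arm, and the `repr() == 50` assertion all move as one. A rough, self-contained sketch of that pattern (variant set and numbers are illustrative only):

#[derive(Debug, Clone, Copy, PartialEq)]
enum Action {
    IndexesSwap,
    TasksCancel,
}

impl Action {
    // Stable wire value for each action.
    const fn repr(&self) -> u8 {
        match self {
            Action::IndexesSwap => 7,
            Action::TasksCancel => 8,
        }
    }

    // Inverse mapping, returning None for unknown bytes.
    fn from_repr(repr: u8) -> Option<Self> {
        match repr {
            7 => Some(Self::IndexesSwap),
            8 => Some(Self::TasksCancel),
            _ => None,
        }
    }
}

// Const mirror, as used by the `actions` module in the real code.
const INDEXES_SWAP: u8 = Action::IndexesSwap.repr();

fn main() {
    assert_eq!(Action::IndexesSwap.repr(), INDEXES_SWAP);
    assert_eq!(Action::from_repr(8), Some(Action::TasksCancel));
    assert_eq!(Action::from_repr(255), None);
}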


@@ -2,17 +2,10 @@
pub mod batch_view; pub mod batch_view;
pub mod batches; pub mod batches;
#[cfg(not(feature = "enterprise"))]
pub mod community_edition;
pub mod compression; pub mod compression;
pub mod deserr; pub mod deserr;
pub mod document_formats; pub mod document_formats;
#[cfg(feature = "enterprise")]
pub mod enterprise_edition; pub mod enterprise_edition;
#[cfg(not(feature = "enterprise"))]
pub use community_edition as current_edition;
#[cfg(feature = "enterprise")]
pub use enterprise_edition as current_edition;
pub mod error; pub mod error;
pub mod facet_values_sort; pub mod facet_values_sort;
pub mod features; pub mod features;
@@ -20,7 +13,6 @@ pub mod index_uid;
pub mod index_uid_pattern; pub mod index_uid_pattern;
pub mod keys; pub mod keys;
pub mod locales; pub mod locales;
pub mod network;
pub mod settings; pub mod settings;
pub mod star_or; pub mod star_or;
pub mod task_view; pub mod task_view;
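Reviewer note: the one-sided `#[cfg]`/`pub use ... as current_edition` lines in this hunk implement a module-aliasing pattern where downstream code imports `current_edition::...` and the Cargo feature decides which edition backs it. A minimal lib.rs-style sketch of the idea (placeholder module contents, not the real modules):

#[cfg(not(feature = "enterprise"))]
pub mod community_edition {
    pub fn edition_name() -> &'static str {
        "community"
    }
}

#[cfg(feature = "enterprise")]
pub mod enterprise_edition {
    pub fn edition_name() -> &'static str {
        "enterprise"
    }
}

// The feature flag picks which module backs `current_edition`.
#[cfg(not(feature = "enterprise"))]
pub use community_edition as current_edition;

#[cfg(feature = "enterprise")]
pub use enterprise_edition as current_edition;

// Callers stay edition-agnostic: they only ever name `current_edition`.
pub fn edition_banner() -> String {
    format!("running the {} edition", current_edition::edition_name())
}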


@@ -1,24 +0,0 @@
use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub sharding: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
}


@@ -346,26 +346,24 @@ impl<T> Settings<T> {
continue; continue;
}; };
hide_secret(api_key, 0); Self::hide_secret(api_key);
} }
} }
}
/// Redact a secret string, starting from the `secret_offset`th byte. fn hide_secret(secret: &mut String) {
pub fn hide_secret(secret: &mut String, secret_offset: usize) { match secret.len() {
match secret.len().checked_sub(secret_offset) { x if x < 10 => {
None => (), secret.replace_range(.., "XXX...");
Some(x) if x < 10 => {
secret.replace_range(secret_offset.., "XXX...");
} }
Some(x) if x < 20 => { x if x < 20 => {
secret.replace_range((secret_offset + 2).., "XXXX..."); secret.replace_range(2.., "XXXX...");
} }
Some(x) if x < 30 => { x if x < 30 => {
secret.replace_range((secret_offset + 3).., "XXXXX..."); secret.replace_range(3.., "XXXXX...");
}
_x => {
secret.replace_range(5.., "XXXXXX...");
} }
Some(_x) => {
secret.replace_range((secret_offset + 5).., "XXXXXX...");
} }
} }
} }
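Reviewer note: the variant of `hide_secret` that takes a `secret_offset` generalizes the redaction so that a known prefix (such as `Bearer ` in webhook headers, see further down) stays readable while everything after the offset is masked, with the kept length still scaling with the secret's size. A standalone sketch mirroring the thresholds in the diff (example values are made up):

// Everything from `secret_offset` onwards is replaced; the number of bytes
// kept after the offset grows with the remaining secret length.
fn hide_secret(secret: &mut String, secret_offset: usize) {
    match secret.len().checked_sub(secret_offset) {
        None => (),
        Some(x) if x < 10 => secret.replace_range(secret_offset.., "XXX..."),
        Some(x) if x < 20 => secret.replace_range((secret_offset + 2).., "XXXX..."),
        Some(x) if x < 30 => secret.replace_range((secret_offset + 3).., "XXXXX..."),
        Some(_) => secret.replace_range((secret_offset + 5).., "XXXXXX..."),
    }
}

fn main() {
    let mut key = String::from("0123456789abcdef"); // 16 bytes, offset 0
    hide_secret(&mut key, 0);
    assert_eq!(key, "01XXXX...");

    let mut header = String::from("Bearer 0123456789abcdef"); // redact after the scheme
    hide_secret(&mut header, "Bearer ".len());
    assert_eq!(header, "Bearer 01XXXX...");
}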


@@ -7,6 +7,7 @@ use time::{Duration, OffsetDateTime};
use utoipa::ToSchema; use utoipa::ToSchema;
use crate::batches::BatchId; use crate::batches::BatchId;
use crate::enterprise_edition::network::Network;
use crate::error::ResponseError; use crate::error::ResponseError;
use crate::settings::{Settings, Unchecked}; use crate::settings::{Settings, Unchecked};
use crate::tasks::{ use crate::tasks::{
@@ -55,9 +56,6 @@ pub struct TaskView {
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<TaskNetwork>, pub network: Option<TaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
} }
impl TaskView { impl TaskView {
@@ -76,7 +74,6 @@ impl TaskView {
started_at: task.started_at, started_at: task.started_at,
finished_at: task.finished_at, finished_at: task.finished_at,
network: task.network.clone(), network: task.network.clone(),
custom_metadata: task.custom_metadata.clone(),
} }
} }
} }
@@ -146,11 +143,9 @@ pub struct DetailsView {
pub old_index_uid: Option<String>, pub old_index_uid: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub new_index_uid: Option<String>, pub new_index_uid: Option<String>,
// index compaction // network
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub pre_compaction_size: Option<String>, pub network: Option<Network>,
#[serde(skip_serializing_if = "Option::is_none")]
pub post_compaction_size: Option<String>,
} }
impl DetailsView { impl DetailsView {
@@ -323,23 +318,9 @@ impl DetailsView {
// We should never be able to batch multiple renames at the same time. // We should never be able to batch multiple renames at the same time.
(Some(left), Some(_right)) => Some(left), (Some(left), Some(_right)) => Some(left),
}, },
pre_compaction_size: match ( network: match (&self.network, &other.network) {
self.pre_compaction_size.clone(),
other.pre_compaction_size.clone(),
) {
(None, None) => None, (None, None) => None,
(None, Some(size)) | (Some(size), None) => Some(size), (_, Some(network)) | (Some(network), None) => Some(network.clone()),
// We should never be able to batch multiple compactions at the same time.
(Some(left), Some(_right)) => Some(left),
},
post_compaction_size: match (
self.post_compaction_size.clone(),
other.post_compaction_size.clone(),
) {
(None, None) => None,
(None, Some(size)) | (Some(size), None) => Some(size),
// We should never be able to batch multiple compactions at the same time.
(Some(left), Some(_right)) => Some(left),
}, },
} }
} }
@@ -442,14 +423,8 @@ impl From<Details> for DetailsView {
upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)), upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)),
..Default::default() ..Default::default()
}, },
Details::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => { Details::NetworkTopologyChange { network: new_network } => {
DetailsView { DetailsView { network: new_network, ..Default::default() }
pre_compaction_size: pre_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
post_compaction_size: post_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
..Default::default()
}
} }
} }
} }
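Reviewer note: the `DetailsView` merge above applies one small rule per `Option` field: keep whichever side is set, and for details that can never appear twice in one batch keep `self`'s value when both are set (the `network` arm instead takes the other task's value). A tiny self-contained sketch of the self-biased rule:

fn merge_keep_self<T: Clone>(current: &Option<T>, other: &Option<T>) -> Option<T> {
    match (current, other) {
        (None, None) => None,
        (None, Some(v)) | (Some(v), None) => Some(v.clone()),
        // Two tasks carrying this detail are never batched together,
        // so when both are set the current value wins.
        (Some(current), Some(_other)) => Some(current.clone()),
    }
}

fn main() {
    assert_eq!(merge_keep_self(&Some(1), &None), Some(1));
    assert_eq!(merge_keep_self(&None, &Some(2)), Some(2));
    assert_eq!(merge_keep_self(&Some(3), &Some(4)), Some(3));
    assert_eq!(merge_keep_self::<u32>(&None, &None), None);
}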


@@ -15,6 +15,7 @@ use utoipa::{schema, ToSchema};
use uuid::Uuid; use uuid::Uuid;
use crate::batches::BatchId; use crate::batches::BatchId;
use crate::enterprise_edition::network::Network;
use crate::error::ResponseError; use crate::error::ResponseError;
use crate::index_uid_pattern::IndexUidPattern; use crate::index_uid_pattern::IndexUidPattern;
use crate::keys::Key; use crate::keys::Key;
@@ -45,9 +46,6 @@ pub struct Task {
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<TaskNetwork>, pub network: Option<TaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
} }
impl Task { impl Task {
@@ -61,6 +59,7 @@ impl Task {
| TaskDeletion { .. } | TaskDeletion { .. }
| Export { .. } | Export { .. }
| UpgradeDatabase { .. } | UpgradeDatabase { .. }
| NetworkTopologyChange { .. }
| IndexSwap { .. } => None, | IndexSwap { .. } => None,
DocumentAdditionOrUpdate { index_uid, .. } DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. } | DocumentEdition { index_uid, .. }
@@ -70,8 +69,7 @@ impl Task {
| SettingsUpdate { index_uid, .. } | SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. } | IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. } | IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid } | IndexDeletion { index_uid } => Some(index_uid),
| IndexCompaction { index_uid } => Some(index_uid),
} }
} }
@@ -99,7 +97,7 @@ impl Task {
| KindWithContent::SnapshotCreation | KindWithContent::SnapshotCreation
| KindWithContent::Export { .. } | KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. } | KindWithContent::UpgradeDatabase { .. }
| KindWithContent::IndexCompaction { .. } => None, | KindWithContent::NetworkTopologyChange { .. } => None,
} }
} }
} }
@@ -175,8 +173,9 @@ pub enum KindWithContent {
UpgradeDatabase { UpgradeDatabase {
from: (u32, u32, u32), from: (u32, u32, u32),
}, },
IndexCompaction { NetworkTopologyChange {
index_uid: String, network: Option<Network>,
origin: Option<Origin>,
}, },
} }
@@ -214,7 +213,7 @@ impl KindWithContent {
KindWithContent::SnapshotCreation => Kind::SnapshotCreation, KindWithContent::SnapshotCreation => Kind::SnapshotCreation,
KindWithContent::Export { .. } => Kind::Export, KindWithContent::Export { .. } => Kind::Export,
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase, KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction, KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
} }
} }
@@ -227,6 +226,7 @@ impl KindWithContent {
| TaskCancelation { .. } | TaskCancelation { .. }
| TaskDeletion { .. } | TaskDeletion { .. }
| Export { .. } | Export { .. }
| NetworkTopologyChange { .. }
| UpgradeDatabase { .. } => vec![], | UpgradeDatabase { .. } => vec![],
DocumentAdditionOrUpdate { index_uid, .. } DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. } | DocumentEdition { index_uid, .. }
@@ -235,8 +235,7 @@ impl KindWithContent {
| DocumentClear { index_uid } | DocumentClear { index_uid }
| SettingsUpdate { index_uid, .. } | SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. } | IndexCreation { index_uid, .. }
| IndexDeletion { index_uid } | IndexDeletion { index_uid } => vec![index_uid],
| IndexCompaction { index_uid } => vec![index_uid],
IndexUpdate { index_uid, new_index_uid, .. } => { IndexUpdate { index_uid, new_index_uid, .. } => {
let mut indexes = vec![index_uid.as_str()]; let mut indexes = vec![index_uid.as_str()];
if let Some(new_uid) = new_index_uid { if let Some(new_uid) = new_index_uid {
@@ -335,11 +334,9 @@ impl KindWithContent {
versioning::VERSION_PATCH, versioning::VERSION_PATCH,
), ),
}), }),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction { KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
index_uid: index_uid.clone(), Some(Details::NetworkTopologyChange { network: new_network.clone() })
pre_compaction_size: None, }
post_compaction_size: None,
}),
} }
} }
@@ -422,11 +419,9 @@ impl KindWithContent {
versioning::VERSION_PATCH, versioning::VERSION_PATCH,
), ),
}), }),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction { KindWithContent::NetworkTopologyChange { network: new_network, origin: _s } => {
index_uid: index_uid.clone(), Some(Details::NetworkTopologyChange { network: new_network.clone() })
pre_compaction_size: None, }
post_compaction_size: None,
}),
} }
} }
} }
@@ -489,11 +484,9 @@ impl From<&KindWithContent> for Option<Details> {
versioning::VERSION_PATCH, versioning::VERSION_PATCH,
), ),
}), }),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction { KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
index_uid: index_uid.clone(), Some(Details::NetworkTopologyChange { network: new_network.clone() })
pre_compaction_size: None, }
post_compaction_size: None,
}),
} }
} }
} }
@@ -604,7 +597,7 @@ pub enum Kind {
SnapshotCreation, SnapshotCreation,
Export, Export,
UpgradeDatabase, UpgradeDatabase,
IndexCompaction, NetworkTopologyChange,
} }
impl Kind { impl Kind {
@@ -616,15 +609,15 @@ impl Kind {
| Kind::SettingsUpdate | Kind::SettingsUpdate
| Kind::IndexCreation | Kind::IndexCreation
| Kind::IndexDeletion | Kind::IndexDeletion
| Kind::IndexUpdate | Kind::IndexUpdate => true,
| Kind::IndexCompaction => true,
Kind::IndexSwap Kind::IndexSwap
| Kind::TaskCancelation | Kind::TaskCancelation
| Kind::TaskDeletion | Kind::TaskDeletion
| Kind::DumpCreation | Kind::DumpCreation
| Kind::Export | Kind::Export
| Kind::UpgradeDatabase | Kind::UpgradeDatabase
| Kind::SnapshotCreation => false, | Kind::SnapshotCreation
| Kind::NetworkTopologyChange => false,
} }
} }
} }
@@ -645,7 +638,7 @@ impl Display for Kind {
Kind::SnapshotCreation => write!(f, "snapshotCreation"), Kind::SnapshotCreation => write!(f, "snapshotCreation"),
Kind::Export => write!(f, "export"), Kind::Export => write!(f, "export"),
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"), Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
Kind::IndexCompaction => write!(f, "indexCompaction"), Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
} }
} }
} }
@@ -681,8 +674,8 @@ impl FromStr for Kind {
Ok(Kind::Export) Ok(Kind::Export)
} else if kind.eq_ignore_ascii_case("upgradeDatabase") { } else if kind.eq_ignore_ascii_case("upgradeDatabase") {
Ok(Kind::UpgradeDatabase) Ok(Kind::UpgradeDatabase)
} else if kind.eq_ignore_ascii_case("indexCompaction") { } else if kind.eq_ignore_ascii_case("networkTopologyChange") {
Ok(Kind::IndexCompaction) Ok(Kind::NetworkTopologyChange)
} else { } else {
Err(ParseTaskKindError(kind.to_owned())) Err(ParseTaskKindError(kind.to_owned()))
} }
@@ -768,10 +761,8 @@ pub enum Details {
from: (u32, u32, u32), from: (u32, u32, u32),
to: (u32, u32, u32), to: (u32, u32, u32),
}, },
IndexCompaction { NetworkTopologyChange {
index_uid: String, network: Option<Network>,
pre_compaction_size: Option<Byte>,
post_compaction_size: Option<Byte>,
}, },
} }
@@ -835,15 +826,12 @@ impl Details {
Self::ClearAll { deleted_documents } => *deleted_documents = Some(0), Self::ClearAll { deleted_documents } => *deleted_documents = Some(0),
Self::TaskCancelation { canceled_tasks, .. } => *canceled_tasks = Some(0), Self::TaskCancelation { canceled_tasks, .. } => *canceled_tasks = Some(0),
Self::TaskDeletion { deleted_tasks, .. } => *deleted_tasks = Some(0), Self::TaskDeletion { deleted_tasks, .. } => *deleted_tasks = Some(0),
Self::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
*pre_compaction_size = None;
*post_compaction_size = None;
}
Self::SettingsUpdate { .. } Self::SettingsUpdate { .. }
| Self::IndexInfo { .. } | Self::IndexInfo { .. }
| Self::Dump { .. } | Self::Dump { .. }
| Self::Export { .. } | Self::Export { .. }
| Self::UpgradeDatabase { .. } | Self::UpgradeDatabase { .. }
| Self::NetworkTopologyChange { .. }
| Self::IndexSwap { .. } => (), | Self::IndexSwap { .. } => (),
} }


@@ -11,24 +11,6 @@ pub struct Webhook {
pub headers: BTreeMap<String, String>, pub headers: BTreeMap<String, String>,
} }
impl Webhook {
pub fn redact_authorization_header(&mut self) {
// headers are case insensitive, so to make the redaction robust we iterate over qualifying headers
// rather than getting one canonical `Authorization` header.
for value in self
.headers
.iter_mut()
.filter_map(|(name, value)| name.eq_ignore_ascii_case("authorization").then_some(value))
{
if value.starts_with("Bearer ") {
crate::settings::hide_secret(value, "Bearer ".len());
} else {
crate::settings::hide_secret(value, 0);
}
}
}
}
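Reviewer note: the `redact_authorization_header` method above (present on only one side of this diff) iterates every header whose name matches `authorization` case-insensitively and keeps a `Bearer ` prefix readable while masking the token through the offset-aware `hide_secret` shown earlier. A simplified, self-contained sketch of the same idea (the masking below is a placeholder, not the real helper):

use std::collections::BTreeMap;

fn redact_authorization(headers: &mut BTreeMap<String, String>) {
    // Header names are case-insensitive, so every qualifying entry is redacted.
    for value in headers
        .iter_mut()
        .filter_map(|(name, value)| name.eq_ignore_ascii_case("authorization").then_some(value))
    {
        let offset = if value.starts_with("Bearer ") { "Bearer ".len() } else { 0 };
        let masked = format!("{}XXX...", &value[..offset]);
        *value = masked;
    }
}

fn main() {
    let mut headers = BTreeMap::from([
        ("AUTHORIZATION".to_string(), "Bearer super-secret-token".to_string()),
        ("content-type".to_string(), "application/json".to_string()),
    ]);
    redact_authorization(&mut headers);
    assert_eq!(headers["AUTHORIZATION"], "Bearer XXX...");
    assert_eq!(headers["content-type"], "application/json");
}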
#[derive(Debug, Serialize, Default, Clone, PartialEq)] #[derive(Debug, Serialize, Default, Clone, PartialEq)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct WebhooksView { pub struct WebhooksView {


@@ -14,91 +14,91 @@ default-run = "meilisearch"
[dependencies] [dependencies]
actix-cors = "0.7.1" actix-cors = "0.7.1"
actix-http = { version = "3.11.2", default-features = false, features = [ actix-http = { version = "3.11.0", default-features = false, features = [
"compress-brotli", "compress-brotli",
"compress-gzip", "compress-gzip",
"rustls-0_23", "rustls-0_23",
] } ] }
actix-utils = "3.0.1" actix-utils = "3.0.1"
actix-web = { version = "4.12.0", default-features = false, features = [ actix-web = { version = "4.11.0", default-features = false, features = [
"macros", "macros",
"compress-brotli", "compress-brotli",
"compress-gzip", "compress-gzip",
"cookies", "cookies",
"rustls-0_23", "rustls-0_23",
] } ] }
anyhow = { version = "1.0.100", features = ["backtrace"] } anyhow = { version = "1.0.98", features = ["backtrace"] }
bstr = "1.12.1" bstr = "1.12.0"
byte-unit = { version = "5.1.6", features = ["serde"] } byte-unit = { version = "5.1.6", features = ["serde"] }
bytes = "1.11.0" bytes = "1.10.1"
bumpalo = "3.19.0" bumpalo = "3.18.1"
clap = { version = "4.5.52", features = ["derive", "env"] } clap = { version = "4.5.40", features = ["derive", "env"] }
crossbeam-channel = "0.5.15" crossbeam-channel = "0.5.15"
deserr = { version = "0.6.4", features = ["actix-web"] } deserr = { version = "0.6.3", features = ["actix-web"] }
dump = { path = "../dump" } dump = { path = "../dump" }
either = "1.15.0" either = "1.15.0"
file-store = { path = "../file-store" } file-store = { path = "../file-store" }
flate2 = "1.1.5" flate2 = "1.1.2"
fst = "0.4.7" fst = "0.4.7"
futures = "0.3.31" futures = "0.3.31"
futures-util = "0.3.31" futures-util = "0.3.31"
index-scheduler = { path = "../index-scheduler" } index-scheduler = { path = "../index-scheduler" }
indexmap = { version = "2.12.0", features = ["serde"] } indexmap = { version = "2.9.0", features = ["serde"] }
is-terminal = "0.4.17" is-terminal = "0.4.16"
itertools = "0.14.0" itertools = "0.14.0"
jsonwebtoken = "9.3.1" jsonwebtoken = "9.3.1"
lazy_static = "1.5.0" lazy_static = "1.5.0"
meilisearch-auth = { path = "../meilisearch-auth" } meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
memmap2 = "0.9.9" memmap2 = "0.9.7"
mimalloc = { version = "0.1.48", default-features = false } mimalloc = { version = "0.1.47", default-features = false }
mime = "0.3.17" mime = "0.3.17"
num_cpus = "1.17.0" num_cpus = "1.17.0"
obkv = "0.3.0" obkv = "0.3.0"
once_cell = "1.21.3" once_cell = "1.21.3"
ordered-float = "5.1.0" ordered-float = "5.0.0"
parking_lot = "0.12.5" parking_lot = "0.12.4"
permissive-json-pointer = { path = "../permissive-json-pointer" } permissive-json-pointer = { path = "../permissive-json-pointer" }
pin-project-lite = "0.2.16" pin-project-lite = "0.2.16"
platform-dirs = "0.3.0" platform-dirs = "0.3.0"
prometheus = { version = "0.14.0", features = ["process"] } prometheus = { version = "0.14.0", features = ["process"] }
rand = "0.8.5" rand = "0.8.5"
rayon = "1.11.0" rayon = "1.10.0"
regex = "1.12.2" regex = "1.11.1"
reqwest = { version = "0.12.24", features = [ reqwest = { version = "0.12.20", features = [
"rustls-tls", "rustls-tls",
"json", "json",
], default-features = false } ], default-features = false }
rustls = { version = "0.23.35", features = ["ring"], default-features = false } rustls = { version = "0.23.28", features = ["ring"], default-features = false }
rustls-pki-types = { version = "1.13.0", features = ["alloc"] } rustls-pki-types = { version = "1.12.0", features = ["alloc"] }
rustls-pemfile = "2.2.0" rustls-pemfile = "2.2.0"
segment = { version = "0.2.6" } segment = { version = "0.2.6" }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9" sha2 = "0.10.9"
siphasher = "1.0.1" siphasher = "1.0.1"
slice-group-by = "0.3.1" slice-group-by = "0.3.1"
static-files = { version = "0.3.1", optional = true } static-files = { version = "0.2.5", optional = true }
sysinfo = "0.37.2" sysinfo = "0.35.2"
tar = "0.4.44" tar = "0.4.44"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = [ time = { version = "0.3.41", features = [
"serde-well-known", "serde-well-known",
"formatting", "formatting",
"parsing", "parsing",
"macros", "macros",
] } ] }
tokio = { version = "1.48.0", features = ["full"] } tokio = { version = "1.45.1", features = ["full"] }
toml = "0.9.8" toml = "0.8.23"
uuid = { version = "1.18.1", features = ["serde", "v4", "v7"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }
serde_urlencoded = "0.7.1" serde_urlencoded = "0.7.1"
termcolor = "1.4.1" termcolor = "1.4.1"
url = { version = "2.5.7", features = ["serde"] } url = { version = "2.5.4", features = ["serde"] }
tracing = "0.1.41" tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = ["json"] } tracing-subscriber = { version = "0.3.20", features = ["json"] }
tracing-trace = { version = "0.1.0", path = "../tracing-trace" } tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
tracing-actix-web = "0.7.19" tracing-actix-web = "0.7.18"
build-info = { version = "1.7.0", path = "../build-info" } build-info = { version = "1.7.0", path = "../build-info" }
roaring = "0.10.12" roaring = "0.10.12"
mopa-maintained = "0.2.3" mopa-maintained = "0.2.3"
@@ -114,35 +114,35 @@ utoipa = { version = "5.4.0", features = [
utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] } utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }
async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" } async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" }
secrecy = "0.10.3" secrecy = "0.10.3"
actix-web-lab = { version = "0.24.3", default-features = false } actix-web-lab = { version = "0.24.1", default-features = false }
urlencoding = "2.1.3" urlencoding = "2.1.3"
backoff = { version = "0.4.0", features = ["tokio"] } backoff = { version = "0.4.0", features = ["tokio"] }
humantime = { version = "2.3.0", default-features = false }
[dev-dependencies] [dev-dependencies]
actix-rt = "2.11.0" actix-rt = "2.10.0"
brotli = "8.0.2" brotli = "8.0.1"
# fixed version due to format breakages in v1.40 # fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["redactions"] } insta = { version = "=1.39.0", features = ["redactions"] }
manifest-dir-macros = "0.1.18" manifest-dir-macros = "0.1.18"
maplit = "1.0.2" maplit = "1.0.2"
meili-snap = { path = "../meili-snap" } meili-snap = { path = "../meili-snap" }
temp-env = "0.3.6" temp-env = "0.3.6"
wiremock = "0.6.5" wiremock = "0.6.3"
yaup = "0.3.1" yaup = "0.3.1"
[build-dependencies] [build-dependencies]
anyhow = { version = "1.0.100", optional = true } anyhow = { version = "1.0.98", optional = true }
cargo_toml = { version = "0.22.3", optional = true } cargo_toml = { version = "0.22.1", optional = true }
hex = { version = "0.4.3", optional = true } hex = { version = "0.4.3", optional = true }
reqwest = { version = "0.12.24", features = [ reqwest = { version = "0.12.20", features = [
"blocking", "blocking",
"rustls-tls", "rustls-tls",
], default-features = false, optional = true } ], default-features = false, optional = true }
sha-1 = { version = "0.10.1", optional = true } sha-1 = { version = "0.10.1", optional = true }
static-files = { version = "0.3.1", optional = true } static-files = { version = "0.2.5", optional = true }
tempfile = { version = "3.23.0", optional = true } tempfile = { version = "3.20.0", optional = true }
zip = { version = "6.0.0", optional = true } zip = { version = "4.1.0", optional = true }
[features] [features]
default = ["meilisearch-types/all-tokenizations", "mini-dashboard"] default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]
@@ -160,7 +160,6 @@ mini-dashboard = [
] ]
chinese = ["meilisearch-types/chinese"] chinese = ["meilisearch-types/chinese"]
chinese-pinyin = ["meilisearch-types/chinese-pinyin"] chinese-pinyin = ["meilisearch-types/chinese-pinyin"]
enterprise = ["meilisearch-types/enterprise"]
hebrew = ["meilisearch-types/hebrew"] hebrew = ["meilisearch-types/hebrew"]
japanese = ["meilisearch-types/japanese"] japanese = ["meilisearch-types/japanese"]
korean = ["meilisearch-types/korean"] korean = ["meilisearch-types/korean"]


@@ -195,7 +195,7 @@ struct Infos {
experimental_enable_logs_route: bool, experimental_enable_logs_route: bool,
experimental_reduce_indexing_memory_usage: bool, experimental_reduce_indexing_memory_usage: bool,
experimental_max_number_of_batched_tasks: usize, experimental_max_number_of_batched_tasks: usize,
experimental_limit_batched_tasks_total_size: Option<u64>, experimental_limit_batched_tasks_total_size: u64,
experimental_network: bool, experimental_network: bool,
experimental_multimodal: bool, experimental_multimodal: bool,
experimental_chat_completions: bool, experimental_chat_completions: bool,
@@ -205,10 +205,7 @@ struct Infos {
experimental_no_snapshot_compaction: bool, experimental_no_snapshot_compaction: bool,
experimental_no_edition_2024_for_dumps: bool, experimental_no_edition_2024_for_dumps: bool,
experimental_no_edition_2024_for_settings: bool, experimental_no_edition_2024_for_settings: bool,
experimental_no_edition_2024_for_prefix_post_processing: bool,
experimental_no_edition_2024_for_facet_post_processing: bool,
experimental_vector_store_setting: bool, experimental_vector_store_setting: bool,
experimental_personalization: bool,
gpu_enabled: bool, gpu_enabled: bool,
db_path: bool, db_path: bool,
import_dump: bool, import_dump: bool,
@@ -218,7 +215,6 @@ struct Infos {
import_snapshot: bool, import_snapshot: bool,
schedule_snapshot: Option<u64>, schedule_snapshot: Option<u64>,
snapshot_dir: bool, snapshot_dir: bool,
uses_s3_snapshots: bool,
ignore_missing_snapshot: bool, ignore_missing_snapshot: bool,
ignore_snapshot_if_db_exists: bool, ignore_snapshot_if_db_exists: bool,
http_addr: bool, http_addr: bool,
@@ -287,8 +283,6 @@ impl Infos {
indexer_options, indexer_options,
config_file_path, config_file_path,
no_analytics: _, no_analytics: _,
experimental_personalization_api_key,
s3_snapshot_options,
} = options; } = options;
let schedule_snapshot = match schedule_snapshot { let schedule_snapshot = match schedule_snapshot {
@@ -302,8 +296,6 @@ impl Infos {
skip_index_budget: _, skip_index_budget: _,
experimental_no_edition_2024_for_settings, experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps, experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = indexer_options; } = indexer_options;
let RuntimeTogglableFeatures { let RuntimeTogglableFeatures {
@@ -352,14 +344,13 @@ impl Infos {
import_snapshot: import_snapshot.is_some(), import_snapshot: import_snapshot.is_some(),
schedule_snapshot, schedule_snapshot,
snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"), snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
uses_s3_snapshots: s3_snapshot_options.is_some(),
ignore_missing_snapshot, ignore_missing_snapshot,
ignore_snapshot_if_db_exists, ignore_snapshot_if_db_exists,
http_addr: http_addr != default_http_addr(), http_addr: http_addr != default_http_addr(),
http_payload_size_limit, http_payload_size_limit,
experimental_max_number_of_batched_tasks, experimental_max_number_of_batched_tasks,
experimental_limit_batched_tasks_total_size: experimental_limit_batched_tasks_total_size:
experimental_limit_batched_tasks_total_size.map(|size| size.as_u64()), experimental_limit_batched_tasks_total_size.into(),
task_queue_webhook: task_webhook_url.is_some(), task_queue_webhook: task_webhook_url.is_some(),
task_webhook_authorization_header: task_webhook_authorization_header.is_some(), task_webhook_authorization_header: task_webhook_authorization_header.is_some(),
log_level: log_level.to_string(), log_level: log_level.to_string(),
@@ -374,9 +365,6 @@ impl Infos {
ssl_resumption, ssl_resumption,
ssl_tickets, ssl_tickets,
experimental_no_edition_2024_for_settings, experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
experimental_personalization: experimental_personalization_api_key.is_some(),
} }
} }
} }


@@ -38,8 +38,6 @@ pub enum MeilisearchHttpError {
PaginationInFederatedQuery(usize, &'static str), PaginationInFederatedQuery(usize, &'static str),
#[error("Inside `.queries[{0}]`: Using facet options is not allowed in federated queries.\n - Hint: remove `facets` from query #{0} or remove `federation` from the request\n - Hint: pass `federation.facetsByIndex.{1}: {2:?}` for facets in federated search")] #[error("Inside `.queries[{0}]`: Using facet options is not allowed in federated queries.\n - Hint: remove `facets` from query #{0} or remove `federation` from the request\n - Hint: pass `federation.facetsByIndex.{1}: {2:?}` for facets in federated search")]
FacetsInFederatedQuery(usize, String, Vec<String>), FacetsInFederatedQuery(usize, String, Vec<String>),
#[error("Inside `.queries[{0}]`: Using `.personalize` is not allowed in federated queries.\n - Hint: remove `personalize` from query #{0} or remove `federation` from the request")]
PersonalizationInFederatedQuery(usize),
#[error("Inconsistent order for values in facet `{facet}`: index `{previous_uid}` orders {previous_facet_order}, but index `{current_uid}` orders {index_facet_order}.\n - Hint: Remove `federation.mergeFacets` or change `faceting.sortFacetValuesBy` to be consistent in settings.")] #[error("Inconsistent order for values in facet `{facet}`: index `{previous_uid}` orders {previous_facet_order}, but index `{current_uid}` orders {index_facet_order}.\n - Hint: Remove `federation.mergeFacets` or change `faceting.sortFacetValuesBy` to be consistent in settings.")]
InconsistentFacetOrder { InconsistentFacetOrder {
facet: String, facet: String,
@@ -139,9 +137,6 @@ impl ErrorCode for MeilisearchHttpError {
MeilisearchHttpError::InconsistentFacetOrder { .. } => { MeilisearchHttpError::InconsistentFacetOrder { .. } => {
Code::InvalidMultiSearchFacetOrder Code::InvalidMultiSearchFacetOrder
} }
MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
Code::InvalidMultiSearchQueryPersonalization
}
MeilisearchHttpError::InconsistentOriginHeaders { .. } => { MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
Code::InconsistentDocumentChangeHeaders Code::InconsistentDocumentChangeHeaders
} }


@@ -11,7 +11,6 @@ pub mod middleware;
pub mod option; pub mod option;
#[cfg(test)] #[cfg(test)]
mod option_test; mod option_test;
pub mod personalization;
pub mod routes; pub mod routes;
pub mod search; pub mod search;
pub mod search_queue; pub mod search_queue;
@@ -59,7 +58,6 @@ use tracing::{error, info_span};
use tracing_subscriber::filter::Targets; use tracing_subscriber::filter::Targets;
use crate::error::MeilisearchHttpError; use crate::error::MeilisearchHttpError;
use crate::personalization::PersonalizationService;
/// Default number of simultaneously opened indexes. /// Default number of simultaneously opened indexes.
/// ///
@@ -130,8 +128,12 @@ pub type LogStderrType = tracing_subscriber::filter::Filtered<
>; >;
pub fn create_app( pub fn create_app(
services: ServicesData, index_scheduler: Data<IndexScheduler>,
auth_controller: Data<AuthController>,
search_queue: Data<SearchQueue>,
opt: Opt, opt: Opt,
logs: (LogRouteHandle, LogStderrHandle),
analytics: Data<Analytics>,
enable_dashboard: bool, enable_dashboard: bool,
) -> actix_web::App< ) -> actix_web::App<
impl ServiceFactory< impl ServiceFactory<
@@ -143,7 +145,17 @@ pub fn create_app(
>, >,
> { > {
let app = actix_web::App::new() let app = actix_web::App::new()
.configure(|s| configure_data(s, services, &opt)) .configure(|s| {
configure_data(
s,
index_scheduler.clone(),
auth_controller.clone(),
search_queue.clone(),
&opt,
logs,
analytics.clone(),
)
})
.configure(routes::configure) .configure(routes::configure)
.configure(|s| dashboard(s, enable_dashboard)); .configure(|s| dashboard(s, enable_dashboard));
@@ -221,26 +233,12 @@ pub fn setup_meilisearch(
task_db_size: opt.max_task_db_size.as_u64() as usize, task_db_size: opt.max_task_db_size.as_u64() as usize,
index_base_map_size: opt.max_index_size.as_u64() as usize, index_base_map_size: opt.max_index_size.as_u64() as usize,
enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage, enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
indexer_config: Arc::new({ indexer_config: Arc::new((&opt.indexer_options).try_into()?),
let s3_snapshot_options =
opt.s3_snapshot_options.clone().map(|opt| opt.try_into()).transpose()?;
IndexerConfig { s3_snapshot_options, ..(&opt.indexer_options).try_into()? }
}),
autobatching_enabled: true, autobatching_enabled: true,
cleanup_enabled: !opt.experimental_replication_parameters, cleanup_enabled: !opt.experimental_replication_parameters,
max_number_of_tasks: 1_000_000, max_number_of_tasks: 1_000_000,
max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks, max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.map_or_else( batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.into(),
|| {
opt.indexer_options
.max_indexing_memory
// By default, we use half of the available memory to determine the size of batched tasks
.map_or(u64::MAX, |mem| mem.as_u64() / 2)
// And never exceed 10 GiB when we infer the limit
.min(10 * 1024 * 1024 * 1024)
},
|size| size.as_u64(),
),
index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize, index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
index_count: DEFAULT_INDEX_COUNT, index_count: DEFAULT_INDEX_COUNT,
instance_features: opt.to_instance_features(), instance_features: opt.to_instance_features(),
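Reviewer note: one side of this hunk derives `batched_tasks_size_limit` when the flag is unset: half of the indexing memory budget, clamped to 10 GiB, with `u64::MAX` (then clamped by the same `min`) when the budget is unknown. A plain-`u64` sketch of that computation (the real code goes through `Byte::as_u64`):

fn batched_tasks_size_limit(explicit: Option<u64>, max_indexing_memory: Option<u64>) -> u64 {
    explicit.map_or_else(
        || {
            max_indexing_memory
                // Half of the memory budget, or "unlimited" when unknown...
                .map_or(u64::MAX, |mem| mem / 2)
                // ...but never more than 10 GiB when the limit is inferred.
                .min(10 * 1024 * 1024 * 1024)
        },
        |size| size,
    )
}

fn main() {
    const GIB: u64 = 1024 * 1024 * 1024;
    assert_eq!(batched_tasks_size_limit(Some(GIB), Some(64 * GIB)), GIB); // explicit value wins
    assert_eq!(batched_tasks_size_limit(None, Some(8 * GIB)), 4 * GIB);   // half the memory budget
    assert_eq!(batched_tasks_size_limit(None, Some(64 * GIB)), 10 * GIB); // clamped to 10 GiB
    assert_eq!(batched_tasks_size_limit(None, None), 10 * GIB);           // unknown budget also clamps
}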
@@ -536,11 +534,7 @@ fn import_dump(
let indexer_config = if base_config.max_threads.is_none() { let indexer_config = if base_config.max_threads.is_none() {
let (thread_pool, _) = default_thread_pool_and_threads(); let (thread_pool, _) = default_thread_pool_and_threads();
let _config = IndexerConfig { let _config = IndexerConfig { thread_pool, ..*base_config };
thread_pool,
s3_snapshot_options: base_config.s3_snapshot_options.clone(),
..*base_config
};
backup_config = _config; backup_config = _config;
&backup_config &backup_config
} else { } else {
@@ -597,7 +591,7 @@ fn import_dump(
let reader = DocumentsBatchReader::from_reader(reader)?; let reader = DocumentsBatchReader::from_reader(reader)?;
let embedder_configs = index.embedding_configs().embedding_configs(&wtxn)?; let embedder_configs = index.embedding_configs().embedding_configs(&wtxn)?;
let embedders = index_scheduler.embedders(uid.to_string(), embedder_configs)?; let embedders = index_scheduler.embedders(&uid, embedder_configs)?;
let builder = milli::update::IndexDocuments::new( let builder = milli::update::IndexDocuments::new(
&mut wtxn, &mut wtxn,
@@ -625,7 +619,7 @@ fn import_dump(
let mut indexer = indexer::DocumentOperation::new(); let mut indexer = indexer::DocumentOperation::new();
let embedders = index.embedding_configs().embedding_configs(&rtxn)?; let embedders = index.embedding_configs().embedding_configs(&rtxn)?;
let embedders = index_scheduler.embedders(uid.clone(), embedders)?; let embedders = index_scheduler.embedders(&uid, embedders)?;
let mmap = unsafe { memmap2::Mmap::map(index_reader.documents_file())? }; let mmap = unsafe { memmap2::Mmap::map(index_reader.documents_file())? };
@@ -688,26 +682,23 @@ fn import_dump(
Ok(index_scheduler_dump.finish()?) Ok(index_scheduler_dump.finish()?)
} }
pub fn configure_data(config: &mut web::ServiceConfig, services: ServicesData, opt: &Opt) { pub fn configure_data(
let ServicesData { config: &mut web::ServiceConfig,
index_scheduler, index_scheduler: Data<IndexScheduler>,
auth, auth: Data<AuthController>,
search_queue, search_queue: Data<SearchQueue>,
personalization_service, opt: &Opt,
logs_route_handle, (logs_route, logs_stderr): (LogRouteHandle, LogStderrHandle),
logs_stderr_handle, analytics: Data<Analytics>,
analytics, ) {
} = services;
let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize; let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize;
config config
.app_data(index_scheduler) .app_data(index_scheduler)
.app_data(auth) .app_data(auth)
.app_data(search_queue) .app_data(search_queue)
.app_data(analytics) .app_data(analytics)
.app_data(personalization_service) .app_data(web::Data::new(logs_route))
.app_data(logs_route_handle) .app_data(web::Data::new(logs_stderr))
.app_data(logs_stderr_handle)
.app_data(web::Data::new(opt.clone())) .app_data(web::Data::new(opt.clone()))
.app_data( .app_data(
web::JsonConfig::default() web::JsonConfig::default()
@@ -768,14 +759,3 @@ pub fn dashboard(config: &mut web::ServiceConfig, enable_frontend: bool) {
pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) { pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
config.service(web::resource("/").route(web::get().to(routes::running))); config.service(web::resource("/").route(web::get().to(routes::running)));
} }
#[derive(Clone)]
pub struct ServicesData {
pub index_scheduler: Data<IndexScheduler>,
pub auth: Data<AuthController>,
pub search_queue: Data<SearchQueue>,
pub personalization_service: Data<PersonalizationService>,
pub logs_route_handle: Data<LogRouteHandle>,
pub logs_stderr_handle: Data<LogStderrHandle>,
pub analytics: Data<Analytics>,
}


@@ -14,11 +14,10 @@ use index_scheduler::IndexScheduler;
use is_terminal::IsTerminal; use is_terminal::IsTerminal;
use meilisearch::analytics::Analytics; use meilisearch::analytics::Analytics;
use meilisearch::option::LogMode; use meilisearch::option::LogMode;
use meilisearch::personalization::PersonalizationService;
use meilisearch::search_queue::SearchQueue; use meilisearch::search_queue::SearchQueue;
use meilisearch::{ use meilisearch::{
analytics, create_app, setup_meilisearch, LogRouteHandle, LogRouteType, LogStderrHandle, analytics, create_app, setup_meilisearch, LogRouteHandle, LogRouteType, LogStderrHandle,
LogStderrType, Opt, ServicesData, SubscriberForSecondLayer, LogStderrType, Opt, SubscriberForSecondLayer,
}; };
use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE}; use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
@@ -153,15 +152,8 @@ async fn run_http(
let enable_dashboard = &opt.env == "development"; let enable_dashboard = &opt.env == "development";
let opt_clone = opt.clone(); let opt_clone = opt.clone();
let index_scheduler = Data::from(index_scheduler); let index_scheduler = Data::from(index_scheduler);
let auth = Data::from(auth_controller); let auth_controller = Data::from(auth_controller);
let analytics = Data::from(analytics); let analytics = Data::from(analytics);
// Create personalization service with API key from options
let personalization_service = Data::new(
opt.experimental_personalization_api_key
.clone()
.map(PersonalizationService::cohere)
.unwrap_or_else(PersonalizationService::disabled),
);
let search_queue = SearchQueue::new( let search_queue = SearchQueue::new(
opt.experimental_search_queue_size, opt.experimental_search_queue_size,
available_parallelism() available_parallelism()
@@ -173,22 +165,18 @@ async fn run_http(
usize::from(opt.experimental_drop_search_after) as u64 usize::from(opt.experimental_drop_search_after) as u64
)); ));
let search_queue = Data::new(search_queue); let search_queue = Data::new(search_queue);
let (logs_route_handle, logs_stderr_handle) = logs;
let logs_route_handle = Data::new(logs_route_handle);
let logs_stderr_handle = Data::new(logs_stderr_handle);
let services = ServicesData { let http_server = HttpServer::new(move || {
index_scheduler, create_app(
auth, index_scheduler.clone(),
search_queue, auth_controller.clone(),
personalization_service, search_queue.clone(),
logs_route_handle, opt.clone(),
logs_stderr_handle, logs.clone(),
analytics, analytics.clone(),
}; enable_dashboard,
)
let http_server = })
HttpServer::new(move || create_app(services.clone(), opt.clone(), enable_dashboard))
// Disable signals allows the server to terminate immediately when a user enter CTRL-C // Disable signals allows the server to terminate immediately when a user enter CTRL-C
.disable_signals() .disable_signals()
.keep_alive(KeepAlive::Os); .keep_alive(KeepAlive::Os);
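Reviewer note: `HttpServer::new` takes an app factory that runs once per worker thread, so the `ServicesData` variant bundles all shared handles into one cloneable struct moved into the closure; each clone is cheap because actix's `Data<T>` wraps an `Arc`. A dependency-free sketch of that shape (plain `Arc`s and a `String` stand in for the actix types; names are placeholders):

use std::sync::Arc;

#[derive(Clone)]
struct ServicesData {
    index_scheduler: Arc<String>, // stand-ins for Data<IndexScheduler>, Data<SearchQueue>, ...
    search_queue: Arc<Vec<u32>>,
}

fn create_app(services: ServicesData, enable_dashboard: bool) -> String {
    format!(
        "app(scheduler={}, queue_len={}, dashboard={enable_dashboard})",
        services.index_scheduler,
        services.search_queue.len()
    )
}

fn main() {
    let services = ServicesData {
        index_scheduler: Arc::new("scheduler".to_string()),
        search_queue: Arc::new(vec![1, 2, 3]),
    };
    // The real code hands this closure to `HttpServer::new`; every worker
    // clones the same Arc-backed handles instead of rebuilding the services.
    let app_factory = move || create_app(services.clone(), true);
    let _worker_1 = app_factory();
    let _worker_2 = app_factory();
}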


@@ -1,8 +1,7 @@
use lazy_static::lazy_static; use lazy_static::lazy_static;
use prometheus::{ use prometheus::{
opts, register_gauge, register_gauge_vec, register_histogram_vec, register_int_counter_vec, opts, register_gauge, register_histogram_vec, register_int_counter_vec, register_int_gauge,
register_int_gauge, register_int_gauge_vec, Gauge, GaugeVec, HistogramVec, IntCounterVec, register_int_gauge_vec, Gauge, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
IntGauge, IntGaugeVec,
}; };
lazy_static! { lazy_static! {
@@ -74,20 +73,6 @@ lazy_static! {
&["kind", "value"] &["kind", "value"]
) )
.expect("Can't create a metric"); .expect("Can't create a metric");
pub static ref MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE: GaugeVec = register_gauge_vec!(
opts!("meilisearch_batch_running_progress_trace", "The currently running progress trace"),
&["batch_uid", "step_name"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS: IntGaugeVec =
register_int_gauge_vec!(
opts!(
"meilisearch_last_finished_batches_progress_trace_ms",
"The last few batches progress trace in milliseconds"
),
&["batch_uid", "step_name"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_LAST_UPDATE: IntGauge = pub static ref MEILISEARCH_LAST_UPDATE: IntGauge =
register_int_gauge!(opts!("meilisearch_last_update", "Meilisearch Last Update")) register_int_gauge!(opts!("meilisearch_last_update", "Meilisearch Last Update"))
.expect("Can't create a metric"); .expect("Can't create a metric");
@@ -129,9 +114,4 @@ lazy_static! {
"Meilisearch Task Queue Size Until Stop Registering", "Meilisearch Task Queue Size Until Stop Registering",
)) ))
.expect("Can't create a metric"); .expect("Can't create a metric");
pub static ref MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS: IntGauge = register_int_gauge!(opts!(
"meilisearch_personalized_search_requests",
"Meilisearch number of search requests with personalization"
))
.expect("Can't create a metric");
} }
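Usage sketch for a labelled gauge like the one-sided `meilisearch_batch_running_progress_trace` in this hunk, using the `prometheus` and `lazy_static` crates already in the workspace: register the vector once, then address one time series per `(batch_uid, step_name)` pair (label values and the 0.75 progress below are made up):

use lazy_static::lazy_static;
use prometheus::{opts, register_gauge_vec, GaugeVec};

lazy_static! {
    pub static ref BATCH_PROGRESS_TRACE: GaugeVec = register_gauge_vec!(
        opts!(
            "meilisearch_batch_running_progress_trace",
            "The currently running progress trace"
        ),
        &["batch_uid", "step_name"]
    )
    .expect("Can't create a metric");
}

fn main() {
    // One gauge per label combination; the value is the step's progress.
    BATCH_PROGRESS_TRACE.with_label_values(&["42", "indexing documents"]).set(0.75);

    let value = BATCH_PROGRESS_TRACE
        .with_label_values(&["42", "indexing documents"])
        .get();
    assert_eq!(value, 0.75);
}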


@@ -7,13 +7,12 @@ use std::ops::Deref;
use std::path::PathBuf; use std::path::PathBuf;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use std::{env, fmt, fs}; use std::{env, fmt, fs};
use byte_unit::{Byte, ParseError, UnitType}; use byte_unit::{Byte, ParseError, UnitType};
use clap::Parser; use clap::Parser;
use meilisearch_types::features::InstanceTogglableFeatures; use meilisearch_types::features::InstanceTogglableFeatures;
use meilisearch_types::milli::update::{IndexerConfig, S3SnapshotOptions}; use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::ThreadPoolNoAbortBuilder; use meilisearch_types::milli::ThreadPoolNoAbortBuilder;
use rustls::server::{ServerSessionMemoryCache, WebPkiClientVerifier}; use rustls::server::{ServerSessionMemoryCache, WebPkiClientVerifier};
use rustls::RootCertStore; use rustls::RootCertStore;
@@ -56,10 +55,6 @@ const MEILI_EXPERIMENTAL_ENABLE_LOGS_ROUTE: &str = "MEILI_EXPERIMENTAL_ENABLE_LO
const MEILI_EXPERIMENTAL_CONTAINS_FILTER: &str = "MEILI_EXPERIMENTAL_CONTAINS_FILTER"; const MEILI_EXPERIMENTAL_CONTAINS_FILTER: &str = "MEILI_EXPERIMENTAL_CONTAINS_FILTER";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS: &str = const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS"; "MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING";
const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS"; const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS";
const MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE: &str = "MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE"; const MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE: &str = "MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE";
const MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER: &str = "MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER"; const MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER: &str = "MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER";
@@ -75,22 +70,6 @@ const MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES: &str =
const MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION: &str = "MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION"; const MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION: &str = "MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS: &str = const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS"; "MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS";
const MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY: &str =
"MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY";
// Related to S3 snapshots
const MEILI_S3_BUCKET_URL: &str = "MEILI_S3_BUCKET_URL";
const MEILI_S3_BUCKET_REGION: &str = "MEILI_S3_BUCKET_REGION";
const MEILI_S3_BUCKET_NAME: &str = "MEILI_S3_BUCKET_NAME";
const MEILI_S3_SNAPSHOT_PREFIX: &str = "MEILI_S3_SNAPSHOT_PREFIX";
const MEILI_S3_ACCESS_KEY: &str = "MEILI_S3_ACCESS_KEY";
const MEILI_S3_SECRET_KEY: &str = "MEILI_S3_SECRET_KEY";
const MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS: &str = "MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS";
const MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL: &str = "MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL";
const MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS: &str =
"MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS";
const MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE: &str = "MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE";
const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml"; const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
const DEFAULT_DB_PATH: &str = "./data.ms"; const DEFAULT_DB_PATH: &str = "./data.ms";
const DEFAULT_HTTP_ADDR: &str = "localhost:7700"; const DEFAULT_HTTP_ADDR: &str = "localhost:7700";
@@ -100,10 +79,6 @@ const DEFAULT_SNAPSHOT_DIR: &str = "snapshots/";
const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400; const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400;
const DEFAULT_SNAPSHOT_INTERVAL_SEC_STR: &str = "86400"; const DEFAULT_SNAPSHOT_INTERVAL_SEC_STR: &str = "86400";
const DEFAULT_DUMP_DIR: &str = "dumps/"; const DEFAULT_DUMP_DIR: &str = "dumps/";
const DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS: NonZeroUsize = NonZeroUsize::new(10).unwrap();
const DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL: u32 = 0;
const DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS: u64 = 8 * 3600; // 8 hours
const DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE: Byte = Byte::from_u64(375 * 1024 * 1024); // 375 MiB
const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY"; const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY";
const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS"; const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS";
@@ -473,14 +448,11 @@ pub struct Opt {
#[serde(default = "default_limit_batched_tasks")] #[serde(default = "default_limit_batched_tasks")]
pub experimental_max_number_of_batched_tasks: usize, pub experimental_max_number_of_batched_tasks: usize,
/// Experimentally controls the maximum total size, in bytes, of tasks that will be processed /// Experimentally reduces the maximum total size, in bytes, of tasks that will be processed at once,
/// simultaneously. When unspecified, defaults to half of the maximum indexing memory and /// see: <https://github.com/orgs/meilisearch/discussions/801>
/// clamped to 10 GiB. #[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE, default_value_t = default_limit_batched_tasks_total_size())]
/// #[serde(default = "default_limit_batched_tasks_total_size")]
/// See: <https://github.com/orgs/meilisearch/discussions/801> pub experimental_limit_batched_tasks_total_size: Byte,
#[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE)]
#[serde(default)]
pub experimental_limit_batched_tasks_total_size: Option<Byte>,
/// Enables experimental caching of search query embeddings. The value represents the maximal number of entries in the cache of each /// Enables experimental caching of search query embeddings. The value represents the maximal number of entries in the cache of each
/// distinct embedder. /// distinct embedder.
@@ -499,20 +471,10 @@ pub struct Opt {
#[serde(default)] #[serde(default)]
pub experimental_no_snapshot_compaction: bool, pub experimental_no_snapshot_compaction: bool,
/// Experimental personalization API key feature.
///
/// Sets the API key for personalization features.
#[clap(long, env = MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY)]
pub experimental_personalization_api_key: Option<String>,
#[serde(flatten)] #[serde(flatten)]
#[clap(flatten)] #[clap(flatten)]
pub indexer_options: IndexerOpts, pub indexer_options: IndexerOpts,
#[serde(flatten)]
#[clap(flatten)]
pub s3_snapshot_options: Option<S3SnapshotOpts>,
/// Set the path to a configuration file that should be used to setup the engine. /// Set the path to a configuration file that should be used to setup the engine.
/// Format must be TOML. /// Format must be TOML.
#[clap(long)] #[clap(long)]
@@ -614,8 +576,6 @@ impl Opt {
experimental_limit_batched_tasks_total_size, experimental_limit_batched_tasks_total_size,
experimental_embedding_cache_entries, experimental_embedding_cache_entries,
experimental_no_snapshot_compaction, experimental_no_snapshot_compaction,
experimental_personalization_api_key,
s3_snapshot_options,
} = self; } = self;
export_to_env_if_not_present(MEILI_DB_PATH, db_path); export_to_env_if_not_present(MEILI_DB_PATH, db_path);
export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr); export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
@@ -704,12 +664,10 @@ impl Opt {
MEILI_EXPERIMENTAL_MAX_NUMBER_OF_BATCHED_TASKS, MEILI_EXPERIMENTAL_MAX_NUMBER_OF_BATCHED_TASKS,
experimental_max_number_of_batched_tasks.to_string(), experimental_max_number_of_batched_tasks.to_string(),
); );
if let Some(limit) = experimental_limit_batched_tasks_total_size {
export_to_env_if_not_present( export_to_env_if_not_present(
MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE, MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE,
limit.to_string(), experimental_limit_batched_tasks_total_size.to_string(),
); );
}
export_to_env_if_not_present( export_to_env_if_not_present(
MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES, MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES,
experimental_embedding_cache_entries.to_string(), experimental_embedding_cache_entries.to_string(),
@@ -718,22 +676,7 @@ impl Opt {
MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION, MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION,
experimental_no_snapshot_compaction.to_string(), experimental_no_snapshot_compaction.to_string(),
); );
if let Some(experimental_personalization_api_key) = experimental_personalization_api_key {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY,
experimental_personalization_api_key,
);
}
indexer_options.export_to_env(); indexer_options.export_to_env();
if let Some(s3_snapshot_options) = s3_snapshot_options {
#[cfg(not(unix))]
{
let _ = s3_snapshot_options;
panic!("S3 snapshot options are not supported on Windows");
}
#[cfg(unix)]
s3_snapshot_options.export_to_env();
}
} }
pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> { pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> {
@@ -829,22 +772,6 @@ pub struct IndexerOpts {
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS)] #[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS)]
#[serde(default)] #[serde(default)]
pub experimental_no_edition_2024_for_dumps: bool, pub experimental_no_edition_2024_for_dumps: bool,
/// Experimental no edition 2024 to compute prefixes. For more information,
/// see: <https://github.com/orgs/meilisearch/discussions/862>
///
/// Enables the experimental no edition 2024 to compute prefixes.
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING)]
#[serde(default)]
pub experimental_no_edition_2024_for_prefix_post_processing: bool,
/// Experimental no edition 2024 to compute facets. For more information,
/// see: <https://github.com/orgs/meilisearch/discussions/862>
///
/// Enables the experimental no edition 2024 to compute facets.
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING)]
#[serde(default)]
pub experimental_no_edition_2024_for_facet_post_processing: bool,
} }
impl IndexerOpts { impl IndexerOpts {
@@ -856,8 +783,6 @@ impl IndexerOpts {
skip_index_budget: _, skip_index_budget: _,
experimental_no_edition_2024_for_settings, experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps, experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = self; } = self;
if let Some(max_indexing_memory) = max_indexing_memory.0 { if let Some(max_indexing_memory) = max_indexing_memory.0 {
export_to_env_if_not_present( export_to_env_if_not_present(
@@ -883,18 +808,6 @@ impl IndexerOpts {
experimental_no_edition_2024_for_dumps.to_string(), experimental_no_edition_2024_for_dumps.to_string(),
); );
} }
if experimental_no_edition_2024_for_prefix_post_processing {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING,
experimental_no_edition_2024_for_prefix_post_processing.to_string(),
);
}
if experimental_no_edition_2024_for_facet_post_processing {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING,
experimental_no_edition_2024_for_facet_post_processing.to_string(),
);
}
} }
} }
@@ -902,16 +815,6 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
type Error = anyhow::Error; type Error = anyhow::Error;
fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> { fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> {
let IndexerOpts {
max_indexing_memory,
max_indexing_threads,
skip_index_budget,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = other;
let thread_pool = ThreadPoolNoAbortBuilder::new_for_indexing() let thread_pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(other.max_indexing_threads.unwrap_or_else(|| num_cpus::get() / 2)) .num_threads(other.max_indexing_threads.unwrap_or_else(|| num_cpus::get() / 2))
.build()?; .build()?;
@@ -919,163 +822,17 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
Ok(Self { Ok(Self {
thread_pool, thread_pool,
log_every_n: Some(DEFAULT_LOG_EVERY_N), log_every_n: Some(DEFAULT_LOG_EVERY_N),
max_memory: max_indexing_memory.map(|b| b.as_u64() as usize), max_memory: other.max_indexing_memory.map(|b| b.as_u64() as usize),
max_threads: max_indexing_threads.0, max_threads: *other.max_indexing_threads,
max_positions_per_attributes: None, max_positions_per_attributes: None,
skip_index_budget: *skip_index_budget, skip_index_budget: other.skip_index_budget,
experimental_no_edition_2024_for_settings: *experimental_no_edition_2024_for_settings, experimental_no_edition_2024_for_settings: other
experimental_no_edition_2024_for_dumps: *experimental_no_edition_2024_for_dumps, .experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps: other.experimental_no_edition_2024_for_dumps,
chunk_compression_type: Default::default(), chunk_compression_type: Default::default(),
chunk_compression_level: Default::default(), chunk_compression_level: Default::default(),
documents_chunk_size: Default::default(), documents_chunk_size: Default::default(),
max_nb_chunks: Default::default(), max_nb_chunks: Default::default(),
experimental_no_edition_2024_for_prefix_post_processing:
*experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing:
*experimental_no_edition_2024_for_facet_post_processing,
s3_snapshot_options: None,
})
}
}
#[derive(Debug, Clone, Parser, Deserialize)]
// This group is a bit tricky but makes it possible to require all listed fields if one of them
// is specified. It lets us keep an Option for the S3SnapshotOpts configuration.
// <https://github.com/clap-rs/clap/issues/5092#issuecomment-2616986075>
#[group(requires_all = ["s3_bucket_url", "s3_bucket_region", "s3_bucket_name", "s3_snapshot_prefix", "s3_access_key", "s3_secret_key"])]
pub struct S3SnapshotOpts {
/// The S3 bucket URL in the format https://s3.<region>.amazonaws.com.
#[clap(long, env = MEILI_S3_BUCKET_URL, required = false)]
#[serde(default)]
pub s3_bucket_url: String,
/// The region in the format us-east-1.
#[clap(long, env = MEILI_S3_BUCKET_REGION, required = false)]
#[serde(default)]
pub s3_bucket_region: String,
/// The bucket name.
#[clap(long, env = MEILI_S3_BUCKET_NAME, required = false)]
#[serde(default)]
pub s3_bucket_name: String,
/// The prefix path under which to put the snapshot; uses forward slashes (/).
#[clap(long, env = MEILI_S3_SNAPSHOT_PREFIX, required = false)]
#[serde(default)]
pub s3_snapshot_prefix: String,
/// The S3 access key.
#[clap(long, env = MEILI_S3_ACCESS_KEY, required = false)]
#[serde(default)]
pub s3_access_key: String,
/// The S3 secret key.
#[clap(long, env = MEILI_S3_SECRET_KEY, required = false)]
#[serde(default)]
pub s3_secret_key: String,
/// The maximum number of parts that can be uploaded in parallel.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS, default_value_t = default_experimental_s3_snapshot_max_in_flight_parts())]
#[serde(default = "default_experimental_s3_snapshot_max_in_flight_parts")]
pub experimental_s3_max_in_flight_parts: NonZeroUsize,
/// The compression level. Defaults to no compression (0).
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL, default_value_t = default_experimental_s3_snapshot_compression_level())]
#[serde(default = "default_experimental_s3_snapshot_compression_level")]
pub experimental_s3_compression_level: u32,
/// The signature duration for the multipart upload.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS, default_value_t = default_experimental_s3_snapshot_signature_duration_seconds())]
#[serde(default = "default_experimental_s3_snapshot_signature_duration_seconds")]
pub experimental_s3_signature_duration_seconds: u64,
/// The size of the multipart parts.
///
/// Must not be less than 10MiB nor larger than 8GiB. Yes,
/// twice the boundaries of the AWS S3 multipart upload,
/// because we use it a bit differently internally.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE, default_value_t = default_experimental_s3_snapshot_multipart_part_size())]
#[serde(default = "default_experimental_s3_snapshot_multipart_part_size")]
pub experimental_s3_multipart_part_size: Byte,
}
impl S3SnapshotOpts {
/// Exports the values to their corresponding env vars if they are not set.
pub fn export_to_env(self) {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = self;
export_to_env_if_not_present(MEILI_S3_BUCKET_URL, s3_bucket_url);
export_to_env_if_not_present(MEILI_S3_BUCKET_REGION, s3_bucket_region);
export_to_env_if_not_present(MEILI_S3_BUCKET_NAME, s3_bucket_name);
export_to_env_if_not_present(MEILI_S3_SNAPSHOT_PREFIX, s3_snapshot_prefix);
export_to_env_if_not_present(MEILI_S3_ACCESS_KEY, s3_access_key);
export_to_env_if_not_present(MEILI_S3_SECRET_KEY, s3_secret_key);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS,
experimental_s3_max_in_flight_parts.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL,
experimental_s3_compression_level.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS,
experimental_s3_signature_duration_seconds.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE,
experimental_s3_multipart_part_size.to_string(),
);
}
}
impl TryFrom<S3SnapshotOpts> for S3SnapshotOptions {
type Error = anyhow::Error;
fn try_from(other: S3SnapshotOpts) -> Result<Self, Self::Error> {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = other;
Ok(S3SnapshotOptions {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts: experimental_s3_max_in_flight_parts,
s3_compression_level: experimental_s3_compression_level,
s3_signature_duration: Duration::from_secs(experimental_s3_signature_duration_seconds),
s3_multipart_part_size: experimental_s3_multipart_part_size.as_u64(),
}) })
} }
} }
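As a side note, the 10MiB to 8GiB window documented on experimental_s3_multipart_part_size boils down to a simple bounds check; a minimal sketch follows (the helper name is hypothetical and the validation itself is not part of this diff):

use byte_unit::Byte;

// Hypothetical helper: reject multipart part sizes outside the documented 10MiB..=8GiB window.
fn validate_multipart_part_size(part_size: Byte) -> Result<(), String> {
    const MIN_BYTES: u64 = 10 * 1024 * 1024; // 10MiB
    const MAX_BYTES: u64 = 8 * 1024 * 1024 * 1024; // 8GiB
    let bytes = part_size.as_u64();
    if (MIN_BYTES..=MAX_BYTES).contains(&bytes) {
        Ok(())
    } else {
        Err(format!("part size must be between 10MiB and 8GiB, got {bytes} bytes"))
    }
}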
@@ -1278,6 +1035,10 @@ fn default_limit_batched_tasks() -> usize {
usize::MAX usize::MAX
} }
fn default_limit_batched_tasks_total_size() -> Byte {
Byte::from_u64(u64::MAX)
}
fn default_embedding_cache_entries() -> usize { fn default_embedding_cache_entries() -> usize {
0 0
} }
@@ -1290,22 +1051,6 @@ fn default_snapshot_interval_sec() -> &'static str {
DEFAULT_SNAPSHOT_INTERVAL_SEC_STR DEFAULT_SNAPSHOT_INTERVAL_SEC_STR
} }
fn default_experimental_s3_snapshot_max_in_flight_parts() -> NonZeroUsize {
DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS
}
fn default_experimental_s3_snapshot_compression_level() -> u32 {
DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL
}
fn default_experimental_s3_snapshot_signature_duration_seconds() -> u64 {
DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS
}
fn default_experimental_s3_snapshot_multipart_part_size() -> Byte {
DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE
}
fn default_dump_dir() -> PathBuf { fn default_dump_dir() -> PathBuf {
PathBuf::from(DEFAULT_DUMP_DIR) PathBuf::from(DEFAULT_DUMP_DIR)
} }

View File

@@ -1,366 +0,0 @@
use std::time::Duration;
use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::milli::TimeBudget;
use rand::Rng;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use tracing::{debug, info, warn};
use crate::search::{Personalize, SearchResult};
const COHERE_API_URL: &str = "https://api.cohere.ai/v1/rerank";
const MAX_RETRIES: u32 = 10;
#[derive(Debug, thiserror::Error)]
enum PersonalizationError {
#[error("Personalization service: HTTP request failed: {0}")]
Request(#[from] reqwest::Error),
#[error("Personalization service: Failed to parse response: {0}")]
Parse(String),
#[error("Personalization service: Cohere API error: {0}")]
Api(String),
#[error("Personalization service: Unauthorized: invalid API key")]
Unauthorized,
#[error("Personalization service: Rate limited: too many requests")]
RateLimited,
#[error("Personalization service: Bad request: {0}")]
BadRequest(String),
#[error("Personalization service: Internal server error: {0}")]
InternalServerError(String),
#[error("Personalization service: Network error: {0}")]
Network(String),
#[error("Personalization service: Deadline exceeded")]
DeadlineExceeded,
#[error(transparent)]
FeatureNotEnabled(#[from] index_scheduler::error::FeatureNotEnabledError),
}
impl ErrorCode for PersonalizationError {
fn error_code(&self) -> Code {
match self {
PersonalizationError::FeatureNotEnabled { .. } => Code::FeatureNotEnabled,
PersonalizationError::Unauthorized => Code::RemoteInvalidApiKey,
PersonalizationError::RateLimited => Code::TooManySearchRequests,
PersonalizationError::BadRequest(_) => Code::RemoteBadRequest,
PersonalizationError::InternalServerError(_) => Code::RemoteRemoteError,
PersonalizationError::Network(_) | PersonalizationError::Request(_) => {
Code::RemoteCouldNotSendRequest
}
PersonalizationError::Parse(_) | PersonalizationError::Api(_) => {
Code::RemoteBadResponse
}
PersonalizationError::DeadlineExceeded => Code::Internal, // should not be returned to the client
}
}
}
pub struct CohereService {
client: Client,
api_key: String,
}
impl CohereService {
pub fn new(api_key: String) -> Self {
info!("Personalization service initialized with Cohere API");
let client = Client::builder()
.timeout(Duration::from_secs(30))
.build()
.expect("Failed to create HTTP client");
Self { client, api_key }
}
pub async fn rerank_search_results(
&self,
search_result: SearchResult,
personalize: &Personalize,
query: Option<&str>,
time_budget: TimeBudget,
) -> Result<SearchResult, ResponseError> {
if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}
// Extract user context from personalization
let user_context = personalize.user_context.as_str();
// Build the prompt by merging query and user context
let prompt = match query {
Some(q) => format!("User Context: {user_context}\nQuery: {q}"),
None => format!("User Context: {user_context}"),
};
// Extract documents for reranking
let documents: Vec<String> = search_result
.hits
.iter()
.map(|hit| {
// Convert the document to a string representation for reranking
serde_json::to_string(&hit.document).unwrap_or_else(|_| "{}".to_string())
})
.collect();
if documents.is_empty() {
return Ok(search_result);
}
// Call Cohere's rerank API with retry logic
let reranked_indices =
match self.call_rerank_with_retry(&prompt, &documents, time_budget).await {
Ok(indices) => indices,
Err(PersonalizationError::DeadlineExceeded) => {
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}
Err(e) => return Err(e.into()),
};
debug!("Cohere rerank successful, reordering {} results", search_result.hits.len());
// Reorder the hits based on Cohere's reranking
let mut reranked_hits = Vec::new();
for index in reranked_indices.iter() {
if let Some(hit) = search_result.hits.get(*index) {
reranked_hits.push(hit.clone());
}
}
Ok(SearchResult { hits: reranked_hits, ..search_result })
}
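To make the reordering above concrete (an illustrative sketch, not part of the file): if Cohere returns the indices [2, 0] for three hits, only hits 2 and 0 are kept, in that order.

// Sketch mirroring the reordering loop above.
fn reorder_example() {
    let hits = vec!["hit_a", "hit_b", "hit_c"];
    let reranked_indices = vec![2usize, 0];
    let reranked: Vec<&str> =
        reranked_indices.iter().filter_map(|&i| hits.get(i).copied()).collect();
    assert_eq!(reranked, vec!["hit_c", "hit_a"]);
}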
async fn call_rerank_with_retry(
&self,
query: &str,
documents: &[String],
time_budget: TimeBudget,
) -> Result<Vec<usize>, PersonalizationError> {
let request_body = CohereRerankRequest {
query: query.to_string(),
documents: documents.to_vec(),
model: "rerank-english-v3.0".to_string(),
};
// Retry loop similar to vector extraction
for attempt in 0..MAX_RETRIES {
let response_result = self.send_rerank_request(&request_body).await;
let retry_duration = match self.handle_response(response_result).await {
Ok(indices) => return Ok(indices),
Err(retry) => {
warn!("Cohere rerank attempt #{} failed: {}", attempt, retry.error);
if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
return Err(PersonalizationError::DeadlineExceeded);
} else {
match retry.into_duration(attempt) {
Ok(d) => d,
Err(error) => return Err(error),
}
}
}
};
// randomly up to double the retry duration
let retry_duration = retry_duration
+ rand::thread_rng().gen_range(std::time::Duration::ZERO..retry_duration);
warn!("Retrying after {}ms", retry_duration.as_millis());
tokio::time::sleep(retry_duration).await;
}
// Final attempt without retry
let response_result = self.send_rerank_request(&request_body).await;
match self.handle_response(response_result).await {
Ok(indices) => Ok(indices),
Err(retry) => Err(retry.into_error()),
}
}
async fn send_rerank_request(
&self,
request_body: &CohereRerankRequest,
) -> Result<reqwest::Response, reqwest::Error> {
self.client
.post(COHERE_API_URL)
.header("Authorization", format!("Bearer {}", self.api_key))
.header("Content-Type", "application/json")
.json(request_body)
.send()
.await
}
async fn handle_response(
&self,
response_result: Result<reqwest::Response, reqwest::Error>,
) -> Result<Vec<usize>, Retry> {
let response = match response_result {
Ok(r) => r,
Err(e) if e.is_timeout() => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Request timeout: {}",
e
))));
}
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Network error: {}",
e
))));
}
};
let status = response.status();
let status_code = status.as_u16();
if status.is_success() {
let rerank_response: CohereRerankResponse = match response.json().await {
Ok(r) => r,
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Parse(format!(
"Failed to parse response: {}",
e
))));
}
};
// Extract indices from rerank results
let indices: Vec<usize> =
rerank_response.results.iter().map(|result| result.index as usize).collect();
return Ok(indices);
}
// Handle error status codes
let error_body = response.text().await.unwrap_or_else(|_| "Unknown error".to_string());
let retry = match status_code {
401 => Retry::give_up(PersonalizationError::Unauthorized),
429 => Retry::rate_limited(PersonalizationError::RateLimited),
400 => Retry::give_up(PersonalizationError::BadRequest(error_body)),
500..=599 => Retry::retry_later(PersonalizationError::InternalServerError(format!(
"Status {}: {}",
status_code, error_body
))),
402..=499 => Retry::give_up(PersonalizationError::Api(format!(
"Status {}: {}",
status_code, error_body
))),
_ => Retry::retry_later(PersonalizationError::Api(format!(
"Unexpected status {}: {}",
status_code, error_body
))),
};
Err(retry)
}
}
#[derive(Serialize)]
struct CohereRerankRequest {
query: String,
documents: Vec<String>,
model: String,
}
#[derive(Deserialize)]
struct CohereRerankResponse {
results: Vec<CohereRerankResult>,
}
#[derive(Deserialize)]
struct CohereRerankResult {
index: u32,
}
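For reference, the wire format implied by the structs above looks roughly like the following sketch (values are illustrative; the field names come straight from the Serialize/Deserialize derives, which rename nothing, and any extra fields in the real Cohere response are simply ignored):

use serde_json::json;

fn wire_format_example() {
    // Request body sent to the rerank endpoint (values illustrative).
    let _request = json!({
        "query": "User Context: likes space operas\nQuery: dune",
        "documents": ["{\"title\":\"Dune\"}", "{\"title\":\"Foundation\"}"],
        "model": "rerank-english-v3.0"
    });
    // Response body read back: only the `index` of each result is used.
    let _response = json!({
        "results": [{ "index": 1 }, { "index": 0 }]
    });
}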
// Retry strategy similar to vector extraction
struct Retry {
error: PersonalizationError,
strategy: RetryStrategy,
}
enum RetryStrategy {
GiveUp,
Retry,
RetryAfterRateLimit,
}
impl Retry {
fn give_up(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::GiveUp }
}
fn retry_later(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::Retry }
}
fn rate_limited(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::RetryAfterRateLimit }
}
fn into_duration(self, attempt: u32) -> Result<Duration, PersonalizationError> {
match self.strategy {
RetryStrategy::GiveUp => Err(self.error),
RetryStrategy::Retry => {
// Exponential backoff: 10^attempt milliseconds
Ok(Duration::from_millis((10u64).pow(attempt)))
}
RetryStrategy::RetryAfterRateLimit => {
// Longer backoff for rate limits: 100ms + exponential
Ok(Duration::from_millis(100 + (10u64).pow(attempt)))
}
}
}
fn into_error(self) -> PersonalizationError {
self.error
}
}
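Putting the two pieces together: for the plain Retry strategy, the effective wait before the next attempt is the 10^attempt milliseconds computed above plus the up-to-100% random jitter added in call_rerank_with_retry. A minimal sketch of the combined computation (the function name is hypothetical):

use rand::Rng;
use std::time::Duration;

// Sketch: base exponential delay (10^attempt ms, i.e. 1ms, 10ms, 100ms, 1s, ...)
// plus a random extra of up to the same amount, mirroring `call_rerank_with_retry`.
fn jittered_delay(attempt: u32) -> Duration {
    let base = Duration::from_millis(10u64.pow(attempt));
    base + rand::thread_rng().gen_range(Duration::ZERO..base)
}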
pub enum PersonalizationService {
Cohere(CohereService),
Disabled,
}
impl PersonalizationService {
pub fn cohere(api_key: String) -> Self {
// If the API key is empty, consider the personalization service as disabled
if api_key.trim().is_empty() {
Self::disabled()
} else {
Self::Cohere(CohereService::new(api_key))
}
}
pub fn disabled() -> Self {
debug!("Personalization service disabled");
Self::Disabled
}
pub async fn rerank_search_results(
&self,
search_result: SearchResult,
personalize: &Personalize,
query: Option<&str>,
time_budget: TimeBudget,
) -> Result<SearchResult, ResponseError> {
match self {
Self::Cohere(cohere_service) => {
cohere_service
.rerank_search_results(search_result, personalize, query, time_budget)
.await
}
Self::Disabled => Err(PersonalizationError::FeatureNotEnabled(
index_scheduler::error::FeatureNotEnabledError {
disabled_action: "reranking search results",
feature: "personalization",
issue_link: "https://github.com/orgs/meilisearch/discussions/866",
},
)
.into()),
}
}
}
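For orientation, on the side of the compare that still ships this file, the service is presumably built at startup from the experimental personalization API key option removed earlier in this diff; a sketch of that wiring (the function name is hypothetical):

// Sketch: constructing the service from the CLI option, an Option<String> as seen earlier in this diff.
fn build_personalization(api_key: Option<String>) -> PersonalizationService {
    match api_key {
        Some(api_key) => PersonalizationService::cohere(api_key),
        None => PersonalizationService::disabled(),
    }
}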

View File

@@ -282,8 +282,7 @@ async fn process_search_request(
if let Some(search_rules) = auth_filter.get_index_search_rules(&index_uid) { if let Some(search_rules) = auth_filter.get_index_search_rules(&index_uid) {
add_search_rules(&mut query.filter, search_rules); add_search_rules(&mut query.filter, search_rules);
} }
let search_kind = let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let permit = search_queue.try_get_search_permit().await?; let permit = search_queue.try_get_search_permit().await?;
let features = index_scheduler.features(); let features = index_scheduler.features();
@@ -300,7 +299,7 @@ async fn process_search_request(
let (search, _is_finite_pagination, _max_total_hits, _offset) = let (search, _is_finite_pagination, _max_total_hits, _offset) =
prepare_search(&index_cloned, &rtxn, &query, &search_kind, time_budget, features)?; prepare_search(&index_cloned, &rtxn, &query, &search_kind, time_budget, features)?;
match search_from_kind(index_uid, search_kind, search) { match search_from_kind(&index_uid, search_kind, search) {
Ok((search_results, _)) => Ok((rtxn, Ok(search_results))), Ok((search_results, _)) => Ok((rtxn, Ok(search_results))),
Err(MeilisearchHttpError::Milli { Err(MeilisearchHttpError::Milli {
error: meilisearch_types::milli::Error::UserError(user_error), error: meilisearch_types::milli::Error::UserError(user_error),

View File

@@ -1,39 +0,0 @@
pub mod proxy {
use std::fs::File;
use actix_web::HttpRequest;
use index_scheduler::IndexScheduler;
use crate::error::MeilisearchHttpError;
pub enum Body<T: serde::Serialize> {
NdJsonPayload,
Inline(T),
None,
}
impl Body<()> {
pub fn with_ndjson_payload(_file: File) -> Self {
Self::NdJsonPayload
}
pub fn none() -> Self {
Self::None
}
}
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
pub async fn proxy<T: serde::Serialize>(
_index_scheduler: &IndexScheduler,
_index_uid: &str,
_req: &HttpRequest,
_network: meilisearch_types::network::Network,
_body: Body<T>,
_task: &meilisearch_types::tasks::Task,
) -> Result<(), MeilisearchHttpError> {
Ok(())
}
}

View File

@@ -1,84 +0,0 @@
use actix_web::web::{self, Data};
use actix_web::{HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::keys::actions;
use meilisearch_types::tasks::KindWithContent;
use tracing::debug;
use utoipa::OpenApi;
use super::ActionPolicy;
use crate::analytics::Analytics;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::SummarizedTaskView;
#[derive(OpenApi)]
#[openapi(
paths(compact),
tags(
(
name = "Compact an index",
description = "The /compact route uses compacts the database to reorganize and make it smaller and more efficient.",
external_docs(url = "https://www.meilisearch.com/docs/reference/api/compact"),
),
),
)]
pub struct CompactApi;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(compact))));
}
/// Compact an index
#[utoipa::path(
post,
path = "{indexUid}/compact",
tag = "Compact an index",
security(("Bearer" = ["search", "*"])),
params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),
responses(
(status = ACCEPTED, description = "Task successfully enqueued", body = SummarizedTaskView, content_type = "application/json", example = json!(
{
"taskUid": 147,
"indexUid": null,
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "2024-08-08T17:05:55.791772Z"
}
)),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "The Authorization header is missing. It must use the bearer authorization method.",
"code": "missing_authorization_header",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#missing_authorization_header"
}
)),
)
)]
pub async fn compact(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_COMPACT }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
req: HttpRequest,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
analytics.publish(IndexCompacted::default(), &req);
let task = KindWithContent::IndexCompaction { index_uid: index_uid.to_string() };
let task =
match tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false))
.await?
{
Ok(task) => task,
Err(e) => return Err(e.into()),
};
debug!(returns = ?task, "Compact the {index_uid} index");
Ok(HttpResponse::Accepted().json(SummarizedTaskView::from(task)))
}
crate::empty_analytics!(IndexCompacted, "Index Compacted");
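As a usage note, calling this route enqueues an index compaction task like any other asynchronous operation; a sketch of the call from a Rust client (URL, index name, and API key are placeholders):

use reqwest::StatusCode;

// Sketch: triggering a compaction over HTTP (placeholders throughout).
async fn compact_movies() -> reqwest::Result<()> {
    let response = reqwest::Client::new()
        .post("http://localhost:7700/indexes/movies/compact")
        .bearer_auth("MASTER_KEY")
        .send()
        .await?;
    assert_eq!(response.status(), StatusCode::ACCEPTED);
    Ok(())
}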

View File

@@ -45,7 +45,7 @@ use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData; use crate::extractors::authentication::GuardedData;
use crate::extractors::payload::Payload; use crate::extractors::payload::Payload;
use crate::extractors::sequential_extractor::SeqHandler; use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::current_edition::proxy::{proxy, Body}; use crate::routes::indexes::enterprise_edition::proxy::{check_leader, proxy, Body};
use crate::routes::indexes::search::fix_sort_query_parameters; use crate::routes::indexes::search::fix_sort_query_parameters;
use crate::routes::{ use crate::routes::{
get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT, get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
@@ -333,15 +333,14 @@ impl Aggregate for DocumentsDeletionAggregator {
pub async fn delete_document( pub async fn delete_document(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>, index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
path: web::Path<DocumentParam>, path: web::Path<DocumentParam>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
req: HttpRequest, req: HttpRequest,
opt: web::Data<Opt>, opt: web::Data<Opt>,
analytics: web::Data<Analytics>, analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> { ) -> Result<HttpResponse, ResponseError> {
let CustomMetadataQuery { custom_metadata } = params.into_inner();
let DocumentParam { index_uid, document_id } = path.into_inner(); let DocumentParam { index_uid, document_id } = path.into_inner();
let index_uid = IndexUid::try_from(index_uid)?; let index_uid = IndexUid::try_from(index_uid)?;
let network = index_scheduler.network(); let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
analytics.publish( analytics.publish(
DocumentsDeletionAggregator { DocumentsDeletionAggregator {
@@ -361,14 +360,11 @@ pub async fn delete_document(
let dry_run = is_dry_run(&req, &opt)?; let dry_run = is_dry_run(&req, &opt)?;
let task = { let task = {
let index_scheduler = index_scheduler.clone(); let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
})
.await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?; proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
} }
let task: SummarizedTaskView = task.into(); let task: SummarizedTaskView = task.into();
@@ -683,19 +679,6 @@ pub struct UpdateDocumentsQuery {
#[param(value_type = char, default = ",", example = ";")] #[param(value_type = char, default = ",", example = ";")]
#[deserr(default, try_from(char) = from_char_csv_delimiter -> DeserrQueryParamError<InvalidDocumentCsvDelimiter>, error = DeserrQueryParamError<InvalidDocumentCsvDelimiter>)] #[deserr(default, try_from(char) = from_char_csv_delimiter -> DeserrQueryParamError<InvalidDocumentCsvDelimiter>, error = DeserrQueryParamError<InvalidDocumentCsvDelimiter>)]
pub csv_delimiter: Option<u8>, pub csv_delimiter: Option<u8>,
#[param(example = "custom")]
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
pub custom_metadata: Option<String>,
}
#[derive(Deserialize, Debug, Deserr, IntoParams)]
#[deserr(error = DeserrQueryParamError, rename_all = camelCase, deny_unknown_fields)]
#[into_params(parameter_in = Query, rename_all = "camelCase")]
pub struct CustomMetadataQuery {
#[param(example = "custom")]
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
pub custom_metadata: Option<String>,
} }
fn from_char_csv_delimiter( fn from_char_csv_delimiter(
@@ -837,7 +820,6 @@ pub async fn replace_documents(
body, body,
IndexDocumentsMethod::ReplaceDocuments, IndexDocumentsMethod::ReplaceDocuments,
uid, uid,
params.custom_metadata,
dry_run, dry_run,
allow_index_creation, allow_index_creation,
&req, &req,
@@ -940,7 +922,6 @@ pub async fn update_documents(
body, body,
IndexDocumentsMethod::UpdateDocuments, IndexDocumentsMethod::UpdateDocuments,
uid, uid,
params.custom_metadata,
dry_run, dry_run,
allow_index_creation, allow_index_creation,
&req, &req,
@@ -960,13 +941,13 @@ async fn document_addition(
body: Payload, body: Payload,
method: IndexDocumentsMethod, method: IndexDocumentsMethod,
task_id: Option<TaskId>, task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool, dry_run: bool,
allow_index_creation: bool, allow_index_creation: bool,
req: &HttpRequest, req: &HttpRequest,
) -> Result<SummarizedTaskView, MeilisearchHttpError> { ) -> Result<SummarizedTaskView, MeilisearchHttpError> {
let mime_type = extract_mime_type(req)?; let mime_type = extract_mime_type(req)?;
let network = index_scheduler.network(); let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
let format = match ( let format = match (
mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())), mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),
@@ -1086,9 +1067,7 @@ async fn document_addition(
}; };
let scheduler = index_scheduler.clone(); let scheduler = index_scheduler.clone();
let task = match tokio::task::spawn_blocking(move || { let task = match tokio::task::spawn_blocking(move || scheduler.register(task, task_id, dry_run))
scheduler.register_with_custom_metadata(task, task_id, custom_metadata, dry_run)
})
.await? .await?
{ {
Ok(task) => task, Ok(task) => task,
@@ -1098,12 +1077,13 @@ async fn document_addition(
} }
}; };
if network.sharding() { if network.sharding {
if let Some(file) = file { if let Some(file) = file {
proxy( proxy(
&index_scheduler, &index_scheduler,
&index_uid, &index_uid,
req, req,
origin,
network, network,
Body::with_ndjson_payload(file), Body::with_ndjson_payload(file),
&task, &task,
@@ -1153,7 +1133,7 @@ async fn copy_body_to_file(
/// Delete a set of documents based on an array of document ids. /// Delete a set of documents based on an array of document ids.
#[utoipa::path( #[utoipa::path(
post, post,
path = "{indexUid}/documents/delete-batch", path = "{indexUid}/delete-batch",
tag = "Documents", tag = "Documents",
security(("Bearer" = ["documents.delete", "documents.*", "*"])), security(("Bearer" = ["documents.delete", "documents.*", "*"])),
params( params(
@@ -1184,16 +1164,14 @@ pub async fn delete_documents_batch(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>, index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>, index_uid: web::Path<String>,
body: web::Json<Vec<Value>>, body: web::Json<Vec<Value>>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
req: HttpRequest, req: HttpRequest,
opt: web::Data<Opt>, opt: web::Data<Opt>,
analytics: web::Data<Analytics>, analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> { ) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?body, "Delete documents by batch"); debug!(parameters = ?body, "Delete documents by batch");
let CustomMetadataQuery { custom_metadata } = params.into_inner();
let index_uid = IndexUid::try_from(index_uid.into_inner())?; let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let network = index_scheduler.network(); let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
analytics.publish( analytics.publish(
DocumentsDeletionAggregator { DocumentsDeletionAggregator {
@@ -1216,14 +1194,12 @@ pub async fn delete_documents_batch(
let dry_run = is_dry_run(&req, &opt)?; let dry_run = is_dry_run(&req, &opt)?;
let task = { let task = {
let index_scheduler = index_scheduler.clone(); let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
})
.await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?; proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(body), &task)
.await?;
} }
let task: SummarizedTaskView = task.into(); let task: SummarizedTaskView = task.into();
@@ -1273,19 +1249,17 @@ pub struct DocumentDeletionByFilter {
pub async fn delete_documents_by_filter( pub async fn delete_documents_by_filter(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>, index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>, index_uid: web::Path<String>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
body: AwebJson<DocumentDeletionByFilter, DeserrJsonError>, body: AwebJson<DocumentDeletionByFilter, DeserrJsonError>,
req: HttpRequest, req: HttpRequest,
opt: web::Data<Opt>, opt: web::Data<Opt>,
analytics: web::Data<Analytics>, analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> { ) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?body, "Delete documents by filter"); debug!(parameters = ?body, "Delete documents by filter");
let CustomMetadataQuery { custom_metadata } = params.into_inner();
let index_uid = IndexUid::try_from(index_uid.into_inner())?; let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let index_uid = index_uid.into_inner(); let index_uid = index_uid.into_inner();
let filter = body.into_inner(); let filter = body.into_inner();
let network = index_scheduler.network(); let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
analytics.publish( analytics.publish(
DocumentsDeletionAggregator { DocumentsDeletionAggregator {
@@ -1314,14 +1288,12 @@ pub async fn delete_documents_by_filter(
let dry_run = is_dry_run(&req, &opt)?; let dry_run = is_dry_run(&req, &opt)?;
let task = { let task = {
let index_scheduler = index_scheduler.clone(); let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
})
.await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?; proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(filter), &task)
.await?;
} }
let task: SummarizedTaskView = task.into(); let task: SummarizedTaskView = task.into();
@@ -1407,40 +1379,39 @@ impl Aggregate for EditDocumentsByFunctionAggregator {
pub async fn edit_documents_by_function( pub async fn edit_documents_by_function(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ALL }>, Data<IndexScheduler>>, index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ALL }>, Data<IndexScheduler>>,
index_uid: web::Path<String>, index_uid: web::Path<String>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>, params: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
body: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
req: HttpRequest, req: HttpRequest,
opt: web::Data<Opt>, opt: web::Data<Opt>,
analytics: web::Data<Analytics>, analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> { ) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?body, "Edit documents by function"); debug!(parameters = ?params, "Edit documents by function");
let CustomMetadataQuery { custom_metadata } = params.into_inner();
index_scheduler index_scheduler
.features() .features()
.check_edit_documents_by_function("Using the documents edit route")?; .check_edit_documents_by_function("Using the documents edit route")?;
let network = index_scheduler.network(); let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
let index_uid = IndexUid::try_from(index_uid.into_inner())?; let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let index_uid = index_uid.into_inner(); let index_uid = index_uid.into_inner();
let body = body.into_inner(); let params = params.into_inner();
analytics.publish( analytics.publish(
EditDocumentsByFunctionAggregator { EditDocumentsByFunctionAggregator {
filtered: body.filter.is_some(), filtered: params.filter.is_some(),
with_context: body.context.is_some(), with_context: params.context.is_some(),
index_creation: index_scheduler.index(&index_uid).is_err(), index_creation: index_scheduler.index(&index_uid).is_err(),
}, },
&req, &req,
); );
let engine = milli::rhai::Engine::new(); let engine = milli::rhai::Engine::new();
if let Err(e) = engine.compile(&body.function) { if let Err(e) = engine.compile(&params.function) {
return Err(ResponseError::from_msg(e.to_string(), Code::BadRequest)); return Err(ResponseError::from_msg(e.to_string(), Code::BadRequest));
} }
if let Some(ref filter) = body.filter { if let Some(ref filter) = params.filter {
// we ensure the filter is well formed before enqueuing it // we ensure the filter is well formed before enqueuing it
crate::search::parse_filter( crate::search::parse_filter(
filter, filter,
@@ -1451,8 +1422,8 @@ pub async fn edit_documents_by_function(
} }
let task = KindWithContent::DocumentEdition { let task = KindWithContent::DocumentEdition {
index_uid: index_uid.clone(), index_uid: index_uid.clone(),
filter_expr: body.filter.clone(), filter_expr: params.filter.clone(),
context: match body.context.clone() { context: match params.context.clone() {
Some(Value::Object(m)) => Some(m), Some(Value::Object(m)) => Some(m),
None => None, None => None,
_ => { _ => {
@@ -1462,21 +1433,19 @@ pub async fn edit_documents_by_function(
)) ))
} }
}, },
function: body.function.clone(), function: params.function.clone(),
}; };
let uid = get_task_id(&req, &opt)?; let uid = get_task_id(&req, &opt)?;
let dry_run = is_dry_run(&req, &opt)?; let dry_run = is_dry_run(&req, &opt)?;
let task = { let task = {
let index_scheduler = index_scheduler.clone(); let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
})
.await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?; proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(params), &task)
.await?;
} }
let task: SummarizedTaskView = task.into(); let task: SummarizedTaskView = task.into();
@@ -1517,14 +1486,13 @@ pub async fn edit_documents_by_function(
pub async fn clear_all_documents( pub async fn clear_all_documents(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>, index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>, index_uid: web::Path<String>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
req: HttpRequest, req: HttpRequest,
opt: web::Data<Opt>, opt: web::Data<Opt>,
analytics: web::Data<Analytics>, analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> { ) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?; let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let network = index_scheduler.network(); let network = index_scheduler.network();
let CustomMetadataQuery { custom_metadata } = params.into_inner(); let origin = check_leader(&req, &network)?;
analytics.publish( analytics.publish(
DocumentsDeletionAggregator { DocumentsDeletionAggregator {
@@ -1543,14 +1511,11 @@ pub async fn clear_all_documents(
let task = { let task = {
let index_scheduler = index_scheduler.clone(); let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
})
.await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?; proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
} }
let task: SummarizedTaskView = task.into(); let task: SummarizedTaskView = task.into();

View File

@@ -38,6 +38,27 @@ impl Body<()> {
} }
} }
pub fn check_leader(
req: &HttpRequest,
network: &meilisearch_types::enterprise_edition::network::DbNetwork,
) -> Result<Option<Origin>, MeilisearchHttpError> {
match origin_from_req(req)? {
Some(origin) => Ok(Some(origin)),
None => {
let this = network
.local
.as_deref()
.expect("inconsistent `network.sharding` and `network.self`");
let is_leader = this == todo!();
if !is_leader {
return Err(MeilisearchHttpError::NotLeader { leader: todo!() });
}
Ok(None)
}
}
}
/// If necessary, proxies the passed request to the network and updates the task description. /// If necessary, proxies the passed request to the network and updates the task description.
/// ///
/// This function reads the custom headers from the request to determine if it must proxy the request or if the request /// This function reads the custom headers from the request to determine if it must proxy the request or if the request
@@ -52,11 +73,12 @@ pub async fn proxy<T: serde::Serialize>(
index_scheduler: &IndexScheduler, index_scheduler: &IndexScheduler,
index_uid: &str, index_uid: &str,
req: &HttpRequest, req: &HttpRequest,
network: meilisearch_types::network::Network, origin: Option<Origin>,
network: meilisearch_types::enterprise_edition::network::DbNetwork,
body: Body<T>, body: Body<T>,
task: &meilisearch_types::tasks::Task, task: &meilisearch_types::tasks::Task,
) -> Result<(), MeilisearchHttpError> { ) -> Result<(), MeilisearchHttpError> {
match origin_from_req(req)? { match origin {
Some(origin) => { Some(origin) => {
index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })? index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
} }

View File

@@ -260,7 +260,7 @@ pub async fn search(
} }
let index = index_scheduler.index(&index_uid)?; let index = index_scheduler.index(&index_uid)?;
let search_kind = search_kind(&search_query, &index_scheduler, index_uid.to_string(), &index)?; let search_kind = search_kind(&search_query, &index_scheduler, &index_uid, &index)?;
let permit = search_queue.try_get_search_permit().await?; let permit = search_queue.try_get_search_permit().await?;
let search_result = tokio::task::spawn_blocking(move || { let search_result = tokio::task::spawn_blocking(move || {
perform_facet_search( perform_facet_search(
@@ -343,7 +343,6 @@ impl From<FacetSearchQuery> for SearchQuery {
hybrid, hybrid,
ranking_score_threshold, ranking_score_threshold,
locales, locales,
personalize: None,
} }
} }
} }

View File

@@ -28,18 +28,8 @@ use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::is_dry_run; use crate::routes::is_dry_run;
use crate::Opt; use crate::Opt;
pub mod compact;
pub mod documents; pub mod documents;
#[cfg(not(feature = "enterprise"))]
mod community_edition;
#[cfg(feature = "enterprise")]
mod enterprise_edition; mod enterprise_edition;
#[cfg(not(feature = "enterprise"))]
use community_edition as current_edition;
#[cfg(feature = "enterprise")]
use enterprise_edition as current_edition;
pub mod facet_search; pub mod facet_search;
pub mod search; pub mod search;
mod search_analytics; mod search_analytics;
@@ -50,7 +40,7 @@ mod settings_analytics;
pub mod similar; pub mod similar;
mod similar_analytics; mod similar_analytics;
pub use current_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER}; pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
#[derive(OpenApi)] #[derive(OpenApi)]
#[openapi( #[openapi(
@@ -59,9 +49,8 @@ pub use current_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_U
(path = "/", api = facet_search::FacetSearchApi), (path = "/", api = facet_search::FacetSearchApi),
(path = "/", api = similar::SimilarApi), (path = "/", api = similar::SimilarApi),
(path = "/", api = settings::SettingsApi), (path = "/", api = settings::SettingsApi),
(path = "/", api = compact::CompactApi),
), ),
paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats, compact::compact), paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats),
tags( tags(
( (
name = "Indexes", name = "Indexes",
@@ -91,8 +80,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
.service(web::scope("/search").configure(search::configure)) .service(web::scope("/search").configure(search::configure))
.service(web::scope("/facet-search").configure(facet_search::configure)) .service(web::scope("/facet-search").configure(facet_search::configure))
.service(web::scope("/similar").configure(similar::configure)) .service(web::scope("/similar").configure(similar::configure))
.service(web::scope("/settings").configure(settings::configure)) .service(web::scope("/settings").configure(settings::configure)),
.service(web::scope("/compact").configure(compact::configure)),
); );
} }

View File

@@ -13,7 +13,6 @@ use meilisearch_types::serde_cs::vec::CS;
use serde_json::Value; use serde_json::Value;
use tracing::debug; use tracing::debug;
use utoipa::{IntoParams, OpenApi}; use utoipa::{IntoParams, OpenApi};
use uuid::Uuid;
use crate::analytics::Analytics; use crate::analytics::Analytics;
use crate::error::MeilisearchHttpError; use crate::error::MeilisearchHttpError;
@@ -22,12 +21,11 @@ use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler; use crate::extractors::sequential_extractor::SeqHandler;
use crate::metrics::MEILISEARCH_DEGRADED_SEARCH_REQUESTS; use crate::metrics::MEILISEARCH_DEGRADED_SEARCH_REQUESTS;
use crate::routes::indexes::search_analytics::{SearchAggregator, SearchGET, SearchPOST}; use crate::routes::indexes::search_analytics::{SearchAggregator, SearchGET, SearchPOST};
use crate::routes::parse_include_metadata_header;
use crate::search::{ use crate::search::{
add_search_rules, perform_search, HybridQuery, MatchingStrategy, Personalize, add_search_rules, perform_search, HybridQuery, MatchingStrategy, RankingScoreThreshold,
RankingScoreThreshold, RetrieveVectors, SearchKind, SearchParams, SearchQuery, SearchResult, RetrieveVectors, SearchKind, SearchQuery, SearchResult, SemanticRatio, DEFAULT_CROP_LENGTH,
SemanticRatio, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG,
DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO,
}; };
use crate::search_queue::SearchQueue; use crate::search_queue::SearchQueue;
@@ -134,8 +132,6 @@ pub struct SearchQueryGet {
#[deserr(default, error = DeserrQueryParamError<InvalidSearchLocales>)] #[deserr(default, error = DeserrQueryParamError<InvalidSearchLocales>)]
#[param(value_type = Vec<Locale>, explode = false)] #[param(value_type = Vec<Locale>, explode = false)]
pub locales: Option<CS<Locale>>, pub locales: Option<CS<Locale>>,
#[deserr(default, error = DeserrQueryParamError<InvalidSearchPersonalizeUserContext>)]
pub personalize_user_context: Option<String>,
} }
#[derive(Debug, Clone, Copy, PartialEq, deserr::Deserr)] #[derive(Debug, Clone, Copy, PartialEq, deserr::Deserr)]
@@ -207,9 +203,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
)); ));
} }
let personalize =
other.personalize_user_context.map(|user_context| Personalize { user_context });
Ok(Self { Ok(Self {
q: other.q, q: other.q,
// `media` not supported for `GET` // `media` not supported for `GET`
@@ -239,7 +232,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
hybrid, hybrid,
ranking_score_threshold: other.ranking_score_threshold.map(|o| o.0), ranking_score_threshold: other.ranking_score_threshold.map(|o| o.0),
locales: other.locales.map(|o| o.into_iter().collect()), locales: other.locales.map(|o| o.into_iter().collect()),
personalize,
}) })
} }
} }
@@ -328,14 +320,12 @@ pub fn fix_sort_query_parameters(sort_query: &str) -> Vec<String> {
pub async fn search_with_url_query( pub async fn search_with_url_query(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>, index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
search_queue: web::Data<SearchQueue>, search_queue: web::Data<SearchQueue>,
personalization_service: web::Data<crate::personalization::PersonalizationService>,
index_uid: web::Path<String>, index_uid: web::Path<String>,
params: AwebQueryParameter<SearchQueryGet, DeserrQueryParamError>, params: AwebQueryParameter<SearchQueryGet, DeserrQueryParamError>,
req: HttpRequest, req: HttpRequest,
analytics: web::Data<Analytics>, analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> { ) -> Result<HttpResponse, ResponseError> {
let request_uid = Uuid::now_v7(); debug!(parameters = ?params, "Search get");
debug!(request_uid = ?request_uid, parameters = ?params, "Search get");
let index_uid = IndexUid::try_from(index_uid.into_inner())?; let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let mut query: SearchQuery = params.into_inner().try_into()?; let mut query: SearchQuery = params.into_inner().try_into()?;
@@ -349,56 +339,30 @@ pub async fn search_with_url_query(
let index = index_scheduler.index(&index_uid)?; let index = index_scheduler.index(&index_uid)?;
// Extract personalization and query string before moving query let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
let personalize = query.personalize.take();
let search_kind =
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors); let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
// Save the query string for personalization if requested
let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
let permit = search_queue.try_get_search_permit().await?; let permit = search_queue.try_get_search_permit().await?;
let include_metadata = parse_include_metadata_header(&req);
let search_result = tokio::task::spawn_blocking(move || { let search_result = tokio::task::spawn_blocking(move || {
perform_search( perform_search(
SearchParams { &index_uid,
index_uid: index_uid.to_string(), &index,
query, query,
search_kind, search_kind,
retrieve_vectors: retrieve_vector, retrieve_vector,
features: index_scheduler.features(), index_scheduler.features(),
request_uid,
include_metadata,
},
&index,
) )
}) })
.await; .await;
permit.drop().await; permit.drop().await;
let search_result = search_result?; let search_result = search_result?;
if let Ok((search_result, _)) = search_result.as_ref() { if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result); aggregate.succeed(search_result);
} }
analytics.publish(aggregate, &req); analytics.publish(aggregate, &req);
let (mut search_result, time_budget) = search_result?; let search_result = search_result?;
// Apply personalization if requested debug!(returns = ?search_result, "Search get");
if let Some(personalize) = personalize.as_ref() {
search_result = personalization_service
.rerank_search_results(
search_result,
personalize,
personalize_query.as_deref(),
time_budget,
)
.await?;
}
debug!(request_uid = ?request_uid, returns = ?search_result, "Search get");
Ok(HttpResponse::Ok().json(search_result)) Ok(HttpResponse::Ok().json(search_result))
} }
@@ -461,17 +425,15 @@ pub async fn search_with_url_query(
pub async fn search_with_post( pub async fn search_with_post(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>, index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
search_queue: web::Data<SearchQueue>, search_queue: web::Data<SearchQueue>,
personalization_service: web::Data<crate::personalization::PersonalizationService>,
index_uid: web::Path<String>, index_uid: web::Path<String>,
params: AwebJson<SearchQuery, DeserrJsonError>, params: AwebJson<SearchQuery, DeserrJsonError>,
req: HttpRequest, req: HttpRequest,
analytics: web::Data<Analytics>, analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> { ) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?; let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let request_uid = Uuid::now_v7();
let mut query = params.into_inner(); let mut query = params.into_inner();
debug!(request_uid = ?request_uid, parameters = ?query, "Search post"); debug!(parameters = ?query, "Search post");
// Tenant token search_rules. // Tenant token search_rules.
if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) { if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
@@ -482,37 +444,24 @@ pub async fn search_with_post(
let index = index_scheduler.index(&index_uid)?; let index = index_scheduler.index(&index_uid)?;
// Extract personalization and query string before moving query let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
let personalize = query.personalize.take();
let search_kind =
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors); let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
let include_metadata = parse_include_metadata_header(&req);
// Save the query string for personalization if requested
let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
let permit = search_queue.try_get_search_permit().await?; let permit = search_queue.try_get_search_permit().await?;
let search_result = tokio::task::spawn_blocking(move || { let search_result = tokio::task::spawn_blocking(move || {
perform_search( perform_search(
SearchParams { &index_uid,
index_uid: index_uid.to_string(), &index,
query, query,
search_kind, search_kind,
retrieve_vectors, retrieve_vectors,
features: index_scheduler.features(), index_scheduler.features(),
request_uid,
include_metadata,
},
&index,
) )
}) })
.await; .await;
permit.drop().await; permit.drop().await;
let search_result = search_result?; let search_result = search_result?;
if let Ok((ref search_result, _)) = search_result { if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result); aggregate.succeed(search_result);
if search_result.degraded { if search_result.degraded {
MEILISEARCH_DEGRADED_SEARCH_REQUESTS.inc(); MEILISEARCH_DEGRADED_SEARCH_REQUESTS.inc();
@@ -520,28 +469,16 @@ pub async fn search_with_post(
} }
analytics.publish(aggregate, &req); analytics.publish(aggregate, &req);
let (mut search_result, time_budget) = search_result?; let search_result = search_result?;
// Apply personalization if requested debug!(returns = ?search_result, "Search post");
if let Some(personalize) = personalize.as_ref() {
search_result = personalization_service
.rerank_search_results(
search_result,
personalize,
personalize_query.as_deref(),
time_budget,
)
.await?;
}
debug!(request_uid = ?request_uid, returns = ?search_result, "Search post");
Ok(HttpResponse::Ok().json(search_result)) Ok(HttpResponse::Ok().json(search_result))
} }
pub fn search_kind( pub fn search_kind(
query: &SearchQuery, query: &SearchQuery,
index_scheduler: &IndexScheduler, index_scheduler: &IndexScheduler,
index_uid: String, index_uid: &str,
index: &milli::Index, index: &milli::Index,
) -> Result<SearchKind, ResponseError> { ) -> Result<SearchKind, ResponseError> {
let is_placeholder_query = let is_placeholder_query =

View File

@@ -7,7 +7,6 @@ use serde_json::{json, Value};
use crate::aggregate_methods; use crate::aggregate_methods;
use crate::analytics::{Aggregate, AggregateMethod}; use crate::analytics::{Aggregate, AggregateMethod};
use crate::metrics::MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS;
use crate::search::{ use crate::search::{
SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
@@ -96,9 +95,6 @@ pub struct SearchAggregator<Method: AggregateMethod> {
show_ranking_score_details: bool, show_ranking_score_details: bool,
ranking_score_threshold: bool, ranking_score_threshold: bool,
// personalization
total_personalized: usize,
marker: std::marker::PhantomData<Method>, marker: std::marker::PhantomData<Method>,
} }
@@ -133,7 +129,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
hybrid, hybrid,
ranking_score_threshold, ranking_score_threshold,
locales, locales,
personalize,
} = query; } = query;
let mut ret = Self::default(); let mut ret = Self::default();
@@ -209,12 +204,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
ret.locales = locales.iter().copied().collect(); ret.locales = locales.iter().copied().collect();
} }
// personalization
if personalize.is_some() {
ret.total_personalized = 1;
MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS.inc();
}
ret.highlight_pre_tag = *highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG(); ret.highlight_pre_tag = *highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG();
ret.highlight_post_tag = *highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG(); ret.highlight_post_tag = *highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG();
ret.crop_marker = *crop_marker != DEFAULT_CROP_MARKER(); ret.crop_marker = *crop_marker != DEFAULT_CROP_MARKER();
@@ -245,8 +234,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
facet_stats: _, facet_stats: _,
degraded, degraded,
used_negative_operator, used_negative_operator,
request_uid: _,
metadata: _,
} = result; } = result;
self.total_succeeded = self.total_succeeded.saturating_add(1); self.total_succeeded = self.total_succeeded.saturating_add(1);
@@ -307,7 +294,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
total_used_negative_operator, total_used_negative_operator,
ranking_score_threshold, ranking_score_threshold,
mut locales, mut locales,
total_personalized,
marker: _, marker: _,
} = *new; } = *new;
@@ -393,9 +379,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
// locales // locales
self.locales.append(&mut locales); self.locales.append(&mut locales);
// personalization
self.total_personalized = self.total_personalized.saturating_add(total_personalized);
self self
} }
@@ -441,7 +424,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
total_used_negative_operator, total_used_negative_operator,
ranking_score_threshold, ranking_score_threshold,
locales, locales,
total_personalized,
marker: _, marker: _,
} = *self; } = *self;
@@ -515,9 +497,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
"show_ranking_score_details": show_ranking_score_details, "show_ranking_score_details": show_ranking_score_details,
"ranking_score_threshold": ranking_score_threshold, "ranking_score_threshold": ranking_score_threshold,
}, },
"personalization": {
"total_personalized": total_personalized,
},
}) })
} }
} }

Some files were not shown because too many files have changed in this diff.