Compare commits


2 Commits

Author               SHA1         Message                                                  Date
Paul de Nonancourt   2417650afb   Merge manifests into multi-architecture Docker image    2025-11-20 15:41:52 +01:00
Paul de Nonancourt   ef3884e46f   Build ARM64 and AMD64 images on Github-hosted runners   2025-11-19 17:14:20 +01:00
222 changed files with 3646 additions and 11695 deletions

View File

@@ -24,11 +24,6 @@ TBD
- [ ] If not, add the `no db change` label to your PR, and you're good to merge.
- [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining what to do.
### Reminders when adding features
- [ ] Write unit tests using insta
- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
### Reminders when modifying the API
- [ ] Update the openAPI file with utoipa:

View File

@@ -18,7 +18,7 @@ jobs:
timeout-minutes: 180 # 3h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal

View File

@@ -66,7 +66,9 @@ jobs:
fetch-depth: 0 # fetch full history to be able to get main commit sha
ref: ${{ steps.comment-branch.outputs.head_ref }}
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
- name: Run benchmarks on PR ${{ github.event.issue.id }}
run: |

View File

@@ -12,7 +12,9 @@ jobs:
timeout-minutes: 180 # 3h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}

View File

@@ -18,7 +18,7 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal

View File

@@ -44,7 +44,7 @@ jobs:
exit 1
fi
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal

View File

@@ -16,7 +16,7 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: benchmarks
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: benchmarks
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: benchmarks
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal

View File

@@ -6,7 +6,7 @@ on:
env:
MESSAGE: |
### Hello, I'm a bot 🤖
### Hello, I'm a bot 🤖
You are receiving this message because you declared that this PR makes changes to the Meilisearch database.
Depending on the nature of the change, additional actions might be required on your part. The following sections detail these actions for each kind of change; copy the relevant section into the description of your PR and make sure to perform the required actions.
@@ -19,7 +19,6 @@ env:
- [ ] Detail the changes to the DB format and why they are forward compatible
- [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
## This PR makes breaking changes
@@ -36,7 +35,8 @@ env:
- [ ] Write the code to go from the old database to the new one
- If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
- If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
- [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
jobs:
add-comment:

View File

@@ -3,7 +3,7 @@ name: Look for flaky tests
on:
workflow_dispatch:
schedule:
- cron: "0 4 * * *" # Every day at 4:00AM
- cron: '0 4 * * *' # Every day at 4:00AM
jobs:
flaky:
@@ -13,17 +13,11 @@ jobs:
image: ubuntu:22.04
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
- name: Install cargo-flaky
run: cargo install cargo-flaky
- name: Run cargo flaky in the dumps

View File

@@ -12,7 +12,9 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
# Run benchmarks
- name: Run the fuzzer

View File

@@ -25,13 +25,7 @@ jobs:
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
- name: Install cargo-deb
run: cargo install cargo-deb
- uses: actions/checkout@v5

View File

@@ -13,6 +13,9 @@ on:
- cron: '0 23 * * *' # Every day at 11:00pm
workflow_dispatch:
env:
REGISTRY_IMAGE: getmeili/meilisearch
jobs:
build:
runs-on: ${{ matrix.runner }}
@@ -20,18 +23,11 @@ jobs:
strategy:
matrix:
platform: [amd64, arm64]
edition: [community, enterprise]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
- edition: community
registry: getmeili/meilisearch
feature-flag: ""
- edition: enterprise
registry: getmeili/meilisearch-enterprise
feature-flag: "--features enterprise"
permissions: {}
steps:
@@ -58,7 +54,7 @@ jobs:
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ matrix.registry }}
images: ${{ env.REGISTRY_IMAGE }}
# Prevent `latest` from being updated for each new tag pushed.
# We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
flavor: latest=false
@@ -75,13 +71,12 @@ jobs:
with:
platforms: linux/${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ matrix.registry }}
tags: ${{ env.REGISTRY_IMAGE }}
outputs: type=image,push-by-digest=true,name-canonical=true,push=true
build-args: |
COMMIT_SHA=${{ github.sha }}
COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
GIT_TAG=${{ github.ref_name }}
EXTRA_ARGS=${{ matrix.feature-flag }}
- name: Export digest
run: |
@@ -92,21 +87,13 @@ jobs:
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ matrix.edition }}-${{ env.PLATFORM_PAIR }}
name: digests-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge:
runs-on: ubuntu-latest
strategy:
matrix:
edition: [community, enterprise]
include:
- edition: community
registry: getmeili/meilisearch
- edition: enterprise
registry: getmeili/meilisearch-enterprise
needs:
- build
@@ -160,7 +147,7 @@ jobs:
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: digests-${{ matrix.edition }}-*
pattern: digests-*
merge-multiple: true
- name: Login to Docker Hub
@@ -176,7 +163,7 @@ jobs:
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ matrix.registry }}
images: ${{ env.REGISTRY_IMAGE }}
# Prevent `latest` from being updated for each new tag pushed.
# We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
flavor: latest=false
@@ -191,12 +178,12 @@ jobs:
working-directory: ${{ runner.temp }}/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ matrix.registry }}@sha256:%s ' *)
$(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
- name: Inspect image to fetch digest to sign
- name: Inspect image
run: |
digest=$(docker buildx imagetools inspect --format='{{ json .Manifest }}' ${{ matrix.registry }}:${{ steps.meta.outputs.version }} | jq -r '.digest')
echo "DIGEST=${digest}" >> $GITHUB_ENV
digest=$(docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }})
echo "DIGEST=${platform}" >> $GITHUB_ENV
- name: Sign the images with GitHub OIDC Token
env:
@@ -208,10 +195,10 @@ jobs:
done
cosign sign --yes ${images}
# /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel
- name: Notify meilisearch-cloud
# /!\ Don't touch this without checking with Cloud team
- name: Send CI information to Cloud team
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }}
if: github.event_name == 'push'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.MEILI_BOT_GH_PAT }}
@@ -219,13 +206,21 @@ jobs:
event-type: cloud-docker-build
client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
# /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel
- name: Notify meilisearch-kubernetes
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable
if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }}
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.MEILI_BOT_GH_PAT }}
repository: meilisearch/meilisearch-kubernetes
event-type: meilisearch-release
client-payload: '{ "version": "${{ github.ref_name }}" }'
# Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
# - name: 'Setup jq'
# uses: dcarbone/install-jq-action
# - name: Send deployment to Swarmia
# if: github.event_name == 'push' && success()
# run: |
# JSON_STRING=$( jq --null-input --compact-output \
# --arg version "${{ github.ref_name }}" \
# --arg appName "meilisearch" \
# --arg environment "production" \
# --arg commitSha "${{ github.sha }}" \
# --arg repositoryFullName "${{ github.repository }}" \
# '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
# curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
# -H "Content-Type: application/json" \
# -d "$JSON_STRING" \
# https://hook.swarmia.com/deployments

View File

@@ -32,61 +32,157 @@ jobs:
if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
run: bash .github/scripts/check-release.sh
publish-binaries:
name: Publish binary for ${{ matrix.release }} ${{ matrix.edition }} edition
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
edition: [community, enterprise]
release:
[macos-amd64, macos-aarch64, windows, linux-amd64, linux-aarch64]
include:
- edition: "community"
feature-flag: ""
edition-suffix: ""
- edition: "enterprise"
feature-flag: "--features enterprise"
edition-suffix: "enterprise-"
- release: macos-amd64
os: macos-15-intel
binary_path: release/meilisearch
asset_name: macos-amd64
extra-args: ""
- release: macos-aarch64
os: macos-14
binary_path: aarch64-apple-darwin/release/meilisearch
asset_name: macos-apple-silicon
extra-args: "--target aarch64-apple-darwin"
- release: windows
os: windows-2022
binary_path: release/meilisearch.exe
asset_name: windows-amd64.exe
extra-args: ""
- release: linux-amd64
os: ubuntu-22.04
binary_path: x86_64-unknown-linux-gnu/release/meilisearch
asset_name: linux-amd64
extra-args: "--target x86_64-unknown-linux-gnu"
- release: linux-aarch64
os: ubuntu-22.04-arm
binary_path: aarch64-unknown-linux-gnu/release/meilisearch
asset_name: linux-aarch64
extra-args: "--target aarch64-unknown-linux-gnu"
publish-linux:
name: Publish binary for Linux
runs-on: ubuntu-latest
needs: check-version
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }}
run: cargo build --release --locked
# No need to upload binaries for dry run (cron or workflow_dispatch)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.binary_path }}
asset_name: meilisearch-${{ matrix.edition-suffix }}${{ matrix.asset_name }}
file: target/release/meilisearch
asset_name: meilisearch-linux-amd64
tag: ${{ github.ref }}
publish-macos-windows:
name: Publish binary for ${{ matrix.os }}
runs-on: ${{ matrix.os }}
needs: check-version
strategy:
fail-fast: false
matrix:
os: [macos-13, windows-2022]
include:
- os: macos-13
artifact_name: meilisearch
asset_name: meilisearch-macos-amd64
- os: windows-2022
artifact_name: meilisearch.exe
asset_name: meilisearch-windows-amd64.exe
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron or workflow_dispatch)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/release/${{ matrix.artifact_name }}
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
publish-macos-apple-silicon:
name: Publish binary for macOS silicon
runs-on: macos-13
needs: check-version
strategy:
matrix:
include:
- target: aarch64-apple-darwin
asset_name: meilisearch-macos-apple-silicon
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Installing Rust toolchain
uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
target: ${{ matrix.target }}
- name: Cargo build
uses: actions-rs/cargo@v1
with:
command: build
args: --release --target ${{ matrix.target }}
- name: Upload the binary to release
# No need to upload binaries for dry run (cron or workflow_dispatch)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
publish-aarch64:
name: Publish binary for aarch64
runs-on: ubuntu-latest
needs: check-version
env:
DEBIAN_FRONTEND: noninteractive
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
strategy:
matrix:
include:
- target: aarch64-unknown-linux-gnu
asset_name: meilisearch-linux-aarch64
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Install needed dependencies
run: |
apt-get update -y && apt upgrade -y
apt-get install -y curl build-essential gcc-aarch64-linux-gnu
- name: Set up Docker for cross compilation
run: |
apt-get install -y curl apt-transport-https ca-certificates software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update -y && apt-get install -y docker-ce
- name: Installing Rust toolchain
uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
target: ${{ matrix.target }}
- name: Configure target aarch64 GNU
## Environment variable is not passed using env:
## LD gold won't work with MUSL
# env:
# JEMALLOC_SYS_WITH_LG_PAGE: 16
# RUSTFLAGS: '-Clink-arg=-fuse-ld=gold'
run: |
echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
- name: Install a default toolchain that will be used to build cargo cross
run: |
rustup default stable
- name: Cargo build
uses: actions-rs/cargo@v1
with:
command: build
use-cross: true
args: --release --target ${{ matrix.target }}
env:
CROSS_DOCKER_IN_DOCKER: true
- name: List target output files
run: ls -lR ./target
- name: Upload the binary to release
# No need to upload binaries for dry run (cron or workflow_dispatch)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
publish-openapi-file:

View File

@@ -68,7 +68,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -92,7 +92,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -122,7 +122,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -149,7 +149,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -184,7 +184,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -213,7 +213,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -238,7 +238,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -263,7 +263,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -284,7 +284,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -307,7 +307,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -338,7 +338,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -370,7 +370,7 @@ jobs:
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}

View File

@@ -15,40 +15,31 @@ env:
jobs:
test-linux:
name: Tests on Ubuntu
runs-on: ${{ matrix.runner }}
strategy:
matrix:
runner: [ubuntu-22.04, ubuntu-22.04-arm]
features: ["", "--features enterprise"]
name: Tests on ubuntu-22.04
runs-on: ubuntu-latest
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- uses: actions/checkout@v5
- name: check free space before
run: df -h
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
- name: Install needed dependencies
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: check free space after
run: df -h
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- name: Setup test with Rust stable
uses: dtolnay/rust-toolchain@1.91.1
uses: dtolnay/rust-toolchain@1.89
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
with:
key: ${{ matrix.features }}
- name: Run cargo build without any default features
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:
command: build
args: --locked --no-default-features --all
args: --locked --release --no-default-features --all
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
args: --locked --all ${{ matrix.features }}
args: --locked --release --all
test-others:
name: Tests on ${{ matrix.os }}
@@ -56,58 +47,51 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [macos-14, windows-2022]
features: ["", "--features enterprise"]
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
os: [macos-13, windows-2022]
steps:
- uses: actions/checkout@v5
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- uses: dtolnay/rust-toolchain@1.91.1
- name: Run cargo build without any default features
- uses: dtolnay/rust-toolchain@1.89
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:
command: build
args: --locked --no-default-features --all
args: --locked --release --no-default-features --all
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
args: --locked --all ${{ matrix.features }}
args: --locked --release --all
test-all-features:
name: Tests almost all features
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
- name: Install needed dependencies
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
apt-get update
apt-get install --assume-yes build-essential curl
- uses: dtolnay/rust-toolchain@1.89
- name: Run cargo build with almost all features
run: |
cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
- name: Run cargo test with almost all features
run: |
cargo test --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
ollama-ubuntu:
name: Test with Ollama
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
env:
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: Install Ollama
run: |
curl -fsSL https://ollama.com/install.sh | sudo -E sh
@@ -131,21 +115,21 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: test
args: --locked -p meilisearch --features test-ollama ollama
args: --locked --release --all --features test-ollama ollama
test-disabled-tokenization:
name: Test disabled tokenization
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
container:
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
- name: Install needed dependencies
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
apt-get update
apt-get install --assume-yes build-essential curl
- uses: dtolnay/rust-toolchain@1.89
- name: Run cargo tree without default features and check lindera is not present
run: |
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
@@ -156,39 +140,36 @@ jobs:
run: |
cargo tree -f '{p} {f}' -e normal | grep lindera -qz
build:
name: Build in release
runs-on: ubuntu-22.04
# We run tests in debug also, to make sure that the debug_assertions are hit
test-debug:
name: Run tests in debug
runs-on: ubuntu-latest
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
- name: Install needed dependencies
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.89
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- name: Build
run: cargo build --release --locked --target x86_64-unknown-linux-gnu
- name: Run tests in debug
uses: actions-rs/cargo@v1
with:
command: test
args: --locked --all
clippy:
name: Run Clippy
runs-on: ubuntu-22.04
strategy:
matrix:
features: ["", "--features enterprise"]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
components: clippy
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
@@ -196,21 +177,18 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: clippy
args: --all-targets ${{ matrix.features }} -- --deny warnings
args: --all-targets -- --deny warnings
fmt:
name: Run Rustfmt
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
toolchain: nightly-2024-07-09
override: true
components: rustfmt
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
@@ -221,23 +199,3 @@ jobs:
run: |
echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
cargo fmt --all -- --check
declarative-tests:
name: Run declarative tests
runs-on: ubuntu-22.04-arm
permissions:
contents: read
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- name: Run declarative tests
run: |
cargo xtask test workloads/tests/*.json

View File

@@ -18,13 +18,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
- name: Install sd
run: cargo install sd
- name: Update Cargo.toml file

View File

@@ -124,7 +124,6 @@ They are JSON files with the following structure (comments are not actually supp
{
// Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
"name": "hackernews.ndjson_1M,no-threads",
"type": "bench",
// Number of consecutive runs of the commands that should be performed.
// Each run uses a fresh instance of Meilisearch and a fresh database.
// Each run produces its own report file.

Cargo.lock (generated) — 1188 lines changed

File diff suppressed because it is too large

View File

@@ -23,7 +23,7 @@ members = [
]
[workspace.package]
version = "1.30.0"
version = "1.26.0"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",
@@ -50,5 +50,3 @@ opt-level = 3
opt-level = 3
[profile.dev.package.roaring]
opt-level = 3
[profile.dev.package.gemm-f16]
opt-level = 3

Cross.toml (new file) — 7 lines changed
View File

@@ -0,0 +1,7 @@
[build.env]
passthrough = [
"RUST_BACKTRACE",
"CARGO_TERM_COLOR",
"RUSTFLAGS",
"JEMALLOC_SYS_WITH_LG_PAGE"
]

View File

@@ -8,14 +8,16 @@ WORKDIR /
ARG COMMIT_SHA
ARG COMMIT_DATE
ARG GIT_TAG
ARG EXTRA_ARGS
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG}
ENV RUSTFLAGS="-C target-feature=-crt-static"
COPY . .
RUN set -eux; \
apkArch="$(apk --print-arch)"; \
cargo build --release -p meilisearch -p meilitool ${EXTRA_ARGS}
if [ "$apkArch" = "aarch64" ]; then \
export JEMALLOC_SYS_WITH_LG_PAGE=16; \
fi && \
cargo build --release -p meilisearch -p meilitool
# Run
FROM alpine:3.22

View File

@@ -1,326 +0,0 @@
# Declarative tests
Declarative tests ensure that Meilisearch features remain stable across versions.
While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.
Declarative tests instead **simulate the lifetime of a database**: they chain together commands (API requests) and binary changes, verifying that database state and API responses remain consistent.
## Basic example
```jsonc
{
"type": "test",
"name": "api-keys",
"binary": { // the first command will run on the binary following this specification.
"source": "release", // get the binary as a release from GitHub
"version": "1.19.0", // version to fetch
"edition": "community" // edition to fetch
},
"commands": []
}
```
This example defines a no-op test (it does nothing).
If the file is saved at `workloads/tests/example.json`, you can run it with:
```bash
cargo xtask test workloads/tests/example.json
```
## Commands
Commands represent API requests sent to Meilisearch endpoints during a test.
They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
}
}
```
This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.
### Using assets in commands
To keep tests concise and reusable, you can define **assets** at the root of the workload file.
Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0",
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
}
]
}
```
In this example:
- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.
This makes the test much cleaner than inlining a large dataset directly into the command.
For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).
### Asserting responses
Commands can specify both the **expected status code** and the **expected response body**.
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
},
"expectedStatus": 202,
"expectedResponse": {
"enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
"indexUid": "movies",
"status": "enqueued",
"taskUid": 1,
"type": "documentAdditionOrUpdate"
},
"synchronous": "WaitForTask"
}
```
Manually writing `expectedResponse` fields can be tedious.
Instead, you can let the test runner populate them automatically:
```bash
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
cargo xtask test workloads/tests/example.json --add-missing-responses
# OR
# Run the workload to populate expected fields. Updates all fields including existing ones
cargo xtask test workloads/tests/example.json --update-responses
```
This workflow is recommended:
1. Write the test without expected fields.
2. Run it with `--add-missing-responses` to capture the actual responses.
3. Review and commit the generated expectations.
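As an illustrative sketch (the route and response values here are hypothetical), a command written in step 1 carries only the request and the expected status; after step 2 it gains an `expectedResponse` recorded from the live instance:
```jsonc
{
  "route": "indexes/movies/search",
  "method": "POST",
  "body": {
    "inline": { "q": "shazam" }
  },
  "expectedStatus": 200,
  // Added automatically by `--add-missing-responses`:
  "expectedResponse": {
    "hits": [ /* ... */ ],
    "query": "shazam",
    // Replaced by hand during review (step 3) with a bracketed string,
    // per the convention above, so this run-dependent value is ignored.
    "processingTimeMs": "[duration]"
  }
}
```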
## Changing binary
It is possible to insert an instruction to change the current Meilisearch instance from one binary specification to another during a test.
When executed, such an instruction will:
1. Stop the current Meilisearch instance.
2. Fetch the binary specified by the instruction.
3. Restart the server with the specified binary on the same database.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0", // start with version v1.19.0
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
// setup some data
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
},
// switch binary to v1.24.0
{
"binary": {
"source": "release",
"version": "1.24.0",
"edition": "community"
}
}
]
}
```
### Typical Usage
In most cases, the change binary instruction will be used to update a database.
- **Set up** some data using commands on an older version.
- **Upgrade** to the latest version.
- **Assert** that the data and API behavior remain correct after the upgrade.
To properly test the dumpless upgrade, one should typically:
1. Open the database without processing the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments
2. Check that the search, stats and task queue still work.
3. Open the database and process the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish.
4. Check that the indexing, search, stats, and task queue still work.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.12.0",
"edition": "community"
},
"commands": [
// 0. Run commands to populate the database
{
// ..
},
// 1. Open the database with the new MS, without processing the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade", // allows to open with a newer MS
"--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
]
}
},
// 2. Check the search etc.
{
// ..
},
// 3. Open the database with the new MS and process the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade" // allows to open with a newer MS
// no `--experimental-max-number-of-batched-tasks=0`
]
}
},
// 4. Check the indexing, search, etc.
{
// ..
}
]
}
```
This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.
## Variables
Sometimes a command needs to use a value returned by a **previous response**.
These values can be captured and reused using the register field.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
},
"expectedResponse": {
"key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
// ...
},
"register": {
"key": "/key"
},
"synchronous": "WaitForResponse"
}
```
The `register` field captures the value at the JSON path `/key` from the response.
Paths follow the **JavaScript Object Notation Pointer (RFC 6901)** format.
Registered variables are available for all subsequent commands.
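For instance (a minimal sketch with hypothetical values), registering the `taskUid` of an enqueued document addition makes it available as `task_id` for later commands:
```jsonc
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": { "asset": "movies.json" },
  "register": {
    // Paths are RFC 6901 JSON pointers: "/taskUid" selects the top-level
    // `taskUid` field of the response; nested fields chain segments, e.g. "/a/b".
    "task_id": "/taskUid"
  },
  "synchronous": "WaitForTask"
}
```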
Registered variables can be referenced by wrapping their name in double curly braces:
In the route/path:
```jsonc
{
"route": "tasks/{{ task_id }}",
"method": "GET"
}
```
In the request body:
```jsonc
{
"route": "indexes/movies/documents",
"method": "PATCH",
"body": {
"inline": {
"id": "{{ document_id }}",
"overview": "Shazam turns evil and the world is in danger.",
}
}
}
```
Or they can be referenced by their name (**without curly braces**) as an API key:
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": { /* ... */ },
"apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
}
```

View File

@@ -11,27 +11,27 @@ edition.workspace = true
license.workspace = true
[dependencies]
anyhow = "1.0.100"
bumpalo = "3.19.0"
csv = "1.4.0"
memmap2 = "0.9.9"
anyhow = "1.0.98"
bumpalo = "3.18.1"
csv = "1.3.1"
memmap2 = "0.9.7"
milli = { path = "../milli" }
mimalloc = { version = "0.1.48", default-features = false }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
tempfile = "3.23.0"
mimalloc = { version = "0.1.47", default-features = false }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tempfile = "3.20.0"
[dev-dependencies]
criterion = { version = "0.7.0", features = ["html_reports"] }
criterion = { version = "0.6.0", features = ["html_reports"] }
rand = "0.8.5"
rand_chacha = "0.3.1"
roaring = "0.10.12"
[build-dependencies]
anyhow = "1.0.100"
bytes = "1.11.0"
convert_case = "0.9.0"
flate2 = "1.1.5"
reqwest = { version = "0.12.24", features = ["blocking", "rustls-tls"], default-features = false }
anyhow = "1.0.98"
bytes = "1.10.1"
convert_case = "0.8.0"
flate2 = "1.1.2"
reqwest = { version = "0.12.20", features = ["blocking", "rustls-tls"], default-features = false }
[features]
default = ["milli/all-tokenizations"]

View File

@@ -21,10 +21,6 @@ use roaring::RoaringBitmap;
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
fn no_cancel() -> bool {
false
}
const BENCHMARK_ITERATION: usize = 10;
fn setup_dir(path: impl AsRef<Path>) {
@@ -69,7 +65,7 @@ fn setup_settings<'t>(
let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
builder.set_sortable_fields(sortable_fields);
builder.execute(&no_cancel, &Progress::default(), Default::default()).unwrap();
builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
}
fn setup_index_with_settings(
@@ -156,7 +152,7 @@ fn indexing_songs_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -172,7 +168,7 @@ fn indexing_songs_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -224,7 +220,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -240,7 +236,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -270,7 +266,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -286,7 +282,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -340,7 +336,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -356,7 +352,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -418,7 +414,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -434,7 +430,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -464,7 +460,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -480,7 +476,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -506,7 +502,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -522,7 +518,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -575,7 +571,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -591,7 +587,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -643,7 +639,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -659,7 +655,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -711,7 +707,7 @@ fn indexing_wiki(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -727,7 +723,7 @@ fn indexing_wiki(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -778,7 +774,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -794,7 +790,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -824,7 +820,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -840,7 +836,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -893,7 +889,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -909,7 +905,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -971,7 +967,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -987,7 +983,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1018,7 +1014,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1034,7 +1030,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1061,7 +1057,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1077,7 +1073,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1129,7 +1125,7 @@ fn indexing_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1145,7 +1141,7 @@ fn indexing_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1196,7 +1192,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1212,7 +1208,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1242,7 +1238,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1258,7 +1254,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1311,7 +1307,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1327,7 +1323,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1376,7 +1372,7 @@ fn delete_documents_from_ids(index: Index, document_ids_to_delete: Vec<RoaringBi
Some(primary_key),
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1426,7 +1422,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1442,7 +1438,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1472,7 +1468,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1488,7 +1484,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1514,7 +1510,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1530,7 +1526,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1605,7 +1601,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1621,7 +1617,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1697,7 +1693,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1713,7 +1709,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1781,7 +1777,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1797,7 +1793,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1849,7 +1845,7 @@ fn indexing_geo(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1865,7 +1861,7 @@ fn indexing_geo(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1916,7 +1912,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1932,7 +1928,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -1962,7 +1958,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -1978,7 +1974,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)
@@ -2031,7 +2027,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&no_cancel,
&|| false,
Progress::default(),
None,
)
@@ -2047,7 +2043,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&no_cancel,
&|| false,
&Progress::default(),
&Default::default(),
)

View File

@@ -11,8 +11,8 @@ license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
time = { version = "0.3.44", features = ["parsing"] }
time = { version = "0.3.41", features = ["parsing"] }
[build-dependencies]
anyhow = "1.0.100"
vergen-gitcl = "1.0.8"
anyhow = "1.0.98"
vergen-git2 = "1.0.7"

View File

@@ -15,7 +15,7 @@ fn emit_git_variables() -> anyhow::Result<()> {
// Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
// in the corresponding GitHub workflow (publish_docker.yml).
// This is due to the Dockerfile building the binary outside of the git directory.
let mut builder = vergen_gitcl::GitclBuilder::default();
let mut builder = vergen_git2::Git2Builder::default();
builder.branch(true);
builder.commit_timestamp(true);
@@ -25,5 +25,5 @@ fn emit_git_variables() -> anyhow::Result<()> {
let git2 = builder.build()?;
vergen_gitcl::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
}

View File

@@ -1,6 +0,0 @@
use build_info::BuildInfo;
fn main() {
let info = BuildInfo::from_build();
dbg!(info);
}

View File

@@ -11,27 +11,24 @@ readme.workspace = true
license.workspace = true
[dependencies]
anyhow = "1.0.100"
flate2 = "1.1.5"
anyhow = "1.0.98"
flate2 = "1.1.2"
http = "1.3.1"
meilisearch-types = { path = "../meilisearch-types" }
once_cell = "1.21.3"
regex = "1.12.2"
regex = "1.11.1"
roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
tempfile = "3.23.0"
thiserror = "2.0.17"
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tempfile = "3.20.0"
thiserror = "2.0.12"
time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tracing = "0.1.41"
uuid = { version = "1.18.1", features = ["serde", "v4"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }
[dev-dependencies]
big_s = "1.0.2"
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }
meilisearch-types = { path = "../meilisearch-types" }
[features]
enterprise = ["meilisearch-types/enterprise"]

View File

@@ -9,9 +9,8 @@ use meilisearch_types::error::ResponseError;
use meilisearch_types::keys::Key;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::settings::Unchecked;
use meilisearch_types::tasks::network::{DbTaskNetwork, NetworkTopologyChange};
use meilisearch_types::tasks::{
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId,
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId, TaskNetwork,
};
use meilisearch_types::InstanceUid;
use roaring::RoaringBitmap;
@@ -96,7 +95,7 @@ pub struct TaskDump {
)]
pub finished_at: Option<OffsetDateTime>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<DbTaskNetwork>,
pub network: Option<TaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
}
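
For readers unfamiliar with the serde attributes on these fields: `default` makes the field optional when deserializing, and `skip_serializing_if` drops it from the output when it is `None`, which keeps dumps from older versions readable and newer dumps compact. A minimal, self-contained sketch (hypothetical struct, not the actual `TaskDump`):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Row {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    custom_metadata: Option<String>,
}

fn main() -> serde_json::Result<()> {
    // `None` is omitted from the serialized output...
    assert_eq!(serde_json::to_string(&Row { custom_metadata: None })?, "{}");
    // ...and an absent field deserializes to `None` thanks to `default`.
    let row: Row = serde_json::from_str("{}")?;
    assert!(row.custom_metadata.is_none());
    Ok(())
}
```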
@@ -164,7 +163,6 @@ pub enum KindDump {
IndexCompaction {
index_uid: String,
},
NetworkTopologyChange(NetworkTopologyChange),
}
impl From<Task> for TaskDump {
@@ -251,9 +249,6 @@ impl From<KindWithContent> for KindDump {
KindWithContent::IndexCompaction { index_uid } => {
KindDump::IndexCompaction { index_uid }
}
KindWithContent::NetworkTopologyChange(network_topology_change) => {
KindDump::NetworkTopologyChange(network_topology_change)
}
}
}
}
@@ -267,13 +262,13 @@ pub(crate) mod test {
use big_s::S;
use maplit::{btreemap, btreeset};
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
use meilisearch_types::enterprise_edition::network::{Network, Remote};
use meilisearch_types::facet_values_sort::FacetValuesSort;
use meilisearch_types::features::RuntimeTogglableFeatures;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::keys::{Action, Key};
use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::{self, FilterableAttributesRule};
use meilisearch_types::network::{Network, Remote};
use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
use meilisearch_types::task_view::DetailsView;
use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
@@ -565,8 +560,7 @@ pub(crate) mod test {
Network {
local: Some("myself".to_string()),
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
leader: None,
version: Default::default(),
sharding: false,
}
}
@@ -620,10 +614,7 @@ pub(crate) mod test {
assert_eq!(dump.features().unwrap().unwrap(), expected);
// ==== checking the network
let mut expected = create_test_network();
// from v1.29, we drop `leader` and `local` on import
expected.leader = None;
expected.local = None;
let expected = create_test_network();
assert_eq!(&expected, dump.network().unwrap().unwrap());
}
}

View File

@@ -434,11 +434,7 @@ pub(crate) mod test {
// network
let network = dump.network().unwrap().unwrap();
// since v1.29 we are dropping `local` and `leader` on import
insta::assert_snapshot!(network.local.is_none(), @"true");
insta::assert_snapshot!(network.leader.is_none(), @"true");
insta::assert_snapshot!(network.local.as_ref().unwrap(), @"ms-0");
insta::assert_snapshot!(network.remotes.get("ms-0").as_ref().unwrap().url, @"http://localhost:7700");
insta::assert_snapshot!(network.remotes.get("ms-0").as_ref().unwrap().search_api_key.is_none(), @"true");
insta::assert_snapshot!(network.remotes.get("ms-1").as_ref().unwrap().url, @"http://localhost:7701");

View File

@@ -107,14 +107,19 @@ impl Settings<Unchecked> {
}
}
#[derive(Default, Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq)]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub const fn is_not_set(&self) -> bool {
matches!(self, Self::NotSet)

View File

@@ -161,14 +161,19 @@ pub struct Facets {
pub min_level_size: Option<NonZeroUsize>,
}
#[derive(Default, Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub fn map<U, F>(self, f: F) -> Setting<U>
where

View File

@@ -1,7 +1,9 @@
use std::fmt::{self, Display, Formatter};
use std::marker::PhantomData;
use std::str::FromStr;
use serde::Deserialize;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer};
use uuid::Uuid;
use super::settings::{Settings, Unchecked};
@@ -80,3 +82,59 @@ impl Display for IndexUidFormatError {
}
impl std::error::Error for IndexUidFormatError {}
/// A type that tries to match either a star (*) or
/// any other thing that implements `FromStr`.
#[derive(Debug)]
#[cfg_attr(test, derive(serde::Serialize))]
pub enum StarOr<T> {
Star,
Other(T),
}
impl<'de, T, E> Deserialize<'de> for StarOr<T>
where
T: FromStr<Err = E>,
E: Display,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
/// Serde can't differentiate between `StarOr::Star` and `StarOr::Other` without a tag.
/// Simply using `#[serde(untagged)]` + `#[serde(rename="*")]` will lead to attempting to
/// deserialize everything as a `StarOr::Other`, including "*".
/// [`#[serde(other)]`](https://serde.rs/variant-attrs.html#other) might have helped but is
/// not supported on untagged enums.
struct StarOrVisitor<T>(PhantomData<T>);
impl<T, FE> Visitor<'_> for StarOrVisitor<T>
where
T: FromStr<Err = FE>,
FE: Display,
{
type Value = StarOr<T>;
fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result {
formatter.write_str("a string")
}
fn visit_str<SE>(self, v: &str) -> Result<Self::Value, SE>
where
SE: serde::de::Error,
{
match v {
"*" => Ok(StarOr::Star),
v => {
let other = FromStr::from_str(v).map_err(|e: T::Err| {
SE::custom(format!("Invalid `other` value: {}", e))
})?;
Ok(StarOr::Other(other))
}
}
}
}
deserializer.deserialize_str(StarOrVisitor(PhantomData))
}
}
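
A quick illustration of the behavior this visitor implements, using `String` as the `FromStr` target (the driver below is hypothetical and assumes the `StarOr<T>` definition above is in scope):

```rust
fn main() {
    // "*" maps to the wildcard variant...
    let star: StarOr<String> = serde_json::from_str(r#""*""#).unwrap();
    assert!(matches!(star, StarOr::Star));

    // ...anything else goes through `FromStr`.
    let other: StarOr<String> = serde_json::from_str(r#""movies""#).unwrap();
    assert!(matches!(other, StarOr::Other(ref s) if s == "movies"));
}
```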

View File

@@ -192,14 +192,19 @@ pub struct Facets {
pub min_level_size: Option<NonZeroUsize>,
}
#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub fn set(self) -> Option<T> {
match self {

View File

@@ -47,15 +47,20 @@ pub struct Settings<T> {
pub _kind: PhantomData<T>,
}
#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
#[cfg_attr(test, derive(serde::Serialize))]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub fn set(self) -> Option<T> {
match self {

View File

@@ -322,7 +322,7 @@ impl From<Task> for TaskView {
_ => None,
});
let duration = finished_at.zip(started_at).map(|(tf, ts)| tf - ts);
let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));
Self {
uid: id,

View File

@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
pub type Key = meilisearch_types::keys::Key;
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
pub type Network = meilisearch_types::network::Network;
pub type Network = meilisearch_types::enterprise_edition::network::Network;
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
// ===== Other types to clarify the code of the compat module
@@ -95,26 +95,17 @@ impl V6Reader {
Err(e) => return Err(e.into()),
};
let mut network: Option<meilisearch_types::network::Network> =
match fs::read(dump.path().join("network.json")) {
Ok(network_file) => Some(serde_json::from_reader(&*network_file)?),
Err(error) => match error.kind() {
// Allows the file to be missing; this will only result in all experimental features being disabled.
ErrorKind::NotFound => {
debug!("`network.json` not found in dump");
None
}
_ => return Err(error.into()),
},
};
if let Some(network) = &mut network {
// as dumps are typically imported on a different machine than the emitter (otherwise dumpless upgrade would be used),
// we decide to remove the local remote to avoid alias issues
network.local = None;
// for the same reason we disable automatic sharding
network.leader = None;
}
let network = match fs::read(dump.path().join("network.json")) {
Ok(network_file) => Some(serde_json::from_reader(&*network_file)?),
Err(error) => match error.kind() {
// Allows the file to be missing; this will only result in all experimental features being disabled.
ErrorKind::NotFound => {
debug!("`network.json` not found in dump");
None
}
_ => return Err(error.into()),
},
};
let webhooks = match fs::read(dump.path().join("webhooks.json")) {
Ok(webhooks_file) => Some(serde_json::from_reader(&*webhooks_file)?),

View File

@@ -5,9 +5,9 @@ use std::path::PathBuf;
use flate2::write::GzEncoder;
use flate2::Compression;
use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
use meilisearch_types::keys::Key;
use meilisearch_types::network::Network;
use meilisearch_types::settings::{Checked, Settings};
use meilisearch_types::webhooks::WebhooksDumpView;
use serde_json::{Map, Value};

View File

@@ -11,7 +11,7 @@ edition.workspace = true
license.workspace = true
[dependencies]
tempfile = "3.23.0"
thiserror = "2.0.17"
tempfile = "3.20.0"
thiserror = "2.0.12"
tracing = "0.1.41"
uuid = { version = "1.18.1", features = ["serde", "v4"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }

View File

@@ -16,7 +16,7 @@ license.workspace = true
serde_json = "1.0"
[dev-dependencies]
criterion = { version = "0.7.0", features = ["html_reports"] }
criterion = { version = "0.6.0", features = ["html_reports"] }
[[bench]]
name = "benchmarks"

View File

@@ -11,12 +11,12 @@ edition.workspace = true
license.workspace = true
[dependencies]
arbitrary = { version = "1.4.2", features = ["derive"] }
bumpalo = "3.19.0"
clap = { version = "4.5.52", features = ["derive"] }
arbitrary = { version = "1.4.1", features = ["derive"] }
bumpalo = "3.18.1"
clap = { version = "4.5.40", features = ["derive"] }
either = "1.15.0"
fastrand = "2.3.0"
milli = { path = "../milli" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
tempfile = "3.23.0"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tempfile = "3.20.0"

View File

@@ -11,34 +11,33 @@ edition.workspace = true
license.workspace = true
[dependencies]
anyhow = "1.0.100"
anyhow = "1.0.98"
bincode = "1.3.3"
byte-unit = "5.1.6"
bytes = "1.11.0"
bumpalo = "3.19.0"
bytes = "1.10.1"
bumpalo = "3.18.1"
bumparaw-collections = "0.1.4"
convert_case = "0.9.0"
csv = "1.4.0"
convert_case = "0.8.0"
csv = "1.3.1"
derive_builder = "0.20.2"
dump = { path = "../dump" }
enum-iterator = "2.3.0"
enum-iterator = "2.1.0"
file-store = { path = "../file-store" }
flate2 = "1.1.5"
hashbrown = "0.15.5"
indexmap = "2.12.0"
flate2 = "1.1.2"
indexmap = "2.9.0"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
memmap2 = "0.9.9"
memmap2 = "0.9.7"
page_size = "0.6.0"
rayon = "1.11.0"
rayon = "1.10.0"
roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
synchronoise = "1.0.1"
tempfile = "3.23.0"
thiserror = "2.0.17"
time = { version = "0.3.44", features = [
tempfile = "3.20.0"
thiserror = "2.0.12"
time = { version = "0.3.41", features = [
"serde-well-known",
"formatting",
"parsing",
@@ -46,15 +45,11 @@ time = { version = "0.3.44", features = [
] }
tracing = "0.1.41"
ureq = "2.12.1"
uuid = { version = "1.18.1", features = ["serde", "v4"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }
backoff = "0.4.0"
reqwest = { version = "0.12.24", features = [
"rustls-tls",
"http2",
], default-features = false }
reqwest = { version = "0.12.23", features = ["rustls-tls", "http2"], default-features = false }
rusty-s3 = "0.8.1"
tokio = { version = "1.48.0", features = ["full"] }
urlencoding = "2.1.3"
tokio = { version = "1.47.1", features = ["full"] }
[dev-dependencies]
big_s = "1.0.2"
@@ -63,6 +58,3 @@ crossbeam-channel = "0.5.15"
insta = { version = "=1.39.0", features = ["json", "redactions"] }
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }
[features]
enterprise = ["meilisearch-types/enterprise"]

View File

@@ -238,9 +238,6 @@ impl<'a> Dump<'a> {
KindDump::IndexCompaction { index_uid } => {
KindWithContent::IndexCompaction { index_uid }
}
KindDump::NetworkTopologyChange(network_topology_change) => {
KindWithContent::NetworkTopologyChange(network_topology_change)
}
},
};

View File

@@ -3,13 +3,10 @@ use std::fmt::Display;
use meilisearch_types::batches::BatchId;
use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::milli::index::RollbackOutcome;
use meilisearch_types::milli::DocumentId;
use meilisearch_types::tasks::network::ReceiveTaskError;
use meilisearch_types::tasks::{Kind, Status};
use meilisearch_types::{heed, milli};
use reqwest::StatusCode;
use thiserror::Error;
use uuid::Uuid;
use crate::TaskId;
@@ -194,17 +191,6 @@ pub enum Error {
#[error(transparent)]
HeedTransaction(heed::Error),
#[error("No network topology change task is currently enqueued or processing")]
ImportTaskWithoutNetworkTask,
#[error("The network task version (`{network_task}`) does not match the import task version (`{import_task}`)")]
NetworkVersionMismatch { network_task: Uuid, import_task: Uuid },
#[error("The import task emanates from an unknown remote `{0}`")]
ImportTaskUnknownRemote(String),
#[error("The import task with key `{0}` was already received")]
ImportTaskAlreadyReceived(DocumentId),
#[error("{action} requires the Enterprise Edition")]
RequiresEnterpriseEdition { action: &'static str },
#[cfg(test)]
#[error("Planned failure for tests.")]
PlannedFailure,
@@ -262,11 +248,6 @@ impl Error {
| Error::Persist(_)
| Error::FeatureNotEnabled(_)
| Error::Export(_)
| Error::ImportTaskWithoutNetworkTask
| Error::NetworkVersionMismatch { .. }
| Error::ImportTaskAlreadyReceived(_)
| Error::ImportTaskUnknownRemote(_)
| Error::RequiresEnterpriseEdition { .. }
| Error::Anyhow(_) => true,
Error::CreateBatch(_)
| Error::CorruptedTaskQueue
@@ -326,11 +307,6 @@ impl ErrorCode for Error {
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
Error::ImportTaskWithoutNetworkTask => Code::ImportTaskWithoutNetworkTask,
Error::NetworkVersionMismatch { .. } => Code::NetworkVersionMismatch,
Error::ImportTaskAlreadyReceived(_) => Code::ImportTaskAlreadyReceived,
Error::ImportTaskUnknownRemote(_) => Code::ImportTaskUnknownRemote,
Error::RequiresEnterpriseEdition { .. } => Code::RequiresEnterpriseEdition,
Error::S3Error { status, .. } if status.is_client_error() => {
Code::InvalidS3SnapshotRequest
}
@@ -369,12 +345,3 @@ impl ErrorCode for Error {
}
}
}
impl From<ReceiveTaskError> for Error {
fn from(value: ReceiveTaskError) -> Self {
match value {
ReceiveTaskError::UnknownRemote(unknown) => Error::ImportTaskUnknownRemote(unknown),
ReceiveTaskError::DuplicateTask(dup) => Error::ImportTaskAlreadyReceived(dup),
}
}
}

View File

@@ -1,9 +1,9 @@
use std::sync::{Arc, RwLock};
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
use meilisearch_types::network::Network;
use crate::error::FeatureNotEnabledError;
use crate::Result;
@@ -38,10 +38,6 @@ impl RoFeatures {
Self { runtime }
}
pub fn from_runtime_features(features: RuntimeTogglableFeatures) -> Self {
Self { runtime: features }
}
pub fn runtime_features(&self) -> RuntimeTogglableFeatures {
self.runtime
}

View File

@@ -361,12 +361,6 @@ impl IndexMapper {
Ok(())
}
/// The number of indexes in the database
#[cfg(feature = "enterprise")] // only used in enterprise edition for now
pub fn index_count(&self, rtxn: &RoTxn) -> Result<u64> {
Ok(self.index_mapping.len(rtxn)?)
}
/// Return an index, may open it if it wasn't already opened.
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
if let Some((current_name, current_index)) =

View File

@@ -6,7 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{Database, RoTxn};
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::{Details, Kind, Status, Task};
use meilisearch_types::versioning::{self, VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use meilisearch_types::versioning;
use roaring::RoaringBitmap;
use crate::index_mapper::IndexMapper;
@@ -27,7 +27,6 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
queue,
scheduler,
persisted,
export_default_payload_size_bytes: _,
index_mapper,
features: _,
@@ -321,18 +320,11 @@ fn snapshot_details(d: &Details) -> String {
format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
}
Details::UpgradeDatabase { from, to } => {
if to == &(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) {
format!("{{ from: {from:?}, to: [current version] }}")
} else {
format!("{{ from: {from:?}, to: {to:?} }}")
}
format!("{{ from: {from:?}, to: {to:?} }}")
}
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
}
Details::NetworkTopologyChange { moved_documents, message } => {
format!("{{ moved_documents: {moved_documents:?}, message: {message:?}")
}
}
}
@@ -408,21 +400,7 @@ pub fn snapshot_batch(batch: &Batch) -> String {
snap.push('{');
snap.push_str(&format!("uid: {uid}, "));
let details = if let Some(upgrade_to) = &details.upgrade_to {
if upgrade_to.as_str()
== format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}").as_str()
{
let mut details = details.clone();
details.upgrade_to = Some("[current version]".into());
serde_json::to_string(&details).unwrap()
} else {
serde_json::to_string(details).unwrap()
}
} else {
serde_json::to_string(details).unwrap()
};
snap.push_str(&format!("details: {details}, "));
snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
if !embedder_stats.skip_serializing() {
snap.push_str(&format!(

View File

@@ -48,13 +48,13 @@ use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use std::time::Duration;
use byte_unit::Byte;
use dump::Dump;
pub use error::Error;
pub use features::RoFeatures;
use flate2::bufread::GzEncoder;
use flate2::Compression;
use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{
ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
};
@@ -67,14 +67,11 @@ use meilisearch_types::milli::vector::{
Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment,
};
use meilisearch_types::milli::{self, Index};
use meilisearch_types::network::Network;
use meilisearch_types::task_view::TaskView;
use meilisearch_types::tasks::network::{
DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
};
use meilisearch_types::tasks::{KindWithContent, Task};
use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
use milli::vector::db::IndexEmbeddingConfig;
use processing::ProcessingTasks;
pub use queue::Query;
use queue::Queue;
use roaring::RoaringBitmap;
@@ -85,7 +82,6 @@ use uuid::Uuid;
use versioning::Versioning;
use crate::index_mapper::IndexMapper;
use crate::processing::ProcessingTasks;
use crate::utils::clamp_to_page_size;
pub(crate) type BEI128 = I128<BE>;
@@ -148,11 +144,9 @@ pub struct IndexSchedulerOptions {
/// If the autobatcher is allowed to automatically batch tasks
/// it will only batch this defined maximum size (in bytes) of tasks at once.
pub batched_tasks_size_limit: u64,
/// The maximum size of the default payload for exporting documents, in bytes
pub export_default_payload_size_bytes: Byte,
/// The experimental features enabled for this instance.
pub instance_features: InstanceTogglableFeatures,
/// Whether the index scheduler is able to auto upgrade or not.
/// The experimental features enabled for this instance.
pub auto_upgrade: bool,
/// The maximal number of entries in the search query cache of an embedder.
///
@@ -205,9 +199,6 @@ pub struct IndexScheduler {
/// to the same embeddings for the same input text.
embedders: Arc<RwLock<HashMap<EmbedderOptions, Arc<Embedder>>>>,
/// The maximum size of the default payload for exporting documents, in bytes
pub export_default_payload_size_bytes: Byte,
// ================= test
// The next entry is dedicated to the tests.
/// Provide a way to set a breakpoint in multiple part of the scheduler.
@@ -243,7 +234,6 @@ impl IndexScheduler {
cleanup_enabled: self.cleanup_enabled,
experimental_no_edition_2024_for_dumps: self.experimental_no_edition_2024_for_dumps,
persisted: self.persisted,
export_default_payload_size_bytes: self.export_default_payload_size_bytes,
webhooks: self.webhooks.clone(),
embedders: self.embedders.clone(),
@@ -355,7 +345,6 @@ impl IndexScheduler {
persisted,
webhooks: Arc::new(webhooks),
embedders: Default::default(),
export_default_payload_size_bytes: options.export_default_payload_size_bytes,
#[cfg(test)] // Will be replaced in `new_tests` in test environments
test_breakpoint_sdr: crossbeam_channel::bounded(0).0,
@@ -711,14 +700,14 @@ impl IndexScheduler {
self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
}
pub fn set_task_network(&self, task_id: TaskId, network: DbTaskNetwork) -> Result<Task> {
pub fn set_task_network(&self, task_id: TaskId, network: TaskNetwork) -> Result<()> {
let mut wtxn = self.env.write_txn()?;
let mut task =
self.queue.tasks.get_task(&wtxn, task_id)?.ok_or(Error::TaskNotFound(task_id))?;
task.network = Some(network);
self.queue.tasks.all_tasks.put(&mut wtxn, &task_id, &task)?;
wtxn.commit()?;
Ok(task)
Ok(())
}
/// Return the batches matching the query from the user's point of view along
@@ -768,30 +757,18 @@ impl IndexScheduler {
task_id: Option<TaskId>,
dry_run: bool,
) -> Result<Task> {
self.register_with_custom_metadata(kind, task_id, None, dry_run, None)
self.register_with_custom_metadata(kind, task_id, None, dry_run)
}
/// Register a new task in the scheduler, with metadata.
///
/// If it fails and data was associated with the task, it tries to delete the associated data.
///
/// # Parameters
///
/// - task_network: network of the task to check.
///
/// If the task is an import task, only accept it if:
///
/// 1. There is an ongoing network topology change task
/// 2. The task to register matches the network version of the network topology change task
///
/// Always accept the task if it is not an import task.
pub fn register_with_custom_metadata(
&self,
kind: KindWithContent,
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
task_network: Option<TaskNetwork>,
) -> Result<Task> {
// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
@@ -802,19 +779,7 @@ impl IndexScheduler {
}
let mut wtxn = self.env.write_txn()?;
if let Some(TaskNetwork::Import { import_from, network_change, metadata }) = &task_network {
self.update_network_task(&mut wtxn, import_from, network_change, metadata)?;
}
let task = self.queue.register(
&mut wtxn,
&kind,
task_id,
custom_metadata,
dry_run,
task_network.map(DbTaskNetwork::from),
)?;
let task = self.queue.register(&mut wtxn, &kind, task_id, custom_metadata, dry_run)?;
// If the registered task is a task cancelation
// we inform the processing tasks to stop (if necessary).
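
The acceptance rule spelled out in the doc comment above boils down to a small check; a hypothetical distillation (names and error shape assumed, not the actual implementation, `uuid` crate assumed):

```rust
use uuid::Uuid;

// `network_task_version` is the version of the ongoing NetworkTopologyChange
// task, if any; `import_version` is the version stamped on the incoming import task.
fn accept_import(
    network_task_version: Option<Uuid>,
    import_version: Uuid,
) -> Result<(), &'static str> {
    match network_task_version {
        None => Err("ImportTaskWithoutNetworkTask"),
        Some(v) if v != import_version => Err("NetworkVersionMismatch"),
        Some(_) => Ok(()),
    }
}
```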
@@ -836,91 +801,6 @@ impl IndexScheduler {
Ok(task)
}
pub fn network_no_index_for_remote(
&self,
remote_name: String,
origin: Origin,
) -> Result<(), Error> {
let mut wtxn = self.env.write_txn()?;
self.update_network_task(
&mut wtxn,
&ImportData { remote_name, index_name: None, document_count: 0 },
&origin,
&ImportMetadata { index_count: 0, task_key: None, total_index_documents: 0 },
)?;
wtxn.commit()?;
// wake up the scheduler as the task state has changed
self.scheduler.wake_up.signal();
Ok(())
}
fn update_network_task(
&self,
wtxn: &mut heed::RwTxn<'_>,
import_from: &ImportData,
network_change: &Origin,
metadata: &ImportMetadata,
) -> Result<(), Error> {
let mut network_tasks = self
.queue
.tasks
.get_kind(&*wtxn, meilisearch_types::tasks::Kind::NetworkTopologyChange)?;
if network_tasks.is_empty() {
return Err(Error::ImportTaskWithoutNetworkTask);
}
let network_task = {
let processing = self.processing_tasks.read().unwrap().processing.clone();
if processing.is_disjoint(&network_tasks) {
let enqueued = self
.queue
.tasks
.get_status(&*wtxn, meilisearch_types::tasks::Status::Enqueued)?;
network_tasks &= enqueued;
if let Some(network_task) = network_tasks.into_iter().next() {
network_task
} else {
return Err(Error::ImportTaskWithoutNetworkTask);
}
} else {
network_tasks &= &*processing;
network_tasks.into_iter().next().unwrap()
}
};
let mut network_task = self.queue.tasks.get_task(&*wtxn, network_task)?.unwrap();
let network_task_version = network_task
.network
.as_ref()
.map(|network| network.network_version())
.unwrap_or_default();
if network_task_version != network_change.network_version {
return Err(Error::NetworkVersionMismatch {
network_task: network_task_version,
import_task: network_change.network_version,
});
}
let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut network_task.kind
else {
tracing::error!("unexpected network kind for network task while registering task");
return Err(Error::CorruptedTaskQueue);
};
network_topology_change.receive_remote_task(
&import_from.remote_name,
import_from.index_name.as_deref(),
metadata.task_key,
import_from.document_count,
metadata.index_count,
metadata.total_index_documents,
)?;
self.queue.tasks.update_task(wtxn, &mut network_task)?;
Ok(())
}
/// Register a new task coming from a dump in the scheduler.
/// By taking a mutable ref we're pretty sure no one will ever import a dump while actix is running.
pub fn register_dumped_task(&mut self) -> Result<Dump<'_>> {

View File

@@ -42,10 +42,12 @@ impl ProcessingTasks {
/// Set the processing tasks to an empty list
pub fn stop_processing(&mut self) -> Self {
self.progress = None;
Self {
batch: std::mem::take(&mut self.batch),
processing: std::mem::take(&mut self.processing),
progress: std::mem::take(&mut self.progress),
progress: None,
}
}

View File

@@ -15,7 +15,6 @@ use file_store::FileStore;
use meilisearch_types::batches::BatchId;
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
use meilisearch_types::milli::{CboRoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::network::DbTaskNetwork;
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use time::format_description::well_known::Rfc3339;
@@ -260,7 +259,6 @@ impl Queue {
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
network: Option<DbTaskNetwork>,
) -> Result<Task> {
let next_task_id = self.tasks.next_task_id(wtxn)?;
@@ -282,7 +280,7 @@ impl Queue {
details: kind.default_details(),
status: Status::Enqueued,
kind: kind.clone(),
network,
network: None,
custom_metadata,
};
// For deletion and cancelation tasks, we want to make extra sure that they
@@ -350,7 +348,6 @@ impl Queue {
None,
None,
false,
None,
)?;
Ok(())

View File

@@ -3,8 +3,7 @@ use std::ops::{Bound, RangeBounds};
use meilisearch_types::heed::types::{DecodeIgnore, SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::network::DbTaskNetwork;
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
use meilisearch_types::tasks::{Kind, Status, Task};
use roaring::{MultiOps, RoaringBitmap};
use time::OffsetDateTime;
@@ -115,15 +114,14 @@ impl TaskQueue {
/// - CorruptedTaskQueue: The task doesn't exist in the database
pub(crate) fn update_task(&self, wtxn: &mut RwTxn, task: &mut Task) -> Result<()> {
let old_task = self.get_task(wtxn, task.uid)?.ok_or(Error::CorruptedTaskQueue)?;
// network topology tasks may be processed multiple times.
let maybe_reprocessing = old_task.status != Status::Enqueued
|| task.kind.as_kind() == Kind::NetworkTopologyChange;
let reprocessing = old_task.status != Status::Enqueued;
debug_assert!(old_task != *task);
debug_assert_eq!(old_task.uid, task.uid);
// If we're processing a task that failed, it may already contain a batch_uid
debug_assert!(
maybe_reprocessing || (old_task.batch_uid.is_none() && task.batch_uid.is_some()),
reprocessing || (old_task.batch_uid.is_none() && task.batch_uid.is_some()),
"\n==> old: {old_task:?}\n==> new: {task:?}"
);
@@ -145,24 +143,13 @@ impl TaskQueue {
})?;
}
// Avoids rewriting part of the network topology change because of TOCTOU errors
if let (
KindWithContent::NetworkTopologyChange(old_state),
KindWithContent::NetworkTopologyChange(new_state),
) = (old_task.kind, &mut task.kind)
{
new_state.merge(old_state);
// the state possibly just changed, rewrite the details
task.details = Some(new_state.to_details());
}
assert_eq!(
old_task.enqueued_at, task.enqueued_at,
"Cannot update a task's enqueued_at time"
);
if old_task.started_at != task.started_at {
assert!(
maybe_reprocessing || old_task.started_at.is_none(),
reprocessing || old_task.started_at.is_none(),
"Cannot update a task's started_at time"
);
if let Some(started_at) = old_task.started_at {
@@ -174,7 +161,7 @@ impl TaskQueue {
}
if old_task.finished_at != task.finished_at {
assert!(
maybe_reprocessing || old_task.finished_at.is_none(),
reprocessing || old_task.finished_at.is_none(),
"Cannot update a task's finished_at time"
);
if let Some(finished_at) = old_task.finished_at {
@@ -188,16 +175,7 @@ impl TaskQueue {
task.network = match (old_task.network, task.network.take()) {
(None, None) => None,
(None, Some(network)) | (Some(network), None) => Some(network),
(Some(left), Some(right)) => Some(match (left, right) {
(
DbTaskNetwork::Remotes { remote_tasks: mut left, network_version: _ },
DbTaskNetwork::Remotes { remote_tasks: mut right, network_version },
) => {
left.append(&mut right);
DbTaskNetwork::Remotes { remote_tasks: left, network_version }
}
(_, right) => right,
}),
(Some(_), Some(network)) => Some(network),
};
self.all_tasks.put(wtxn, &task.uid, task)?;
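
The `(Some(left), Some(right))` arm above unions the per-remote task ids instead of letting the update clobber the stored value; in miniature, with stand-in types rather than the real `DbTaskNetwork`:

```rust
use std::collections::BTreeMap;

type RemoteTasks = BTreeMap<String, u32>;

// Keep everything both sides know about remote tasks; for any other
// combination of shapes, the newer value wins (not shown here).
fn merge(left: RemoteTasks, mut right: RemoteTasks) -> RemoteTasks {
    let mut merged = left;
    merged.append(&mut right);
    merged
}

fn main() {
    let stored = BTreeMap::from([("ms-0".to_string(), 1)]);
    let update = BTreeMap::from([("ms-1".to_string(), 7)]);
    assert_eq!(merge(stored, update).len(), 2);
}
```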

View File

@@ -203,30 +203,26 @@ fn test_disable_auto_deletion_of_tasks() {
)
.unwrap();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
drop(rtxn);
drop(proc);
// now we're above the max number of tasks
// and if we try to advance in the tick function no new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
drop(rtxn);
drop(proc);
}
#[test]
@@ -271,69 +267,59 @@ fn test_auto_deletion_of_tasks() {
)
.unwrap();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
drop(rtxn);
drop(proc);
{
// now we're above the max number of tasks
// and if we try to advance in the tick function a new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
}
// now we're above the max number of tasks
// and if we try to advance in the tick function a new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
drop(rtxn);
drop(proc);
{
handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
}
handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
drop(rtxn);
drop(proc);
handle.advance_one_failed_batch();
// a new task deletion has been enqueued
handle.advance_one_successful_batch();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
drop(rtxn);
drop(proc);
handle.advance_one_failed_batch();
handle.advance_one_successful_batch();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
drop(rtxn);
drop(proc);
}
#[test]

View File

@@ -74,7 +74,6 @@ impl From<KindWithContent> for AutobatchKind {
| KindWithContent::DumpCreation { .. }
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::NetworkTopologyChange(_)
| KindWithContent::SnapshotCreation => {
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
}

View File

@@ -1,27 +0,0 @@
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::tasks::Task;
use super::create_batch::Batch;
use crate::scheduler::process_batch::ProcessBatchInfo;
use crate::utils::ProcessingBatch;
use crate::{Error, IndexScheduler, Result};
impl IndexScheduler {
pub(super) fn process_network_index_batch(
&self,
_network_task: Task,
_inner_batch: Box<Batch>,
_current_batch: &mut ProcessingBatch,
_progress: Progress,
) -> Result<(Vec<Task>, ProcessBatchInfo)> {
Err(Error::RequiresEnterpriseEdition { action: "processing a network task" })
}
pub(super) fn process_network_ready(
&self,
_task: Task,
_progress: Progress,
) -> Result<(Vec<Task>, ProcessBatchInfo)> {
Err(Error::RequiresEnterpriseEdition { action: "processing a network task" })
}
}
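
This stub file is the community-edition half of a compile-time dispatch: both editions define the same inherent methods on `IndexScheduler`, and a Cargo feature picks which file gets compiled. Schematically, mirroring the `mod.rs` shape visible later in this diff:

```rust
// Exactly one of these modules is compiled in; callers are oblivious.
#[cfg(not(feature = "enterprise"))]
mod community_edition { /* stubs returning Error::RequiresEnterpriseEdition */ }
#[cfg(feature = "enterprise")]
mod enterprise_edition { /* real processing (process_network_index_batch, ...) */ }
```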

View File

@@ -4,7 +4,6 @@ use std::io::ErrorKind;
use meilisearch_types::heed::RoTxn;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::settings::{Settings, Unchecked};
use meilisearch_types::tasks::network::NetworkTopologyState;
use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use uuid::Uuid;
@@ -60,14 +59,6 @@ pub(crate) enum Batch {
index_uid: String,
task: Task,
},
#[allow(clippy::enum_variant_names)] // warranted because we are executing an inner index batch
NetworkIndexBatch {
network_task: Task,
inner_batch: Box<Batch>,
},
NetworkReady {
task: Task,
},
}
#[derive(Debug)]
@@ -149,14 +140,9 @@ impl Batch {
..
} => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
},
Batch::IndexSwap { task } | Batch::NetworkReady { task } => {
Batch::IndexSwap { task } => {
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
}
Batch::NetworkIndexBatch { network_task, inner_batch } => {
let mut tasks = inner_batch.ids();
tasks.insert(network_task.uid);
tasks
}
}
}
@@ -170,14 +156,12 @@ impl Batch {
| Dump(_)
| Export { .. }
| UpgradeDatabase { .. }
| NetworkReady { .. }
| IndexSwap { .. } => None,
IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid, .. }
| IndexCompaction { index_uid, .. } => Some(index_uid),
NetworkIndexBatch { network_task: _, inner_batch } => inner_batch.index_uid(),
}
}
}
@@ -200,8 +184,6 @@ impl fmt::Display for Batch {
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
Batch::Export { .. } => f.write_str("Export")?,
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
Batch::NetworkIndexBatch { .. } => f.write_str("NetworkTopologyChange")?,
Batch::NetworkReady { .. } => f.write_str("NetworkTopologyChange")?,
};
match index_uid {
Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
@@ -470,7 +452,6 @@ impl IndexScheduler {
pub(crate) fn create_next_batch(
&self,
rtxn: &RoTxn,
processing_network_tasks: &RoaringBitmap,
) -> Result<Option<(Batch, ProcessingBatch)>> {
#[cfg(test)]
self.maybe_fail(crate::test_utils::FailureLocation::InsideCreateBatch)?;
@@ -479,6 +460,7 @@ impl IndexScheduler {
let mut current_batch = ProcessingBatch::new(batch_id);
let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
let count_total_enqueued = enqueued.len();
let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;
// 0. we get the last task to cancel.
@@ -527,15 +509,7 @@ impl IndexScheduler {
)));
}
// 2. Check for enqueued network topology changes
let network_changes = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)?
& (enqueued | processing_network_tasks);
if let Some(task_id) = network_changes.iter().next() {
let task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
return self.start_processing_network(rtxn, task, enqueued, current_batch);
}
// 3. we get the next task to delete
// 2. we get the next task to delete
let to_delete = self.queue.tasks.get_kind(rtxn, Kind::TaskDeletion)? & enqueued;
if !to_delete.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_delete)?;
@@ -545,7 +519,7 @@ impl IndexScheduler {
return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
}
// 4. we get the next task to compact
// 3. we get the next task to compact
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
if let Some(task_id) = to_compact.min() {
let mut task =
@@ -560,7 +534,7 @@ impl IndexScheduler {
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
}
// 5. we batch the export.
// 4. we batch the export.
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
if !to_export.is_empty() {
let task_id = to_export.iter().next().expect("There must be at least one export task");
@@ -571,7 +545,7 @@ impl IndexScheduler {
return Ok(Some((Batch::Export { task }, current_batch)));
}
// 6. we batch the snapshot.
// 5. we batch the snapshot.
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
if !to_snapshot.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
@@ -581,7 +555,7 @@ impl IndexScheduler {
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
}
// 7. we batch the dumps.
// 6. we batch the dumps.
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
if let Some(to_dump) = to_dump.min() {
let mut task =
@@ -594,66 +568,25 @@ impl IndexScheduler {
return Ok(Some((Batch::Dump(task), current_batch)));
}
let network = self.network();
// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
// 8. We make a batch from the unprioritised tasks.
let (batch, current_batch) =
self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
// We want to execute all tasks, except those that have a version strictly higher than the network version
// If the task is not associated with any index, verify that it is an index swap and
// create the batch directly. Otherwise, get the index name associated with the task
// and use the autobatcher to batch the enqueued tasks associated with it
let Some(task_version) =
task.network.as_ref().map(|task_network| task_network.network_version())
else {
// do not skip tasks that have no network version, otherwise we will never execute them
return false;
};
// skip tasks with a version strictly higher than the network version
task_version > network.version
})?;
Ok(batch.map(|batch| (batch, current_batch)))
}
fn create_next_batch_unprioritized<F>(
&self,
rtxn: &RoTxn,
enqueued: &RoaringBitmap,
mut current_batch: ProcessingBatch,
mut skip_if: F,
) -> Result<(Option<Batch>, ProcessingBatch)>
where
F: FnMut(&Task) -> bool,
{
let count_total_enqueued = enqueued.len();
let mut enqueued_it = enqueued.iter();
let mut task;
let index_name = loop {
let Some(task_id) = enqueued_it.next() else {
return Ok((None, current_batch));
};
task = self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
if skip_if(&task) {
continue;
}
// If the task is not associated with any index, verify that it is an index swap and
// create the batch directly. Otherwise, get the index name associated with the task
// and use the autobatcher to batch the enqueued tasks associated with it
if let Some(&index_name) = task.indexes().first() {
break index_name;
} else {
assert!(
matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty())
);
current_batch.processing(Some(&mut task));
current_batch.reason(BatchStopReason::TaskCannotBeBatched {
kind: Kind::IndexSwap,
id: task.uid,
});
return Ok((Some(Batch::IndexSwap { task }), current_batch));
};
let index_name = if let Some(&index_name) = task.indexes().first() {
index_name
} else {
assert!(matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty()));
current_batch.processing(Some(&mut task));
current_batch.reason(BatchStopReason::TaskCannotBeBatched {
kind: Kind::IndexSwap,
id: task.uid,
});
return Ok(Some((Batch::IndexSwap { task }, current_batch)));
};
let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
@@ -688,10 +621,6 @@ impl IndexScheduler {
.get_task(rtxn, task_id)
.and_then(|task| task.ok_or(Error::CorruptedTaskQueue))?;
if skip_if(&task) {
continue;
}
if let Some(uuid) = task.content_uuid() {
let content_size = match self.queue.file_store.compute_size(uuid) {
Ok(content_size) => content_size,
@@ -722,116 +651,19 @@ impl IndexScheduler {
autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
{
current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
let batch = self.create_next_batch_index(
rtxn,
index_name.to_string(),
batchkind,
&mut current_batch,
create_index,
)?;
return Ok((batch, current_batch));
return Ok(self
.create_next_batch_index(
rtxn,
index_name.to_string(),
batchkind,
&mut current_batch,
create_index,
)?
.map(|batch| (batch, current_batch)));
}
// If we found no tasks then we were notified of something that got autobatched
// somehow and there is nothing to do.
Ok((None, current_batch))
}
fn start_processing_network(
&self,
rtxn: &RoTxn,
mut task: Task,
enqueued: &RoaringBitmap,
mut current_batch: ProcessingBatch,
) -> Result<Option<(Batch, ProcessingBatch)>> {
current_batch.processing(Some(&mut task));
let change_version =
task.network.as_ref().map(|network| network.network_version()).unwrap_or_default();
let KindWithContent::NetworkTopologyChange(network_topology_change) = &task.kind else {
panic!("inconsistent kind with content")
};
match network_topology_change.state() {
NetworkTopologyState::WaitingForOlderTasks => {
let res =
self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
// in this limited mode of execution, we only want to run tasks:
// 0. with an index
// 1. with a version
// 2. that version strictly lower than the network task version
// 0. skip indexless tasks that are not index swap
if task.index_uid().is_none() && task.kind.as_kind() != Kind::IndexSwap {
return true;
}
// 1. skip tasks without version
let Some(task_version) =
task.network.as_ref().map(|network| network.network_version())
else {
return true;
};
// 2. skip tasks with a version equal or higher to the network task version
task_version >= change_version
});
let (batch, current_batch) = res?;
let batch = match batch {
Some(batch) => {
let inner_batch = Box::new(batch);
Batch::NetworkIndexBatch { network_task: task, inner_batch }
}
None => Batch::NetworkReady { task },
};
Ok(Some((batch, current_batch)))
}
NetworkTopologyState::ImportingDocuments => {
// if the import is done we need to go to the next state
if network_topology_change.is_import_finished() {
return Ok(Some((Batch::NetworkReady { task }, current_batch)));
}
let res =
self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
// in this limited mode of execution, we only want to run tasks:
// 0. with an index
// 1. with a version
// 2. that version equal to the network task version
// 0. skip indexless tasks
if task.index_uid().is_none() && task.kind.as_kind() != Kind::IndexSwap {
return true;
}
// 1. skip tasks without version
let Some(task_version) =
task.network.as_ref().map(|network| network.network_version())
else {
return true;
};
// 2. skip tasks with a version different from the network task version
task_version != change_version
});
let (batch, current_batch) = res?;
let batch = batch.map(|batch| {
let inner_batch = Box::new(batch);
(Batch::NetworkIndexBatch { network_task: task, inner_batch }, current_batch)
});
Ok(batch)
}
NetworkTopologyState::ExportingDocuments | NetworkTopologyState::Finished => {
Ok(Some((Batch::NetworkReady { task }, current_batch)))
}
}
Ok(None)
}
}
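
For orientation, the states matched above (variant names as they appear in the source; the annotated progression is inferred from the match arms and from `update_state`, which is not shown in this diff):

```rust
enum NetworkTopologyState {
    WaitingForOlderTasks, // drain index tasks stamped strictly before the change version
    ImportingDocuments,   // run import tasks stamped with exactly the change version
    ExportingDocuments,   // push documents out to the new topology
    Finished,             // nothing left to do
}
```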

View File

@@ -1,301 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use std::collections::BTreeMap;
use std::time::Duration;
use bumpalo::Bump;
use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::progress::{EmbedderStats, Progress};
use meilisearch_types::milli::update::new::indexer;
use meilisearch_types::milli::update::new::indexer::current_edition::sharding::Shards;
use meilisearch_types::milli::{self};
use meilisearch_types::network::Remote;
use meilisearch_types::tasks::network::{NetworkTopologyState, Origin};
use meilisearch_types::tasks::{KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use super::create_batch::Batch;
use crate::scheduler::process_batch::ProcessBatchInfo;
use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
use crate::utils::ProcessingBatch;
use crate::{Error, IndexScheduler, Result};
impl IndexScheduler {
pub(super) fn process_network_index_batch(
&self,
mut network_task: Task,
inner_batch: Box<Batch>,
current_batch: &mut ProcessingBatch,
progress: Progress,
) -> Result<(Vec<Task>, ProcessBatchInfo)> {
let (mut tasks, info) = self.process_batch(*inner_batch, current_batch, progress)?;
let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut network_task.kind
else {
tracing::error!("unexpected network kind for network task while processing batch");
return Err(Error::CorruptedTaskQueue);
};
for task in &tasks {
let Some(network) = task.network.as_ref() else {
continue;
};
let Some(import) = network.import_data() else {
continue;
};
if let Some(index_name) = import.index_name.as_deref() {
network_topology_change.process_remote_tasks(
&import.remote_name,
index_name,
import.document_count,
);
}
}
network_task.details = Some(network_topology_change.to_details());
tasks.push(network_task);
Ok((tasks, info))
}
pub(super) fn process_network_ready(
&self,
mut task: Task,
progress: Progress,
) -> Result<(Vec<Task>, ProcessBatchInfo)> {
let KindWithContent::NetworkTopologyChange(network_topology_change) = &mut task.kind else {
tracing::error!("network topology change task has the wrong kind with content");
return Err(Error::CorruptedTaskQueue);
};
let Some(task_network) = &task.network else {
tracing::error!("network topology change task has no network");
return Err(Error::CorruptedTaskQueue);
};
let origin;
let origin = match task_network.origin() {
Some(origin) => origin,
None => {
let myself = network_topology_change.in_name().expect("origin is not the leader");
origin = Origin {
remote_name: myself.to_string(),
task_uid: task.uid,
network_version: task_network.network_version(),
};
&origin
}
};
if let Some((remotes, out_name)) = network_topology_change.export_to_process() {
let moved_documents = self.balance_documents(
remotes,
out_name,
network_topology_change.in_name(),
origin,
&progress,
&self.scheduler.must_stop_processing,
)?;
network_topology_change.set_moved(moved_documents);
}
network_topology_change.update_state();
if network_topology_change.state() == NetworkTopologyState::Finished {
task.status = Status::Succeeded;
}
task.details = Some(network_topology_change.to_details());
Ok((vec![task], Default::default()))
}
fn balance_documents(
&self,
remotes: &BTreeMap<String, Remote>,
out_name: &str,
in_name: Option<&str>,
network_change_origin: &Origin,
progress: &Progress,
must_stop_processing: &crate::scheduler::MustStopProcessing,
) -> crate::Result<u64> {
let new_shards =
Shards::from_remotes_local(remotes.keys().map(String::as_str).chain(in_name), in_name);
// TECHDEBT: this spawns a `ureq` agent in addition to `reqwest`. We probably want to harmonize all of this.
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let mut indexer_alloc = Bump::new();
let scheduler_rtxn = self.env.read_txn()?;
let index_count = self.index_mapper.index_count(&scheduler_rtxn)?;
// when the instance is empty, we still need to tell the remotes, as they cannot know that on their own
// and would otherwise keep waiting for data
if index_count == 0 {
for (remote_name, remote) in remotes {
let target = TargetInstance {
remote_name: Some(remote_name),
base_url: &remote.url,
api_key: remote.write_api_key.as_deref(),
};
let res = self.export_no_index(
target,
out_name,
network_change_origin,
&agent,
must_stop_processing,
);
if let Err(err) = res {
tracing::warn!("Could not signal not to wait documents to `{remote_name}` due to error: {err}");
}
}
return Ok(0);
}
let mut total_moved_documents = 0;
self.index_mapper.try_for_each_index::<(), ()>(
&scheduler_rtxn,
|index_uid, index| -> crate::Result<()> {
indexer_alloc.reset();
let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
let index_rtxn = index.read_txn()?;
let all_docids = index.external_documents_ids();
let mut documents_to_move_to =
hashbrown::HashMap::<String, RoaringBitmap>::new();
let mut documents_to_delete = RoaringBitmap::new();
for res in all_docids.iter(&index_rtxn)? {
let (external_docid, docid) = res?;
match new_shards.processing_shard(external_docid) {
Some(shard) if shard.is_own => continue,
Some(shard) => {
documents_to_move_to.entry_ref(&shard.name).or_default().insert(docid);
}
None => {
documents_to_delete.insert(docid);
}
}
}
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
for (remote_name, remote) in remotes {
let documents_to_move =
documents_to_move_to.remove(remote_name).unwrap_or_default();
let target = TargetInstance {
remote_name: Some(remote_name),
base_url: &remote.url,
api_key: remote.write_api_key.as_deref(),
};
let options = ExportOptions {
index_uid,
payload_size: None,
override_settings: false,
export_mode: super::process_export::ExportMode::NetworkBalancing {
index_count,
export_old_remote_name: out_name,
network_change_origin,
},
};
let ctx = ExportContext {
index,
index_rtxn: &index_rtxn,
universe: &documents_to_move,
progress,
agent: &agent,
must_stop_processing,
};
let res = self.export_one_index(target, options, ctx);
match res {
Ok(_) => { documents_to_delete |= documents_to_move; }
Err(err) => {
tracing::warn!("Could not export documents to `{remote_name}` due to error: {err}\n - Note: Documents will be kept");
}
}
}
if documents_to_delete.is_empty() {
return Ok(());
}
total_moved_documents += documents_to_delete.len();
self.delete_documents_from_index(progress, must_stop_processing, &indexer_alloc, index_uid, index, &err, index_rtxn, documents_to_delete, fields_ids_map)
},
)?;
Ok(total_moved_documents)
}
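/// Deletes the given documents from `index` through the document-deletion indexer,
/// then recomputes and stores the index stats before committing both transactions.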
#[allow(clippy::too_many_arguments)]
fn delete_documents_from_index(
&self,
progress: &Progress,
must_stop_processing: &super::MustStopProcessing,
indexer_alloc: &Bump,
index_uid: &str,
index: &milli::Index,
err: &impl Fn(milli::Error) -> Error,
index_rtxn: milli::heed::RoTxn<'_, milli::heed::WithoutTls>,
documents_to_delete: RoaringBitmap,
fields_ids_map: milli::FieldsIdsMap,
) -> std::result::Result<(), Error> {
let mut new_fields_ids_map = fields_ids_map.clone();
// candidates not empty => index not empty => a primary key is set
let primary_key = index.primary_key(&index_rtxn)?.unwrap();
let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
.map_err(milli::Error::from)
.map_err(err)?;
let mut index_wtxn = index.write_txn()?;
let mut indexer = indexer::DocumentDeletion::new();
indexer.delete_documents_by_docids(documents_to_delete);
let document_changes = indexer.into_changes(indexer_alloc, primary_key);
let embedders = index
.embedding_configs()
.embedding_configs(&index_wtxn)
.map_err(milli::Error::from)
.map_err(err)?;
let embedders = self.embedders(index_uid.to_string(), embedders)?;
let indexer_config = self.index_mapper.indexer_config();
let pool = &indexer_config.thread_pool;
indexer::index(
&mut index_wtxn,
index,
pool,
indexer_config.grenad_parameters(),
&fields_ids_map,
new_fields_ids_map,
None, // document deletion never changes primary key
&document_changes,
embedders,
&|| must_stop_processing.get(),
progress,
&EmbedderStats::default(),
)
.map_err(err)?;
// update stats
let mut mapper_wtxn = self.env.write_txn()?;
let stats = crate::index_mapper::IndexStats::new(index, &index_wtxn).map_err(err)?;
self.index_mapper.store_stats_of(&mut mapper_wtxn, index_uid, &stats)?;
index_wtxn.commit()?;
// update stats after committing changes to index
mapper_wtxn.commit()?;
Ok(())
}
}

View File

@@ -1,12 +1,7 @@
mod autobatcher;
#[cfg(test)]
mod autobatcher_test;
#[cfg(not(feature = "enterprise"))]
mod community_edition;
mod create_batch;
#[cfg(feature = "enterprise")]
mod enterprise_edition;
mod process_batch;
mod process_dump_creation;
mod process_export;
@@ -26,6 +21,7 @@ use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::sync::Arc;
use convert_case::{Case, Casing as _};
use meilisearch_types::error::ResponseError;
use meilisearch_types::heed::{Env, WithoutTls};
use meilisearch_types::milli;
@@ -137,7 +133,6 @@ impl Scheduler {
max_number_of_tasks: _,
max_number_of_batched_tasks,
batched_tasks_size_limit,
export_default_payload_size_bytes: _,
instance_features: _,
auto_upgrade: _,
embedding_cache_cap,
@@ -183,8 +178,6 @@ impl IndexScheduler {
self.breakpoint(crate::test_utils::Breakpoint::Start);
}
let previous_processing_batch = self.processing_tasks.write().unwrap().stop_processing();
if self.cleanup_enabled {
let mut wtxn = self.env.write_txn()?;
self.queue.cleanup_task_queue(&mut wtxn)?;
@@ -192,16 +185,11 @@ impl IndexScheduler {
}
let rtxn = self.env.read_txn().map_err(Error::HeedTransaction)?;
let (batch, mut processing_batch) = match self
.create_next_batch(&rtxn, &previous_processing_batch.processing)
.map_err(|e| Error::CreateBatch(Box::new(e)))?
{
Some(batch) => batch,
None => {
*self.processing_tasks.write().unwrap() = previous_processing_batch;
return Ok(TickOutcome::WaitForSignal);
}
};
let (batch, mut processing_batch) =
match self.create_next_batch(&rtxn).map_err(|e| Error::CreateBatch(Box::new(e)))? {
Some(batch) => batch,
None => return Ok(TickOutcome::WaitForSignal),
};
let index_uid = batch.index_uid().map(ToOwned::to_owned);
drop(rtxn);
@@ -272,14 +260,7 @@ impl IndexScheduler {
self.maybe_fail(crate::test_utils::FailureLocation::AcquiringWtxn)?;
progress.update_progress(BatchProgress::WritingTasksToDisk);
processing_batch.finished();
// whether the batch made progress.
// a batch makes progress if it failed or if it contains at least one fully processed (or cancelled) task.
//
// if a batch did not make progress, it means that all of its tasks are waiting on the scheduler to make progress,
// and so we must wait for new tasks. Such a batch is not persisted to DB, and is resumed on the next tick.
let mut batch_made_progress = false;
let mut stop_scheduler_forever = false;
let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
let mut canceled = RoaringBitmap::new();
@@ -300,11 +281,7 @@ impl IndexScheduler {
#[allow(unused_variables)]
for (i, mut task) in tasks.into_iter().enumerate() {
task_progress.fetch_add(1, Ordering::Relaxed);
processing_batch.update_from_task(&task);
if !matches!(task.status, Status::Processing | Status::Enqueued) {
batch_made_progress = true;
processing_batch.finish_task(&mut task);
}
processing_batch.update(&mut task);
if task.status == Status::Canceled {
canceled.insert(task.uid);
canceled_by = task.canceled_by;
@@ -371,9 +348,6 @@ impl IndexScheduler {
}
// In case of a failure we must go back and patch all the tasks with the error.
Err(err) => {
// always persist failed batches
batch_made_progress = true;
#[cfg(test)]
self.breakpoint(crate::test_utils::Breakpoint::ProcessBatchFailed);
let (task_progress, task_progress_obj) = AtomicTaskStep::new(ids.len() as u32);
@@ -397,10 +371,7 @@ impl IndexScheduler {
task.status = Status::Failed;
task.error = Some(error.clone());
task.details = task.details.map(|d| d.to_failed());
processing_batch.update_from_task(&task);
if !matches!(task.status, Status::Processing | Status::Enqueued) {
processing_batch.finish_task(&mut task);
}
processing_batch.update(&mut task);
#[cfg(test)]
self.maybe_fail(
@@ -423,12 +394,44 @@ impl IndexScheduler {
let ProcessBatchInfo { congestion, pre_commit_dabases_sizes, post_commit_dabases_sizes } =
process_batch_info;
processing_batch.write_stats(
&progress,
congestion,
pre_commit_dabases_sizes,
post_commit_dabases_sizes,
);
processing_batch.stats.progress_trace =
progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
processing_batch.stats.write_channel_congestion = congestion.map(|congestion| {
let mut congestion_info = serde_json::Map::new();
congestion_info.insert("attempts".into(), congestion.attempts.into());
congestion_info.insert("blocking_attempts".into(), congestion.blocking_attempts.into());
congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
congestion_info
});
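// Render the per-database size evolution as `new size (±difference)`, skipping
// databases whose size did not change.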
processing_batch.stats.internal_database_sizes = pre_commit_dabases_sizes
.iter()
.flat_map(|(dbname, pre_size)| {
post_commit_dabases_sizes
.get(dbname)
.map(|post_size| {
use std::cmp::Ordering::{Equal, Greater, Less};
use byte_unit::Byte;
use byte_unit::UnitType::Binary;
let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
let diff_size = post_size.abs_diff(*pre_size) as u64;
let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
let sign = match post_size.cmp(pre_size) {
Equal => return None,
Greater => "+",
Less => "-",
};
Some((
dbname.to_case(Case::Camel),
format!("{post:#.2} ({sign}{diff:#.2})").into(),
))
})
.into_iter()
.flatten()
})
.collect();
if let Some(congestion) = congestion {
tracing::debug!(
@@ -441,49 +444,46 @@ impl IndexScheduler {
tracing::debug!("call trace: {:?}", progress.accumulated_durations());
if batch_made_progress {
self.queue.write_batch(&mut wtxn, processing_batch, &ids)?;
}
self.queue.write_batch(&mut wtxn, processing_batch, &ids)?;
#[cfg(test)]
self.maybe_fail(crate::test_utils::FailureLocation::CommittingWtxn)?;
wtxn.commit().map_err(Error::HeedTransaction)?;
if batch_made_progress {
// We should stop processing AFTER everything is processed and written to disk. Otherwise, a batch (which only lives in RAM) may appear in the processing tasks
// and then become « not found » for some time, until everything is written and the final commit is made.
self.processing_tasks.write().unwrap().stop_processing();
// We should stop processing AFTER everything is processed and written to disk. Otherwise, a batch (which only lives in RAM) may appear in the processing tasks
// and then become « not found » for some time, until everything is written and the final commit is made.
self.processing_tasks.write().unwrap().stop_processing();
// Once the tasks are committed, we should delete all the update files associated ASAP to avoid leaking files in case of a restart
tracing::debug!("Deleting the update files");
// Once the tasks are committed, we should delete all the update files associated ASAP to avoid leaking files in case of a restart
tracing::debug!("Deleting the update files");
// We take one read transaction **per thread**. Then, every thread is going to pull out new IDs from the roaring bitmap with the help of an atomic shared index into the bitmap.
let idx = AtomicU32::new(0);
(0..current_num_threads()).into_par_iter().try_for_each(|_| -> Result<()> {
let rtxn = self.read_txn()?;
while let Some(id) = ids.select(idx.fetch_add(1, Ordering::Relaxed)) {
let task = self
.queue
.tasks
.get_task(&rtxn, id)
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?
.ok_or(Error::CorruptedTaskQueue)?;
if let Err(e) = self.queue.delete_persisted_task_data(&task) {
tracing::error!(
// We take one read transaction **per thread**. Then, every thread is going to pull out new IDs from the roaring bitmap with the help of an atomic shared index into the bitmap.
let idx = AtomicU32::new(0);
(0..current_num_threads()).into_par_iter().try_for_each(|_| -> Result<()> {
let rtxn = self.read_txn()?;
while let Some(id) = ids.select(idx.fetch_add(1, Ordering::Relaxed)) {
let task = self
.queue
.tasks
.get_task(&rtxn, id)
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?
.ok_or(Error::CorruptedTaskQueue)?;
if let Err(e) = self.queue.delete_persisted_task_data(&task) {
tracing::error!(
"Failure to delete the content files associated with task {}. Error: {e}",
task.uid
);
}
}
Ok(())
})?;
}
Ok(())
})?;
self.notify_webhooks(ids);
}
self.notify_webhooks(ids);
#[cfg(test)]
self.breakpoint(crate::test_utils::Breakpoint::AfterProcessing);
if stop_scheduler_forever {
Ok(TickOutcome::StopProcessingForever)
} else {

View File

@@ -539,10 +539,6 @@ impl IndexScheduler {
Ok((tasks, ProcessBatchInfo::default()))
}
Batch::NetworkIndexBatch { network_task, inner_batch } => {
self.process_network_index_batch(network_task, inner_batch, current_batch, progress)
}
Batch::NetworkReady { task } => self.process_network_ready(task, progress),
}
}

View File

@@ -1,6 +1,5 @@
use std::collections::BTreeMap;
use std::io::{self, Write as _};
use std::ops::ControlFlow;
use std::sync::atomic;
use std::time::Duration;
@@ -8,7 +7,6 @@ use backoff::ExponentialBackoff;
use byte_unit::Byte;
use flate2::write::GzEncoder;
use flate2::Compression;
use meilisearch_types::error::Code;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
@@ -17,10 +15,7 @@ use meilisearch_types::milli::update::{request_threads, Setting};
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
use meilisearch_types::settings::{self, SecretPolicy};
use meilisearch_types::tasks::network::headers::SetHeader as _;
use meilisearch_types::tasks::network::{headers, ImportData, ImportMetadata, Origin};
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
use roaring::RoaringBitmap;
use serde::Deserialize;
use ureq::{json, Response};
@@ -55,7 +50,6 @@ impl IndexScheduler {
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let must_stop_processing = self.scheduler.must_stop_processing.clone();
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
let err = |err| Error::from_milli(err, Some(uid.to_string()));
if must_stop_processing.get() {
return Err(Error::AbortedTask);
}
@@ -67,473 +61,260 @@ impl IndexScheduler {
));
let ExportIndexSettings { filter, override_settings } = export_settings;
let index = self.index(uid)?;
let index_rtxn = index.read_txn()?;
let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
let filter_universe =
filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
let whole_universe =
index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
let universe = filter_universe.unwrap_or(whole_universe);
let target = TargetInstance { remote_name: None, base_url, api_key };
let ctx = ExportContext {
index: &index,
index_rtxn: &index_rtxn,
universe: &universe,
progress: &progress,
agent: &agent,
must_stop_processing: &must_stop_processing,
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
// First, check if the index already exists
let url = format!("{base_url}/indexes/{uid}");
let response = retry(&must_stop_processing, || {
let mut request = agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
false
}
Err(e) => return Err(e),
};
let options = ExportOptions {
index_uid: uid,
payload_size,
override_settings: *override_settings,
export_mode: ExportMode::ExportRoute,
};
let total_documents = self.export_one_index(target, options, ctx)?;
output.insert(
IndexUidPattern::new_unchecked(uid.clone()),
DetailsExportIndexSettings {
settings: (*export_settings).clone(),
matched_documents: Some(total_documents),
},
);
}
let primary_key = index
.primary_key(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
Ok(output)
}
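/// Exports a single index to the target instance: ensures the remote index exists
/// (creating it or patching its primary key as needed), sends the settings, then
/// streams the documents of `ctx.universe` as gzipped NDJSON payloads from multiple
/// threads. Returns the number of exported documents.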
pub(super) fn export_one_index(
&self,
target: TargetInstance<'_>,
options: ExportOptions<'_>,
ctx: ExportContext<'_>,
) -> Result<u64, Error> {
let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
let total_index_documents = ctx.universe.len();
let task_network = options.task_network(total_index_documents);
let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
let url = format!(
"{base_url}/indexes/{index_uid}",
base_url = target.base_url,
index_uid = options.index_uid
);
let response = retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. })
if code == Code::IndexNotFound.name() =>
{
false
}
Err(e) => return Err(e),
};
let primary_key =
ctx.index.primary_key(ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
if !index_exists {
let url = format!("{base_url}/indexes", base_url = target.base_url);
let _ = handle_response(
target.remote_name,
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&url);
if let Some((import_data, origin, metadata)) = &task_network {
request = set_network_ureq_headers(request, import_data, origin, metadata);
}
if let Some(bearer) = bearer.as_ref() {
// Create the index
if !index_exists {
let url = format!("{base_url}/indexes");
retry(&must_stop_processing, || {
let mut request = agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param =
json!({ "uid": options.index_uid, "primaryKey": primary_key });
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
}),
)?;
}
if index_exists && options.override_settings {
let _ = handle_response(
target.remote_name,
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some((import_data, origin, metadata)) = &task_network {
request = set_network_ureq_headers(request, import_data, origin, metadata);
}
})?;
}
// Patch the index primary key
if index_exists && *override_settings {
let url = format!("{base_url}/indexes/{uid}");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
}),
)?;
}
if !index_exists || options.override_settings {
let mut settings =
settings::settings(ctx.index, ctx.index_rtxn, SecretPolicy::RevealSecrets)
.map_err(err)?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
})?;
}
// Retry logic for sending settings
let url = format!(
"{base_url}/indexes/{index_uid}/settings",
base_url = target.base_url,
index_uid = options.index_uid
);
let _ = handle_response(
target.remote_name,
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some((import_data, origin, metadata)) = &task_network {
request = set_network_ureq_headers(request, import_data, origin, metadata);
}
// Send the index settings
if !index_exists || *override_settings {
let mut settings =
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!("{base_url}/indexes/{uid}/settings");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
}),
)?;
}
let fields_ids_map = ctx.index.fields_ids_map(ctx.index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
let total_documents = ctx.universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
ctx.progress.update_progress(progress_step);
let limit = options
.payload_size
.map(|ps| ps.as_u64() as usize)
.unwrap_or(self.export_default_payload_size_bytes.as_u64() as usize);
let documents_url = format!(
"{base_url}/indexes/{index_uid}/documents",
base_url = target.base_url,
index_uid = options.index_uid
);
// no documents to send, but we must still send a task when performing network balancing
if ctx.universe.is_empty() {
if let Some((import_data, network_change_origin, metadata)) = task_network {
let mut compressed_buffer = Vec::new();
// ignore control flow, we're returning anyway
let _ = send_buffer(
b" ", // needs something otherwise meili complains about missing payload
&mut compressed_buffer,
ctx.must_stop_processing,
ctx.agent,
&documents_url,
target.remote_name,
bearer.as_deref(),
Some(&(import_data, network_change_origin.clone(), metadata)),
&err,
)?;
})?;
}
return Ok(0);
}
let results = request_threads()
.broadcast(|broadcast| {
let mut task_network = options.task_network(total_index_documents);
let filter = filter
.as_ref()
.map(Filter::from_json)
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
.flatten();
let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;
let filter_universe = filter
.map(|f| f.evaluate(&index_rtxn, &index))
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let whole_universe = index
.documents_ids(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let universe = filter_universe.unwrap_or(whole_universe);
let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in ctx.universe.iter().enumerate() {
if i % broadcast.num_threads() != broadcast.index() {
continue;
}
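// Stripe the universe across the broadcast threads: each thread only serializes
// the documents whose position matches its thread index.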
if let Some((import_data, _, metadata)) = &mut task_network {
import_data.document_count += 1;
metadata.task_key = Some(docid);
}
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;
// We don't need to keep this one alive as we will
// spawn many threads to process the documents
drop(index_rtxn);
let mut document =
obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;
let total_documents = universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
progress.update_progress(progress_step);
// TODO definitely factorize this code
'inject_vectors: {
let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;
output.insert(
IndexUidPattern::new_unchecked(uid.clone()),
DetailsExportIndexSettings {
settings: (*export_settings).clone(),
matched_documents: Some(total_documents as u64),
},
);
if embeddings.is_empty() {
break 'inject_vectors;
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
let documents_url = format!("{base_url}/indexes/{uid}/documents");
let results = request_threads()
.broadcast(|ctx| {
let index_rtxn = index
.read_txn()
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in universe.iter().enumerate() {
if i % ctx.num_threads() != ctx.index() {
continue;
}
let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));
let document = index
.document(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let serde_json::Value::Object(vectors) = vectors else {
return Err(err(milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = ctx
.index
.external_id_of(&index_rtxn, std::iter::once(docid))
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
)));
};
let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
embeddings,
)),
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments,
// TODO definitely factorize this code
'inject_vectors: {
let embeddings = index
.embeddings(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
if embeddings.is_empty() {
break 'inject_vectors;
}
let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));
let serde_json::Value::Object(vectors) = vectors else {
return Err(Error::from_milli(
milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = index
.external_id_of(
&index_rtxn,
std::iter::once(docid),
)
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
),
Some(uid.to_string()),
));
};
vectors
.insert(embedder_name, serde_json::to_value(embeddings).unwrap());
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(
VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
),
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments,
};
vectors.insert(
embedder_name,
serde_json::to_value(embeddings).unwrap(),
);
}
}
tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
// We compress the documents before sending them
let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder
.write_all(&buffer)
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
encoder
.finish()
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
})?;
buffer.clear();
compressed_buffer.clear();
}
buffer.extend_from_slice(&tmp_buffer);
if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}
tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(milli::Error::from)
.map_err(err)?;
// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
let control_flow = send_buffer(
&buffer,
&mut compressed_buffer,
ctx.must_stop_processing,
ctx.agent,
&documents_url,
target.remote_name,
bearer.as_deref(),
task_network.as_ref(),
&err,
)?;
buffer.clear();
compressed_buffer.clear();
if let Some((import_data, _, metadata)) = &mut task_network {
import_data.document_count = 0;
metadata.task_key = None;
retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
if control_flow.is_break() {
return Ok(());
}
}
buffer.extend_from_slice(&tmp_buffer);
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;
if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}
Ok(())
})
.map_err(|e| {
Error::from_milli(
milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
Some(uid.to_string()),
)
})?;
for result in results {
result?;
}
// send the last buffered documents if any
if !buffer.is_empty() {
// ignore control flow here
let _ = send_buffer(
&buffer,
&mut compressed_buffer,
ctx.must_stop_processing,
ctx.agent,
&documents_url,
target.remote_name,
bearer.as_deref(),
task_network.as_ref(),
&err,
)?;
}
Ok(())
})
.map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
for result in results {
result?;
}
step.store(total_documents, atomic::Ordering::Relaxed);
Ok(total_documents as u64)
}
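/// Notifies a remote that this instance has nothing to export: sends an empty
/// payload to the remote's `/network` route with zeroed import metadata so that
/// the remote stops waiting for documents from us.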
#[cfg(feature = "enterprise")] // only used in enterprise edition for now
pub(super) fn export_no_index(
&self,
target: TargetInstance<'_>,
export_old_remote_name: &str,
network_change_origin: &Origin,
agent: &ureq::Agent,
must_stop_processing: &MustStopProcessing,
) -> Result<(), Error> {
let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
let url = format!("{base_url}/network", base_url = target.base_url,);
{
let _ = handle_response(
target.remote_name,
retry(must_stop_processing, || {
let request = agent.patch(&url);
let mut request = set_network_ureq_headers(
request,
&ImportData {
remote_name: export_old_remote_name.to_string(),
index_name: None,
document_count: 0,
},
network_change_origin,
&ImportMetadata {
index_count: 0,
task_key: None,
total_index_documents: 0,
},
);
request = request.set("Content-Type", "application/json");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request
.send_json(
// empty payload that will be disregarded
serde_json::Value::Object(Default::default()),
)
.map_err(into_backoff_error)
}),
)?;
step.store(total_documents, atomic::Ordering::Relaxed);
}
Ok(())
}
}
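/// Copies the network-change metadata (origin, import data and counts) onto the
/// outgoing request as HTTP headers.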
fn set_network_ureq_headers(
request: ureq::Request,
import_data: &ImportData,
origin: &Origin,
metadata: &ImportMetadata,
) -> ureq::Request {
let request = RequestWrapper(request);
let ImportMetadata { index_count, task_key, total_index_documents } = metadata;
let Origin { remote_name: origin_remote, task_uid, network_version } = origin;
let ImportData { remote_name: import_remote, index_name, document_count } = import_data;
let request = request
.set_origin_remote(origin_remote)
.set_origin_task_uid(*task_uid)
.set_origin_network_version(*network_version)
.set_import_remote(import_remote)
.set_import_docs(*document_count)
.set_import_index_count(*index_count)
.set_import_index_docs(*total_index_documents);
let request = if let Some(index_name) = index_name.as_deref() {
request.set_import_index(index_name)
} else {
request
};
let RequestWrapper(request) = if let Some(task_key) = task_key {
request.set_import_task_key(*task_key)
} else {
request
};
request
}
struct RequestWrapper(ureq::Request);
impl headers::SetHeader for RequestWrapper {
fn set_header(self, name: &str, value: &str) -> Self {
Self(self.0.set(name, value))
}
}
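/// Gzip-compresses `buffer` and POSTs it to `documents_url`, attaching the bearer
/// token and the network-change headers when present; sending is retried with
/// exponential backoff through `retry`.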
#[allow(clippy::too_many_arguments)]
fn send_buffer<'a>(
buffer: &'a [u8],
mut compressed_buffer: &'a mut Vec<u8>,
must_stop_processing: &MustStopProcessing,
agent: &ureq::Agent,
documents_url: &'a str,
remote_name: Option<&str>,
bearer: Option<&'a str>,
task_network: Option<&(ImportData, Origin, ImportMetadata)>,
err: &'a impl Fn(milli::Error) -> crate::Error,
) -> Result<ControlFlow<(), ()>> {
// We compress the documents before sending them
let mut encoder: GzEncoder<&mut &mut Vec<u8>> =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder.write_all(buffer).map_err(milli::Error::from).map_err(err)?;
encoder.finish().map_err(milli::Error::from).map_err(err)?;
let res = retry(must_stop_processing, || {
let mut request = agent.post(documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = bearer {
request = request.set("Authorization", bearer);
}
if let Some((import_data, origin, metadata)) = task_network {
request = set_network_ureq_headers(request, import_data, origin, metadata);
}
request.send_bytes(compressed_buffer).map_err(into_backoff_error)
});
handle_response(remote_name, res)
}
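/// Interprets a remote's response: `Continue` when the payload was accepted (or had
/// already been received), `Break` when the remote cannot accept this import (unknown
/// remote or missing network task) and the caller should stop sending to it.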
fn handle_response(remote_name: Option<&str>, res: Result<Response>) -> Result<ControlFlow<()>> {
let remote_name = remote_name.unwrap_or("unnamed");
match res {
Ok(_response) => Ok(ControlFlow::Continue(())),
Err(Error::FromRemoteWhenExporting { code, .. })
if code == Code::ImportTaskAlreadyReceived.name() =>
{
Ok(ControlFlow::Continue(()))
}
Err(Error::FromRemoteWhenExporting { code, message, .. })
if code == Code::ImportTaskUnknownRemote.name() =>
{
tracing::warn!("remote `{remote_name}` answered with: {message}");
Ok(ControlFlow::Break(()))
}
// note: by the time we get this error there have already been many attempts due to exponential backoff
Err(Error::FromRemoteWhenExporting { code, message, .. })
if code == Code::ImportTaskWithoutNetworkTask.name() =>
{
tracing::warn!("remote `{remote_name}` answered with: {message}");
Ok(ControlFlow::Break(()))
}
Err(e) => {
tracing::warn!("error while exporting: {e}");
Err(e)
}
Ok(output)
}
}
@@ -593,65 +374,4 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
}
}
// export_one_index arguments
pub(super) struct TargetInstance<'a> {
pub(super) remote_name: Option<&'a str>,
pub(super) base_url: &'a str,
pub(super) api_key: Option<&'a str>,
}
pub(super) struct ExportOptions<'a> {
pub(super) index_uid: &'a str,
pub(super) payload_size: Option<&'a Byte>,
pub(super) override_settings: bool,
pub(super) export_mode: ExportMode<'a>,
}
impl ExportOptions<'_> {
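/// The network-change headers to attach to export requests: `Some` only when
/// balancing documents after a topology change, `None` for the plain export route.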
fn task_network(
&self,
total_index_documents: u64,
) -> Option<(ImportData, Origin, ImportMetadata)> {
if let ExportMode::NetworkBalancing {
index_count,
export_old_remote_name,
network_change_origin,
} = self.export_mode
{
Some((
ImportData {
remote_name: export_old_remote_name.to_string(),
index_name: Some(self.index_uid.to_string()),
document_count: 0,
},
network_change_origin.clone(),
ImportMetadata { index_count, task_key: None, total_index_documents },
))
} else {
None
}
}
}
pub(super) struct ExportContext<'a> {
pub(super) index: &'a meilisearch_types::milli::Index,
pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
pub(super) universe: &'a RoaringBitmap,
pub(super) progress: &'a Progress,
pub(super) agent: &'a ureq::Agent,
pub(super) must_stop_processing: &'a MustStopProcessing,
}
pub(super) enum ExportMode<'a> {
ExportRoute,
#[cfg_attr(not(feature = "enterprise"), allow(dead_code))]
NetworkBalancing {
index_count: u64,
export_old_remote_name: &'a str,
network_change_origin: &'a Origin,
},
}
// progress related
enum ExportIndex {}

View File

@@ -438,15 +438,12 @@ async fn multipart_stream_to_s3(
db_name: String,
reader: std::io::PipeReader,
) -> Result<(), Error> {
use std::collections::VecDeque;
use std::io;
use std::os::fd::OwnedFd;
use std::path::PathBuf;
use std::{collections::VecDeque, os::fd::OwnedFd, path::PathBuf};
use bytes::{Bytes, BytesMut};
use reqwest::{Client, Response};
use rusty_s3::actions::CreateMultipartUpload;
use rusty_s3::{Bucket, BucketError, Credentials, S3Action as _, UrlStyle};
use rusty_s3::S3Action as _;
use rusty_s3::{actions::CreateMultipartUpload, Bucket, BucketError, Credentials, UrlStyle};
use tokio::task::JoinHandle;
let reader = OwnedFd::from(reader);
@@ -520,6 +517,7 @@ async fn multipart_stream_to_s3(
while buffer.len() < (s3_multipart_part_size as usize / 2) {
// Wait for the pipe to be readable
use std::io;
reader.readable().await?;
match reader.try_read_buf(&mut buffer) {
@@ -583,17 +581,15 @@ async fn multipart_stream_to_s3(
async move {
match client.post(url).body(body).send().await {
Ok(resp) if resp.status().is_client_error() => {
Err(backoff::Error::Permanent(Error::S3Error {
status: resp.status(),
body: resp.text().await.unwrap_or_default(),
}))
resp.error_for_status().map_err(backoff::Error::Permanent)
}
Ok(resp) => Ok(resp),
Err(e) => Err(backoff::Error::transient(Error::S3HttpError(e))),
Err(e) => Err(backoff::Error::transient(e)),
}
}
})
.await?;
.await
.map_err(Error::S3HttpError)?;
let status = resp.status();
let body = resp.text().await.map_err(|e| Error::S3Error { status, body: e.to_string() })?;

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
----------------------------------------------------------------------
### Status:
enqueued [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -747,7 +747,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -783,7 +782,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -826,7 +824,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -870,7 +867,6 @@ fn basic_get_stats() {
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,

View File

@@ -112,7 +112,6 @@ impl IndexScheduler {
max_number_of_batched_tasks: usize::MAX,
batched_tasks_size_limit: u64::MAX,
instance_features: Default::default(),
export_default_payload_size_bytes: byte_unit::Byte::parse_str("20MiB", false).unwrap(),
auto_upgrade: true, // Don't cost much and will ensure the happy path works
embedding_cache_cap: 10,
experimental_no_snapshot_compaction: false,

View File

@@ -1,93 +1,89 @@
use anyhow::bail;
use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
use meilisearch_types::versioning;
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use time::OffsetDateTime;
use tracing::info;
use crate::queue::TaskQueue;
use crate::versioning::Versioning;
mod v1_29;
mod v1_30;
trait UpgradeIndexScheduler {
fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()>;
/// Whether the migration should be applied, depending on the initial version of the index scheduler before
/// any migration was applied
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;
/// A progress-centric description of the migration
fn description(&self) -> &'static str;
fn upgrade(
&self,
env: &Env<WithoutTls>,
wtxn: &mut RwTxn,
original: (u32, u32, u32),
) -> anyhow::Result<()>;
fn target_version(&self) -> (u32, u32, u32);
}
/// Upgrade the index scheduler to the binary version.
///
/// # Warning
///
/// The current implementation uses a single wtxn to the index scheduler for the whole duration of the upgrade.
/// If migrations start taking a long time, it might prevent tasks from being registered.
/// If this issue manifests, then it can be mitigated by adding a `fn target_version` to `UpgradeIndexScheduler`,
/// to be able to write intermediate versions and drop the wtxn between applying migrations.
pub fn upgrade_index_scheduler(
env: &Env<WithoutTls>,
versioning: &Versioning,
initial_version: (u32, u32, u32),
from: (u32, u32, u32),
to: (u32, u32, u32),
) -> anyhow::Result<()> {
let target_major: u32 = versioning::VERSION_MAJOR;
let target_minor: u32 = versioning::VERSION_MINOR;
let target_patch: u32 = versioning::VERSION_PATCH;
let target_version = (target_major, target_minor, target_patch);
if initial_version == target_version {
return Ok(());
}
let current_major = to.0;
let current_minor = to.1;
let current_patch = to.2;
let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
// List all upgrade functions to apply in order here.
&v1_30::MigrateNetwork,
// This is the last upgrade function; it will be called when the index is up to date.
// Any other upgrade function should be added before this one.
&ToCurrentNoOp {},
];
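// Every migration decides from the version the scheduler had before any upgrade ran
// whether it must be applied; applicable migrations run in order inside a single wtxn.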
let (initial_major, initial_minor, initial_patch) = initial_version;
if initial_version > target_version {
bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is higher than the Meilisearch version {target_major}.{target_minor}.{target_patch}. Downgrade is not supported",
let start = match from {
(1, 12, _) => 0,
(1, 13, _) => 0,
(1, 14, _) => 0,
(1, 15, _) => 0,
(1, 16, _) => 0,
(1, 17, _) => 0,
(1, 18, _) => 0,
(1, 19, _) => 0,
(1, 20, _) => 0,
(1, 21, _) => 0,
(1, 22, _) => 0,
(1, 23, _) => 0,
(1, 24, _) => 0,
(1, 25, _) => 0,
(1, 26, _) => 0,
(major, minor, patch) => {
if major > current_major
|| (major == current_major && minor > current_minor)
|| (major == current_major && minor == current_minor && patch > current_patch)
{
bail!(
"Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
);
} else if major < 1 || (major == current_major && minor < 12) {
bail!(
"Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
);
}
if initial_version < (1, 12, 0) {
bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{initial_major}.{initial_minor}.{initial_patch} and import it in the v{target_major}.{target_minor}.{target_patch}",
);
}
} else {
bail!("Unknown database version: v{major}.{minor}.{patch}");
}
}
};
info!("Upgrading the task queue");
let mut wtxn = env.write_txn()?;
let migration_count = upgrade_functions.len();
for (migration_index, upgrade) in upgrade_functions.iter().enumerate() {
if upgrade.must_upgrade(initial_version) {
info!(
"[{migration_index}/{migration_count}]Applying migration: {}",
upgrade.description()
);
upgrade.upgrade(env, &mut wtxn)?;
info!(
"[{}/{migration_count}]Migration applied: {}",
migration_index + 1,
upgrade.description()
)
} else {
info!(
"[{migration_index}/{migration_count}]Skipping unnecessary migration: {}",
upgrade.description()
)
}
let mut local_from = from;
for upgrade in upgrade_functions[start..].iter() {
let target = upgrade.target_version();
info!(
"Upgrading from v{}.{}.{} to v{}.{}.{}",
local_from.0, local_from.1, local_from.2, target.0, target.1, target.2
);
let mut wtxn = env.write_txn()?;
upgrade.upgrade(env, &mut wtxn, local_from)?;
versioning.set_version(&mut wtxn, target)?;
wtxn.commit()?;
local_from = target;
}
versioning.set_version(&mut wtxn, target_version)?;
info!("Task queue upgraded, spawning the upgrade database task");
let mut wtxn = env.write_txn()?;
let queue = TaskQueue::new(env, &mut wtxn)?;
let uid = queue.next_task_id(&wtxn)?;
queue.register(
@@ -100,9 +96,9 @@ pub fn upgrade_index_scheduler(
finished_at: None,
error: None,
canceled_by: None,
details: Some(Details::UpgradeDatabase { from: initial_version, to: target_version }),
details: Some(Details::UpgradeDatabase { from, to }),
status: Status::Enqueued,
kind: KindWithContent::UpgradeDatabase { from: initial_version },
kind: KindWithContent::UpgradeDatabase { from },
network: None,
custom_metadata: None,
},
@@ -111,3 +107,21 @@ pub fn upgrade_index_scheduler(
Ok(())
}
#[allow(non_camel_case_types)]
struct ToCurrentNoOp {}
impl UpgradeIndexScheduler for ToCurrentNoOp {
fn upgrade(
&self,
_env: &Env<WithoutTls>,
_wtxn: &mut RwTxn,
_original: (u32, u32, u32),
) -> anyhow::Result<()> {
Ok(())
}
fn target_version(&self) -> (u32, u32, u32) {
(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
}
}

View File

@@ -1,47 +0,0 @@
use std::collections::BTreeMap;
use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Env, RoTxn, WithoutTls};
use serde::{Deserialize, Serialize};
use crate::Result;
/// Database const names for the `FeatureData`.
mod db_name {
pub const EXPERIMENTAL_FEATURES: &str = "experimental-features";
}
mod db_keys {
pub const NETWORK: &str = "network";
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub sharding: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
}
pub fn get_network(env: &Env<WithoutTls>, rtxn: &RoTxn) -> Result<Option<Network>> {
let Some(network_db) =
env.open_database::<Str, SerdeJson<Network>>(rtxn, Some(db_name::EXPERIMENTAL_FEATURES))?
else {
return Ok(None);
};
Ok(network_db.get(rtxn, db_keys::NETWORK)?)
}

View File

@@ -1,82 +0,0 @@
use std::collections::BTreeMap;
use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub leader: Option<String>,
#[serde(default)]
pub version: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
}
use super::v1_29;
use crate::Result;
/// Database const names for the `FeatureData`.
mod db_name {
pub const EXPERIMENTAL_FEATURES: &str = "experimental-features";
}
mod db_keys {
pub const NETWORK: &str = "network";
}
pub struct MigrateNetwork;
impl super::UpgradeIndexScheduler for MigrateNetwork {
fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()> {
let Some(v1_29::Network { local, remotes, sharding }) = v1_29::get_network(env, wtxn)?
else {
return Ok(());
};
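// v1.29 had no explicit leader; when sharding was enabled, promote the first remote
// (BTreeMap iteration order, i.e. the lexicographically smallest name) to leader.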
let leader = if sharding { remotes.keys().next().cloned() } else { None };
let remotes = remotes
.into_iter()
.map(|(name, v1_29::Remote { url, search_api_key, write_api_key })| {
(name, Remote { url, search_api_key, write_api_key })
})
.collect();
let network = Network { local, remotes, leader, version: Uuid::now_v7() };
set_network(env, wtxn, &network)?;
Ok(())
}
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 30, 0)
}
fn description(&self) -> &'static str {
"updating the network struct"
}
}
fn set_network(env: &Env<WithoutTls>, wtxn: &mut RwTxn<'_>, network: &Network) -> Result<()> {
let network_db =
env.create_database::<Str, SerdeJson<Network>>(wtxn, Some(db_name::EXPERIMENTAL_FEATURES))?;
network_db.put(wtxn, db_keys::NETWORK, network)?;
Ok(())
}

View File

@@ -4,11 +4,9 @@ use std::collections::{BTreeSet, HashSet};
use std::ops::Bound;
use std::sync::Arc;
use convert_case::{Case, Casing as _};
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchId, BatchStats};
use meilisearch_types::heed::{Database, RoTxn, RwTxn};
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::{CboRoaringBitmapCodec, ChannelCongestion};
use meilisearch_types::milli::CboRoaringBitmapCodec;
use meilisearch_types::task_view::DetailsView;
use meilisearch_types::tasks::{
BatchStopReason, Details, IndexSwap, Kind, KindWithContent, Status,
@@ -121,8 +119,17 @@ impl ProcessingBatch {
self.stats.total_nb_tasks = 0;
}
/// Update batch task from a processed task
pub fn update_from_task(&mut self, task: &Task) {
/// Update the task's timestamps and this structure's inner state.
pub fn update(&mut self, task: &mut Task) {
// We must re-set this value in case we're dealing with a task that has been added between
// the `processing` and `finished` state
// We must re-set this value in case we're dealing with a task that has been added between
// the `processing` and `finished` state or that failed.
task.batch_uid = Some(self.uid);
// Same
task.started_at = Some(self.started_at);
task.finished_at = self.finished_at;
self.statuses.insert(task.status);
// Craft an aggregation of the details of all the tasks encountered in this batch.
@@ -137,63 +144,6 @@ impl ProcessingBatch {
}
}
/// Update the timestamp of the tasks after they're done
pub fn finish_task(&self, task: &mut Task) {
// We must re-set this value in case we're dealing with a task that has been added between
// the `processing` and `finished` state or that failed.
task.batch_uid = Some(self.uid);
// Same
task.started_at = Some(self.started_at);
task.finished_at = self.finished_at;
}
pub fn write_stats(
&mut self,
progress: &Progress,
congestion: Option<ChannelCongestion>,
pre_commit_databases_sizes: indexmap::IndexMap<&'static str, usize>,
post_commit_databases_sizes: indexmap::IndexMap<&'static str, usize>,
) {
self.stats.progress_trace =
progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
self.stats.write_channel_congestion = congestion.map(|congestion| {
let mut congestion_info = serde_json::Map::new();
congestion_info.insert("attempts".into(), congestion.attempts.into());
congestion_info.insert("blocking_attempts".into(), congestion.blocking_attempts.into());
congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
congestion_info
});
self.stats.internal_database_sizes = pre_commit_databases_sizes
.iter()
.flat_map(|(dbname, pre_size)| {
post_commit_databases_sizes
.get(dbname)
.map(|post_size| {
use std::cmp::Ordering::{Equal, Greater, Less};
use byte_unit::Byte;
use byte_unit::UnitType::Binary;
let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
let diff_size = post_size.abs_diff(*pre_size) as u64;
let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
let sign = match post_size.cmp(pre_size) {
Equal => return None,
Greater => "+",
Less => "-",
};
Some((
dbname.to_case(Case::Camel),
format!("{post:#.2} ({sign}{diff:#.2})").into(),
))
})
.into_iter()
.flatten()
})
.collect();
}
pub fn to_batch(&self) -> Batch {
Batch {
uid: self.uid,
@@ -336,7 +286,6 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
| K::DumpCreation { .. }
| K::Export { .. }
| K::UpgradeDatabase { .. }
| K::NetworkTopologyChange(_)
| K::SnapshotCreation => (),
};
if let Some(Details::IndexSwap { swaps }) = &mut task.details {
@@ -678,9 +627,6 @@ impl crate::IndexScheduler {
} => {
assert_eq!(kind.as_kind(), Kind::IndexCompaction);
}
Details::NetworkTopologyChange { moved_documents: _, message: _ } => {
assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
}
}
}

View File

@@ -64,7 +64,14 @@ impl Versioning {
};
wtxn.commit()?;
upgrade_index_scheduler(env, &this, from)?;
let bin_major: u32 = versioning::VERSION_MAJOR;
let bin_minor: u32 = versioning::VERSION_MINOR;
let bin_patch: u32 = versioning::VERSION_PATCH;
let to = (bin_major, bin_minor, bin_patch);
if from != to {
upgrade_index_scheduler(env, &this, from, to)?;
}
// Once we reach this point it means the upgrade process, if there was one, is entirely finished
// we can safely say we reached the latest version of the index scheduler
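// A trivial sketch of the guard above, with hypothetical versions: the
// index-scheduler upgrade only runs when the binary and on-disk versions differ.
//
//     fn needs_upgrade(from: (u32, u32, u32), to: (u32, u32, u32)) -> bool {
//         from != to
//     }
//     assert!(needs_upgrade((1, 29, 0), (1, 30, 0)));
//     assert!(!needs_upgrade((1, 30, 0), (1, 30, 0)));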

View File

@@ -15,7 +15,7 @@ license.workspace = true
serde_json = "1.0"
[dev-dependencies]
criterion = "0.7.0"
criterion = "0.6.0"
[[bench]]
name = "depth"

View File

@@ -13,7 +13,7 @@ license.workspace = true
[dependencies]
# fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["json", "redactions"] }
md5 = "0.8.0"
md5 = "0.7.0"
once_cell = "1.21"
regex-lite = "0.1.8"
uuid = { version = "1.18.1", features = ["v4"] }
regex-lite = "0.1.6"
uuid = { version = "1.17.0", features = ["v4"] }

View File

@@ -12,15 +12,15 @@ license.workspace = true
[dependencies]
base64 = "0.22.1"
enum-iterator = "2.3.0"
enum-iterator = "2.1.0"
hmac = "0.12.1"
maplit = "1.0.2"
meilisearch-types = { path = "../meilisearch-types" }
rand = "0.8.5"
roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9"
thiserror = "2.0.17"
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] }
thiserror = "2.0.12"
time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }

View File

@@ -11,41 +11,38 @@ edition.workspace = true
license.workspace = true
[dependencies]
actix-web = { version = "4.12.0", default-features = false }
anyhow = "1.0.100"
base64 = "0.22.1"
bumpalo = "3.19.0"
actix-web = { version = "4.11.0", default-features = false }
anyhow = "1.0.98"
bumpalo = "3.18.1"
bumparaw-collections = "0.1.4"
byte-unit = { version = "5.1.6", features = ["serde"] }
convert_case = "0.9.0"
csv = "1.4.0"
deserr = { version = "0.6.4", features = ["actix-web"] }
convert_case = "0.8.0"
csv = "1.3.1"
deserr = { version = "0.6.3", features = ["actix-web"] }
either = { version = "1.15.0", features = ["serde"] }
enum-iterator = "2.3.0"
enum-iterator = "2.1.0"
file-store = { path = "../file-store" }
flate2 = "1.1.5"
flate2 = "1.1.2"
fst = "0.4.7"
itertools = "0.14.0"
memmap2 = "0.9.9"
memmap2 = "0.9.7"
milli = { path = "../milli" }
roaring = { version = "0.10.12", features = ["serde"] }
rustc-hash = "2.1.1"
serde = { version = "1.0.228", features = ["derive"] }
serde = { version = "1.0.219", features = ["derive"] }
serde-cs = "0.2.4"
serde_json = { version = "1.0.145", features = ["preserve_order"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
tempfile = "3.23.0"
thiserror = "2.0.17"
time = { version = "0.3.44", features = [
tempfile = "3.20.0"
thiserror = "2.0.12"
time = { version = "0.3.41", features = [
"serde-well-known",
"formatting",
"parsing",
"macros",
] }
tokio = "1.48"
urlencoding = "2.1.3"
tokio = "1.45"
utoipa = { version = "5.4.0", features = ["macros"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }
[dev-dependencies]
# fixed version due to format breakages in v1.40
@@ -59,9 +56,6 @@ all-tokenizations = ["milli/all-tokenizations"]
# chinese specialized tokenization
chinese = ["milli/chinese"]
chinese-pinyin = ["milli/chinese-pinyin"]
enterprise = ["milli/enterprise"]
# hebrew specialized tokenization
hebrew = ["milli/hebrew"]
# japanese specialized tokenization

View File

@@ -1,16 +0,0 @@
pub mod network {
use milli::update::new::indexer::current_edition::sharding::Shards;
use crate::network::Network;
impl Network {
pub fn shards(&self) -> Option<Shards> {
None
}
pub fn sharding(&self) -> bool {
// always false in CE
false
}
}
}

View File

@@ -3,23 +3,45 @@
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use milli::update::new::indexer::enterprise_edition::sharding::Shards;
use std::collections::BTreeMap;
use crate::network::Network;
use milli::update::new::indexer::enterprise_edition::sharding::Shards;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub sharding: bool,
}
impl Network {
pub fn shards(&self) -> Option<Shards> {
if self.sharding() {
Some(Shards::from_remotes_local(
self.remotes.keys().map(String::as_str),
self.local.as_deref(),
))
if self.sharding {
let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
let others = self
.remotes
.keys()
.filter(|name| name.as_str() != this)
.map(|name| name.to_owned())
.collect();
Some(Shards { own: vec![this.to_owned()], others })
} else {
None
}
}
pub fn sharding(&self) -> bool {
self.leader.is_some()
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
}
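// A hedged sketch of `shards()` above, assuming the `own`/`others` fields of
// `Shards` are public `Vec<String>`s (as the construction above suggests) and
// using hypothetical remote names.
#[cfg(test)]
mod shards_sketch {
    use std::collections::BTreeMap;

    use super::{Network, Remote};

    #[test]
    fn own_and_others() {
        let mut remotes = BTreeMap::new();
        for name in ["ms-0", "ms-1"] {
            let url = format!("http://{name}:7700");
            remotes.insert(name.to_string(), Remote { url, search_api_key: None, write_api_key: None });
        }
        let network = Network { local: Some("ms-0".to_string()), remotes, sharding: true };
        let shards = network.shards().expect("sharding is enabled");
        assert_eq!(shards.own, vec!["ms-0".to_string()]);
        assert_eq!(shards.others, vec!["ms-1".to_string()]);
    }
}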

View File

@@ -156,7 +156,7 @@ macro_rules! make_error_codes {
}
/// return error name, used as error code
pub fn name(&self) -> String {
fn name(&self) -> String {
match self {
$(
Code::$code_ident => stringify!($code_ident).to_case(convert_case::Case::Snake)
@@ -214,9 +214,6 @@ ImmutableApiKeyUid , InvalidRequest , BAD_REQU
ImmutableApiKeyUpdatedAt , InvalidRequest , BAD_REQUEST;
ImmutableIndexCreatedAt , InvalidRequest , BAD_REQUEST;
ImmutableIndexUpdatedAt , InvalidRequest , BAD_REQUEST;
ImportTaskAlreadyReceived , InvalidRequest , PRECONDITION_FAILED;
ImportTaskUnknownRemote , InvalidRequest , PRECONDITION_FAILED;
ImportTaskWithoutNetworkTask , InvalidRequest , SERVICE_UNAVAILABLE;
IndexAlreadyExists , InvalidRequest , CONFLICT ;
IndexCreationFailed , Internal , INTERNAL_SERVER_ERROR;
IndexNotFound , InvalidRequest , NOT_FOUND;
@@ -273,9 +270,9 @@ InvalidMultiSearchQueryRankingRules , InvalidRequest , BAD_REQU
InvalidMultiSearchQueryPosition , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchRemote , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchWeight , InvalidRequest , BAD_REQUEST ;
InvalidNetworkLeader , InvalidRequest , BAD_REQUEST ;
InvalidNetworkRemotes , InvalidRequest , BAD_REQUEST ;
InvalidNetworkSelf , InvalidRequest , BAD_REQUEST ;
InvalidNetworkSharding , InvalidRequest , BAD_REQUEST ;
InvalidNetworkSearchApiKey , InvalidRequest , BAD_REQUEST ;
InvalidNetworkWriteApiKey , InvalidRequest , BAD_REQUEST ;
InvalidNetworkUrl , InvalidRequest , BAD_REQUEST ;
@@ -380,9 +377,7 @@ MissingPayload , InvalidRequest , BAD_REQU
MissingSearchHybrid , InvalidRequest , BAD_REQUEST ;
MissingSwapIndexes , InvalidRequest , BAD_REQUEST ;
MissingTaskFilters , InvalidRequest , BAD_REQUEST ;
NetworkVersionMismatch , InvalidRequest , PRECONDITION_FAILED ;
NoSpaceLeftOnDevice , System , UNPROCESSABLE_ENTITY;
NotLeader , InvalidRequest , BAD_REQUEST ;
PayloadTooLarge , InvalidRequest , PAYLOAD_TOO_LARGE ;
RemoteBadResponse , System , BAD_GATEWAY ;
RemoteBadRequest , InvalidRequest , BAD_REQUEST ;
@@ -396,9 +391,6 @@ TaskFileNotFound , InvalidRequest , NOT_FOUN
BatchNotFound , InvalidRequest , NOT_FOUND ;
TooManyOpenFiles , System , UNPROCESSABLE_ENTITY ;
TooManyVectors , InvalidRequest , BAD_REQUEST ;
UnexpectedNetworkPreviousRemotes , InvalidRequest , BAD_REQUEST ;
NetworkVersionTooOld , InvalidRequest , BAD_REQUEST ;
UnprocessedNetworkTask , InvalidRequest , BAD_REQUEST ;
UnretrievableDocument , Internal , BAD_REQUEST ;
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
@@ -441,7 +433,6 @@ InvalidChatCompletionSearchQueryParamPrompt , InvalidRequest , BAD_REQU
InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ;
RequiresEnterpriseEdition , InvalidRequest , UNAVAILABLE_FOR_LEGAL_REASONS ;
// Webhooks
InvalidWebhooks , InvalidRequest , BAD_REQUEST ;
InvalidWebhookUrl , InvalidRequest , BAD_REQUEST ;

View File

@@ -2,17 +2,10 @@
pub mod batch_view;
pub mod batches;
#[cfg(not(feature = "enterprise"))]
pub mod community_edition;
pub mod compression;
pub mod deserr;
pub mod document_formats;
#[cfg(feature = "enterprise")]
pub mod enterprise_edition;
#[cfg(not(feature = "enterprise"))]
pub use community_edition as current_edition;
#[cfg(feature = "enterprise")]
pub use enterprise_edition as current_edition;
pub mod error;
pub mod facet_values_sort;
pub mod features;
@@ -20,7 +13,6 @@ pub mod index_uid;
pub mod index_uid_pattern;
pub mod keys;
pub mod locales;
pub mod network;
pub mod settings;
pub mod star_or;
pub mod task_view;

View File

@@ -1,27 +0,0 @@
use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub leader: Option<String>,
#[serde(default)]
pub version: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
}

View File

@@ -9,12 +9,12 @@ use utoipa::ToSchema;
use crate::batches::BatchId;
use crate::error::ResponseError;
use crate::settings::{Settings, Unchecked};
use crate::tasks::network::DbTaskNetwork;
use crate::tasks::{
serialize_duration, Details, DetailsExportIndexSettings, IndexSwap, Kind, Status, Task, TaskId,
TaskNetwork,
};
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)]
#[derive(Debug, Clone, PartialEq, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct TaskView {
@@ -54,7 +54,7 @@ pub struct TaskView {
pub finished_at: Option<OffsetDateTime>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<DbTaskNetwork>,
pub network: Option<TaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
@@ -151,11 +151,6 @@ pub struct DetailsView {
pub pre_compaction_size: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub post_compaction_size: Option<String>,
// network topology change
#[serde(skip_serializing_if = "Option::is_none")]
pub moved_documents: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl DetailsView {
@@ -166,17 +161,6 @@ impl DetailsView {
(None, Some(doc)) | (Some(doc), None) => Some(doc),
(Some(left), Some(right)) => Some(left + right),
},
moved_documents: match (self.moved_documents, other.moved_documents) {
(None, None) => None,
(None, Some(doc)) | (Some(doc), None) => Some(doc),
(Some(left), Some(right)) => Some(left + right),
},
message: match (&mut self.message, &other.message) {
(None, None) => None,
(None, Some(message)) => Some(message.clone()),
(Some(message), None) => Some(std::mem::take(message)),
(Some(message), Some(_)) => Some(std::mem::take(message)),
},
indexed_documents: match (self.indexed_documents, other.indexed_documents) {
(None, None) => None,
(None, Some(None)) | (Some(None), None) | (Some(None), Some(None)) => Some(None),
@@ -467,11 +451,6 @@ impl From<Details> for DetailsView {
..Default::default()
}
}
Details::NetworkTopologyChange { moved_documents, message } => DetailsView {
moved_documents: Some(moved_documents),
message: Some(message),
..Default::default()
},
}
}
}

View File

@@ -23,8 +23,6 @@ use crate::{versioning, InstanceUid};
pub type TaskId = u32;
pub mod network;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Task {
@@ -46,7 +44,7 @@ pub struct Task {
pub kind: KindWithContent,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<network::DbTaskNetwork>,
pub network: Option<TaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
@@ -63,7 +61,6 @@ impl Task {
| TaskDeletion { .. }
| Export { .. }
| UpgradeDatabase { .. }
| NetworkTopologyChange { .. }
| IndexSwap { .. } => None,
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -102,7 +99,6 @@ impl Task {
| KindWithContent::SnapshotCreation
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::NetworkTopologyChange { .. }
| KindWithContent::IndexCompaction { .. } => None,
}
}
@@ -182,7 +178,6 @@ pub enum KindWithContent {
IndexCompaction {
index_uid: String,
},
NetworkTopologyChange(network::NetworkTopologyChange),
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
@@ -220,7 +215,6 @@ impl KindWithContent {
KindWithContent::Export { .. } => Kind::Export,
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
}
}
@@ -233,7 +227,6 @@ impl KindWithContent {
| TaskCancelation { .. }
| TaskDeletion { .. }
| Export { .. }
| NetworkTopologyChange { .. }
| UpgradeDatabase { .. } => vec![],
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -347,10 +340,6 @@ impl KindWithContent {
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange { .. } => Some(Details::NetworkTopologyChange {
moved_documents: 0,
message: "processing tasks for previous network versions".into(),
}),
}
}
@@ -403,7 +392,7 @@ impl KindWithContent {
})
}
KindWithContent::IndexSwap { .. } => {
unimplemented!("do not call `default_finished_details` for `IndexSwap` tasks")
todo!()
}
KindWithContent::TaskCancelation { query, tasks } => Some(Details::TaskCancelation {
matched_tasks: tasks.len(),
@@ -438,9 +427,6 @@ impl KindWithContent {
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange(network_topology_change) => {
Some(network_topology_change.to_details())
}
}
}
}
@@ -508,9 +494,6 @@ impl From<&KindWithContent> for Option<Details> {
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange(network_topology_change) => {
Some(network_topology_change.to_details())
}
}
}
}
@@ -622,7 +605,6 @@ pub enum Kind {
Export,
UpgradeDatabase,
IndexCompaction,
NetworkTopologyChange,
}
impl Kind {
@@ -642,7 +624,6 @@ impl Kind {
| Kind::DumpCreation
| Kind::Export
| Kind::UpgradeDatabase
| Kind::NetworkTopologyChange
| Kind::SnapshotCreation => false,
}
}
@@ -665,7 +646,6 @@ impl Display for Kind {
Kind::Export => write!(f, "export"),
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
Kind::IndexCompaction => write!(f, "indexCompaction"),
Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
}
}
}
@@ -703,8 +683,6 @@ impl FromStr for Kind {
Ok(Kind::UpgradeDatabase)
} else if kind.eq_ignore_ascii_case("indexCompaction") {
Ok(Kind::IndexCompaction)
} else if kind.eq_ignore_ascii_case("networkTopologyChange") {
Ok(Kind::NetworkTopologyChange)
} else {
Err(ParseTaskKindError(kind.to_owned()))
}
@@ -795,10 +773,36 @@ pub enum Details {
pre_compaction_size: Option<Byte>,
post_compaction_size: Option<Byte>,
},
NetworkTopologyChange {
moved_documents: u64,
message: String,
},
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(untagged, rename_all = "camelCase")]
pub enum TaskNetwork {
Origin { origin: Origin },
Remotes { remote_tasks: BTreeMap<String, RemoteTask> },
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct Origin {
pub remote_name: String,
pub task_uid: usize,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct RemoteTask {
#[serde(skip_serializing_if = "Option::is_none")]
task_uid: Option<TaskId>,
error: Option<ResponseError>,
}
impl From<Result<TaskId, ResponseError>> for RemoteTask {
fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
match res {
Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
Err(err) => RemoteTask { task_uid: None, error: Some(err) },
}
}
}
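// Because `TaskNetwork` is `#[serde(untagged)]`, the variants are distinguished
// purely by their fields. Hypothetical JSON shapes:
//   {"origin": {"remoteName": "ms-0", "taskUid": 42}}
//   {"remoteTasks": {"ms-1": {"taskUid": 7}, "ms-2": {"error": {/* ResponseError */}}}}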
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
@@ -841,9 +845,6 @@ impl Details {
| Self::Export { .. }
| Self::UpgradeDatabase { .. }
| Self::IndexSwap { .. } => (),
Self::NetworkTopologyChange { moved_documents: _, message } => {
*message = format!("Failed. Previous status: {}", message);
}
}
details

View File

@@ -1,787 +0,0 @@
use std::collections::BTreeMap;
use base64::Engine as _;
use itertools::{EitherOrBoth, Itertools as _};
use milli::{CboRoaringBitmapCodec, DocumentId};
use roaring::RoaringBitmap;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use uuid::Uuid;
use crate::error::ResponseError;
use crate::network::{Network, Remote};
use crate::tasks::{Details, TaskId};
#[cfg(not(feature = "enterprise"))]
mod community_edition;
#[cfg(feature = "enterprise")]
mod enterprise_edition;
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(untagged, rename_all = "camelCase")]
// This type is used in the database, care should be taken when modifying it.
pub enum DbTaskNetwork {
/// Tasks that were duplicated from `origin`
Origin { origin: Origin },
/// Tasks that were duplicated as `remote_tasks`
Remotes {
remote_tasks: BTreeMap<String, RemoteTask>,
#[serde(default)]
network_version: Uuid,
},
/// Document import tasks sent in the context of `network_change`
Import { import_from: ImportData, network_change: Origin },
}
impl DbTaskNetwork {
pub fn network_version(&self) -> Uuid {
match self {
DbTaskNetwork::Origin { origin } => origin.network_version,
DbTaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
DbTaskNetwork::Import { import_from: _, network_change } => {
network_change.network_version
}
}
}
pub fn import_data(&self) -> Option<&ImportData> {
match self {
DbTaskNetwork::Origin { .. } | DbTaskNetwork::Remotes { .. } => None,
DbTaskNetwork::Import { import_from, .. } => Some(import_from),
}
}
pub fn origin(&self) -> Option<&Origin> {
match self {
DbTaskNetwork::Origin { origin } => Some(origin),
DbTaskNetwork::Remotes { .. } => None,
DbTaskNetwork::Import { network_change, .. } => Some(network_change),
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum TaskNetwork {
/// Tasks that were duplicated from `origin`
Origin { origin: Origin },
/// Tasks that were duplicated as `remote_tasks`
Remotes { remote_tasks: BTreeMap<String, RemoteTask>, network_version: Uuid },
/// Document import tasks sent in the context of `network_change`
Import { import_from: ImportData, network_change: Origin, metadata: ImportMetadata },
}
impl TaskNetwork {
pub fn network_version(&self) -> Uuid {
match self {
TaskNetwork::Origin { origin } => origin.network_version,
TaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
TaskNetwork::Import { import_from: _, network_change, metadata: _ } => {
network_change.network_version
}
}
}
}
impl From<TaskNetwork> for DbTaskNetwork {
fn from(value: TaskNetwork) -> Self {
match value {
TaskNetwork::Origin { origin } => DbTaskNetwork::Origin { origin },
TaskNetwork::Remotes { remote_tasks, network_version } => {
DbTaskNetwork::Remotes { remote_tasks, network_version }
}
TaskNetwork::Import { import_from, network_change, metadata: _ } => {
DbTaskNetwork::Import { import_from, network_change }
}
}
}
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct Origin {
pub remote_name: String,
pub task_uid: u32,
#[serde(default)]
pub network_version: Uuid,
}
/// Import data stored in a task
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct ImportData {
/// Remote that this task is imported from
pub remote_name: String,
/// Index relevant to this task
pub index_name: Option<String>,
/// Number of documents in this task
pub document_count: u64,
}
/// Import metadata associated with a task but not stored in the task
#[derive(Debug, PartialEq, Clone)]
pub struct ImportMetadata {
/// Total number of indexes to import from this host
pub index_count: u64,
/// Key unique within a given (network_change, index, host).
///
/// In practice, an internal document id of one of the documents to import.
pub task_key: Option<DocumentId>,
/// Total number of documents to import for this index from this host.
pub total_index_documents: u64,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct RemoteTask {
#[serde(skip_serializing_if = "Option::is_none")]
task_uid: Option<TaskId>,
error: Option<ResponseError>,
}
impl From<Result<TaskId, ResponseError>> for RemoteTask {
fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
match res {
Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
Err(err) => RemoteTask { task_uid: None, error: Some(err) },
}
}
}
/// Contains the full state of a network topology change.
///
/// A network topology change task is unique in that it can be processed in multiple different batches, as its resolution
/// depends on various document additions tasks being processed.
///
/// A network topology task has 4 states:
///
/// 1. Processing any task that was meant for an earlier version of the network. This is necessary to know that we have the right version of
/// documents.
/// 2. Sending all documents that must be moved to other remotes.
/// 3. Processing any task coming from the remotes.
/// 4. Finished.
///
/// Furthermore, it maintains some statistics, currently the number of moved documents.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NetworkTopologyChange {
state: NetworkTopologyState,
// in name, `None` if the node is no longer part of the network
#[serde(skip_serializing_if = "Option::is_none")]
in_name: Option<String>,
// out name, `None` if the node is new to the network
#[serde(skip_serializing_if = "Option::is_none")]
out_name: Option<String>,
out_remotes: BTreeMap<String, Remote>,
in_remotes: BTreeMap<String, InRemote>,
stats: NetworkTopologyStats,
}
impl NetworkTopologyChange {
pub fn new(old_network: Network, new_network: Network) -> Self {
// we use our new name as import name
let in_name = new_network.local;
// we use our old name as export name
let out_name = old_network.local.or_else(|| in_name.clone());
// we export to the new network
let mut out_remotes = new_network.remotes;
// don't export to ourselves
if let Some(in_name) = &in_name {
out_remotes.remove(in_name);
}
let in_remotes = if in_name.is_some() {
old_network
.remotes
.into_keys()
.chain(out_remotes.keys().cloned())
// don't await imports from ourselves
.filter(|name| Some(name.as_str()) != out_name.as_deref())
.map(|name| (name, InRemote::new()))
.collect()
} else {
Default::default()
};
Self {
state: NetworkTopologyState::WaitingForOlderTasks,
in_name,
out_name,
out_remotes,
in_remotes,
stats: NetworkTopologyStats { moved_documents: 0 },
}
}
pub fn state(&self) -> NetworkTopologyState {
self.state
}
pub fn out_name(&self) -> Option<&str> {
// `None` only when the node is new to the network; one of `out_name` or `in_name` is always defined
self.out_name.as_deref()
}
pub fn in_name(&self) -> Option<&str> {
self.in_name.as_deref()
}
pub fn to_details(&self) -> Details {
let message = match self.state {
NetworkTopologyState::WaitingForOlderTasks => {
"Waiting for tasks enqueued before the network change to finish processing".into()
}
NetworkTopologyState::ExportingDocuments => "Exporting documents".into(),
NetworkTopologyState::ImportingDocuments => {
let mut finished_count = 0;
let mut first_ongoing = None;
let mut ongoing_total_indexes = 0;
let mut ongoing_processed_documents = 0;
let mut ongoing_missing_documents = 0;
let mut ongoing_total_documents = 0;
let mut other_ongoing_count = 0;
let mut first_waiting = None;
let mut other_waiting_count = 0;
for (remote_name, in_remote) in &self.in_remotes {
match &in_remote.import_state {
ImportState::WaitingForInitialTask => {
first_waiting = match first_waiting {
None => Some(remote_name),
first_waiting => {
other_waiting_count += 1;
first_waiting
}
};
}
ImportState::Ongoing { import_index_state, total_indexes } => {
first_ongoing = match first_ongoing {
None => {
ongoing_total_indexes = *total_indexes;
Some(remote_name)
}
first_ongoing => {
other_ongoing_count += 1;
first_ongoing
}
};
for import_state in import_index_state.values() {
match import_state {
ImportIndexState::Ongoing {
total_documents,
processed_documents,
received_documents,
task_keys: _,
} => {
ongoing_total_documents += total_documents;
ongoing_processed_documents += processed_documents;
ongoing_missing_documents +=
total_documents.saturating_sub(*received_documents);
}
ImportIndexState::Finished { total_documents } => {
ongoing_total_documents += total_documents;
ongoing_processed_documents += total_documents;
}
}
}
}
ImportState::Finished { total_indexes, total_documents } => {
finished_count += 1;
ongoing_total_indexes = *total_indexes;
ongoing_total_documents += *total_documents;
ongoing_processed_documents += *total_documents;
}
}
}
format!(
"Importing documents from {total} remotes{waiting}{ongoing}{finished}",
total = self.in_remotes.len(),
waiting = if let Some(first_waiting) = first_waiting {
format!(
", waiting on first task from `{}`{others}",
first_waiting,
others = if other_waiting_count > 0 {
format!(" and {other_waiting_count} other remotes")
} else {
"".into()
}
)
} else {
"".into()
},
ongoing = if let Some(first_ongoing) = first_ongoing {
format!(", awaiting {ongoing_missing_documents} and processed {ongoing_processed_documents} out of {ongoing_total_documents} documents in {ongoing_total_indexes} indexes from `{first_ongoing}`{others}",
others=if other_ongoing_count > 0 {format!(" and {other_ongoing_count} other remotes")} else {"".into()})
} else {
"".into()
},
finished = if finished_count > 0 {
format!(", {finished_count} remotes finished processing")
} else {
"".into()
}
)
}
NetworkTopologyState::Finished => "Finished".into(),
};
Details::NetworkTopologyChange { moved_documents: self.stats.moved_documents, message }
}
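// Hypothetical example of a message built above while importing:
//   "Importing documents from 3 remotes, waiting on first task from `ms-2`,
//    awaiting 120 and processed 880 out of 1000 documents in 2 indexes from
//    `ms-1`, 1 remotes finished processing"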
pub fn merge(&mut self, other: NetworkTopologyChange) {
// The topology change has a guarantee of forward progress, so for each field we're going to keep the "most advanced" values.
let Self { state, in_name: _, out_name: _, out_remotes: _, in_remotes, stats } = self;
*state = Ord::max(*state, other.state);
*stats = Ord::max(*stats, other.stats);
for (old_value, new_value) in other.in_remotes.into_values().zip(in_remotes.values_mut()) {
new_value.import_state = match (old_value.import_state, std::mem::take(&mut new_value.import_state)) {
// waiting for initial task is always older
(ImportState::WaitingForInitialTask, newer)
| (newer, ImportState::WaitingForInitialTask)
// finished is always newer
| (_, newer @ ImportState::Finished { .. })
| (newer @ ImportState::Finished { .. }, _) => newer,
(
ImportState::Ongoing { import_index_state: left_import, total_indexes: left_total_indexes },
ImportState::Ongoing { import_index_state: right_import, total_indexes: right_total_indexes },
) => {
let import_index_state = left_import.into_iter().merge_join_by(right_import.into_iter(), |(k,_), (x, _)|k.cmp(x)).map(|eob|
match eob {
EitherOrBoth::Both((name, left), (_, right)) => {
let newer = merge_import_index_state(left, right);
(name, newer)
},
EitherOrBoth::Left(import) |
EitherOrBoth::Right(import) => import,
}
).collect();
ImportState::Ongoing{ import_index_state, total_indexes : u64::max(left_total_indexes, right_total_indexes) }
},
}
}
}
}
fn merge_import_index_state(left: ImportIndexState, right: ImportIndexState) -> ImportIndexState {
match (left, right) {
(_, newer @ ImportIndexState::Finished { .. }) => newer,
(newer @ ImportIndexState::Finished { .. }, _) => newer,
(
ImportIndexState::Ongoing {
total_documents: left_total_documents,
received_documents: left_received_documents,
processed_documents: left_processed_documents,
task_keys: mut left_task_keys,
},
ImportIndexState::Ongoing {
total_documents: right_total_documents,
received_documents: right_received_documents,
processed_documents: right_processed_documents,
task_keys: right_task_keys,
},
) => {
let total_documents = u64::max(left_total_documents, right_total_documents);
let received_documents = u64::max(left_received_documents, right_received_documents);
let processed_documents = u64::max(left_processed_documents, right_processed_documents);
left_task_keys.0 |= &right_task_keys.0;
let task_keys = left_task_keys;
ImportIndexState::Ongoing {
total_documents,
received_documents,
processed_documents,
task_keys,
}
}
}
}
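// Merge semantics sketch: counters keep the maximum seen on either side and
// task keys are unioned (`left_task_keys.0 |= &right_task_keys.0`), so merging
// two snapshots of the same import can only move it forward, never backward.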
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
pub enum NetworkTopologyState {
WaitingForOlderTasks,
ExportingDocuments,
ImportingDocuments,
Finished,
}
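// The `Ord` derive compares by declaration order, which is what lets `merge`
// above keep the "most advanced" state with `Ord::max`:
//   WaitingForOlderTasks < ExportingDocuments < ImportingDocuments < Finished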
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
pub struct NetworkTopologyStats {
#[serde(default)]
pub moved_documents: u64,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InRemote {
import_state: ImportState,
}
impl InRemote {
pub fn new() -> Self {
Self { import_state: ImportState::WaitingForInitialTask }
}
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum ImportState {
/// Initially Meilisearch doesn't know how many documents it should expect from a remote.
/// Any task from each remote contains the information of how many indexes will be imported,
/// and the number of documents to import for the index of the task.
#[default]
WaitingForInitialTask,
Ongoing {
import_index_state: BTreeMap<String, ImportIndexState>,
total_indexes: u64,
},
Finished {
total_indexes: u64,
total_documents: u64,
},
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum ImportIndexState {
Ongoing {
total_documents: u64,
received_documents: u64,
processed_documents: u64,
task_keys: TaskKeys,
},
Finished {
total_documents: u64,
},
}
#[derive(Debug, Clone, PartialEq)]
pub struct TaskKeys(pub RoaringBitmap);
impl Serialize for TaskKeys {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let TaskKeys(task_keys) = self;
let mut bytes = Vec::new();
CboRoaringBitmapCodec::serialize_into_vec(task_keys, &mut bytes);
let encoded = base64::prelude::BASE64_STANDARD.encode(&bytes);
serializer.serialize_str(&encoded)
}
}
impl<'de> Deserialize<'de> for TaskKeys {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_str(TaskKeysVisitor)
}
}
struct TaskKeysVisitor;
impl<'de> serde::de::Visitor<'de> for TaskKeysVisitor {
type Value = TaskKeys;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a base64 encoded cbo roaring bitmap")
}
fn visit_str<E>(self, encoded: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
let decoded = base64::prelude::BASE64_STANDARD.decode(encoded).map_err(|_err| {
E::invalid_value(serde::de::Unexpected::Str(encoded), &"a base64 string")
})?;
self.visit_bytes(&decoded)
}
fn visit_bytes<E>(self, decoded: &[u8]) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
let task_keys = CboRoaringBitmapCodec::deserialize_from(decoded).map_err(|_err| {
E::invalid_value(serde::de::Unexpected::Bytes(decoded), &"a cbo roaring bitmap")
})?;
Ok(TaskKeys(task_keys))
}
}
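// A round-trip sketch of the (de)serialization above, under the same
// base64 + CboRoaringBitmap encoding:
//
//     let keys = TaskKeys(RoaringBitmap::from_iter([1u32, 5, 9]));
//     let json = serde_json::to_string(&keys)?; // a base64 JSON string
//     let back: TaskKeys = serde_json::from_str(&json)?;
//     assert_eq!(back.0, keys.0);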
pub enum ReceiveTaskError {
UnknownRemote(String),
DuplicateTask(DocumentId),
}
pub mod headers {
use std::borrow::Cow;
use std::num::ParseIntError;
use std::string::FromUtf8Error;
use milli::DocumentId;
use uuid::Uuid;
use crate::tasks::TaskId;
/// Implement on response types to extract header values
pub trait GetHeader: Sized {
type Error: std::fmt::Debug + std::fmt::Display;
fn get_header(&self, name: &str) -> Result<Option<&str>, Self::Error>;
fn get_origin_remote(&self) -> Result<Option<Cow<'_, str>>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_ORIGIN_REMOTE_HEADER)? else {
return Ok(None);
};
Ok(Some(urlencoding::decode(encoded).map_err(|inner| DecodeError::UrlDecoding {
inner,
header: PROXY_ORIGIN_REMOTE_HEADER,
})?))
}
fn get_origin_task_uid(&self) -> Result<Option<TaskId>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_ORIGIN_TASK_UID_HEADER)? else {
return Ok(None);
};
let decoded = urlencoding::decode(encoded).map_err(|inner| {
DecodeError::UrlDecoding { inner, header: PROXY_ORIGIN_TASK_UID_HEADER }
})?;
let parsed = decoded.parse().map_err(|inner| DecodeError::ParseInt {
inner,
header: PROXY_ORIGIN_TASK_UID_HEADER,
})?;
Ok(Some(parsed))
}
fn get_origin_network_version(&self) -> Result<Option<Uuid>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_ORIGIN_NETWORK_VERSION_HEADER)?
else {
return Ok(None);
};
let decoded = urlencoding::decode(encoded).map_err(|inner| {
DecodeError::UrlDecoding { inner, header: PROXY_ORIGIN_NETWORK_VERSION_HEADER }
})?;
let parsed = decoded.parse().map_err(|inner| DecodeError::ParseUuid {
inner,
header: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
})?;
Ok(Some(parsed))
}
fn get_import_remote(&self) -> Result<Option<Cow<'_, str>>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_IMPORT_REMOTE_HEADER)? else {
return Ok(None);
};
Ok(Some(urlencoding::decode(encoded).map_err(|inner| DecodeError::UrlDecoding {
inner,
header: PROXY_IMPORT_REMOTE_HEADER,
})?))
}
fn get_import_index_count(&self) -> Result<Option<u64>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_IMPORT_INDEX_COUNT_HEADER)?
else {
return Ok(None);
};
let decoded = urlencoding::decode(encoded).map_err(|inner| {
DecodeError::UrlDecoding { inner, header: PROXY_IMPORT_INDEX_COUNT_HEADER }
})?;
let parsed = decoded.parse().map_err(|inner| DecodeError::ParseInt {
inner,
header: PROXY_IMPORT_INDEX_COUNT_HEADER,
})?;
Ok(Some(parsed))
}
fn get_import_index(&self) -> Result<Option<Cow<'_, str>>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_IMPORT_INDEX_HEADER)? else {
return Ok(None);
};
Ok(Some(urlencoding::decode(encoded).map_err(|inner| DecodeError::UrlDecoding {
inner,
header: PROXY_IMPORT_INDEX_HEADER,
})?))
}
fn get_import_task_key(&self) -> Result<Option<DocumentId>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_IMPORT_TASK_KEY_HEADER)? else {
return Ok(None);
};
let decoded = urlencoding::decode(encoded).map_err(|inner| {
DecodeError::UrlDecoding { inner, header: PROXY_IMPORT_TASK_KEY_HEADER }
})?;
let parsed = decoded.parse().map_err(|inner| DecodeError::ParseInt {
inner,
header: PROXY_IMPORT_TASK_KEY_HEADER,
})?;
Ok(Some(parsed))
}
fn get_import_docs(&self) -> Result<Option<u64>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_IMPORT_DOCS_HEADER)? else {
return Ok(None);
};
let decoded = urlencoding::decode(encoded).map_err(|inner| {
DecodeError::UrlDecoding { inner, header: PROXY_IMPORT_DOCS_HEADER }
})?;
let parsed = decoded.parse().map_err(|inner| DecodeError::ParseInt {
inner,
header: PROXY_IMPORT_DOCS_HEADER,
})?;
Ok(Some(parsed))
}
fn get_import_index_docs(&self) -> Result<Option<u64>, DecodeError<Self>> {
let Some(encoded) = get_header_and_legacy(self, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER)?
else {
return Ok(None);
};
let decoded = urlencoding::decode(encoded).map_err(|inner| {
DecodeError::UrlDecoding { inner, header: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER }
})?;
let parsed = decoded.parse().map_err(|inner| DecodeError::ParseInt {
inner,
header: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
})?;
Ok(Some(parsed))
}
}
/// Implement on query types to set header values
pub trait SetHeader: Sized {
fn set_header(self, name: &str, value: &str) -> Self;
fn set_origin_remote(self, value: &str) -> Self {
let encoded = urlencoding::encode(value);
set_header_and_legacy(self, PROXY_ORIGIN_REMOTE_HEADER, &encoded)
}
fn set_origin_task_uid(self, value: TaskId) -> Self {
let value = value.to_string();
let encoded = urlencoding::encode(&value);
set_header_and_legacy(self, PROXY_ORIGIN_TASK_UID_HEADER, &encoded)
}
fn set_origin_network_version(self, value: Uuid) -> Self {
let value = value.to_string();
let encoded = urlencoding::encode(&value);
set_header_and_legacy(self, PROXY_ORIGIN_NETWORK_VERSION_HEADER, &encoded)
}
fn set_import_remote(self, value: &str) -> Self {
let encoded = urlencoding::encode(value);
set_header_and_legacy(self, PROXY_IMPORT_REMOTE_HEADER, &encoded)
}
fn set_import_index_count(self, value: u64) -> Self {
let value = value.to_string();
let encoded = urlencoding::encode(&value);
set_header_and_legacy(self, PROXY_IMPORT_INDEX_COUNT_HEADER, &encoded)
}
fn set_import_index(self, value: &str) -> Self {
let encoded = urlencoding::encode(value);
set_header_and_legacy(self, PROXY_IMPORT_INDEX_HEADER, &encoded)
}
fn set_import_task_key(self, value: DocumentId) -> Self {
let value = value.to_string();
let encoded = urlencoding::encode(&value);
set_header_and_legacy(self, PROXY_IMPORT_TASK_KEY_HEADER, &encoded)
}
fn set_import_docs(self, value: u64) -> Self {
let value = value.to_string();
let encoded = urlencoding::encode(&value);
set_header_and_legacy(self, PROXY_IMPORT_DOCS_HEADER, &encoded)
}
fn set_import_index_docs(self, value: u64) -> Self {
let value = value.to_string();
let encoded = urlencoding::encode(&value);
set_header_and_legacy(self, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER, &encoded)
}
}
#[derive(Debug, thiserror::Error)]
pub enum DecodeError<T: GetHeader> {
#[error("while getting header: {inner}")]
InResponse { inner: T::Error, header: &'static str },
#[error("while url-decoding: {inner}")]
UrlDecoding { inner: FromUtf8Error, header: &'static str },
#[error("while parsing as an integer: {inner}")]
ParseInt { inner: ParseIntError, header: &'static str },
#[error("while parsing as a UUID: {inner}")]
ParseUuid { inner: uuid::Error, header: &'static str },
}
impl<T: GetHeader> DecodeError<T> {
pub fn header(&self) -> &'static str {
match self {
DecodeError::InResponse { inner: _, header }
| DecodeError::UrlDecoding { inner: _, header }
| DecodeError::ParseInt { inner: _, header }
| DecodeError::ParseUuid { inner: _, header } => header,
}
}
}
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "X-Meili-Proxy-Origin-Remote";
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "X-Meili-Proxy-Origin-TaskUid";
pub const PROXY_ORIGIN_NETWORK_VERSION_HEADER: &str = "X-Meili-Proxy-Origin-Network-Version";
pub const PROXY_IMPORT_REMOTE_HEADER: &str = "X-Meili-Proxy-Import-Remote";
pub const PROXY_IMPORT_INDEX_COUNT_HEADER: &str = "X-Meili-Proxy-Import-Index-Count";
pub const PROXY_IMPORT_INDEX_HEADER: &str = "X-Meili-Proxy-Import-Index";
pub const PROXY_IMPORT_TASK_KEY_HEADER: &str = "X-Meili-Proxy-Import-Task-Key";
pub const PROXY_IMPORT_DOCS_HEADER: &str = "X-Meili-Proxy-Import-Docs";
pub const PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER: &str = "X-Meili-Proxy-Import-Total-Index-Docs";
fn get_header_and_legacy<'a, T: GetHeader>(
t: &'a T,
header: &'static str,
) -> Result<Option<&'a str>, DecodeError<T>> {
Ok(Some(
if let Some(encoded) =
t.get_header(header).map_err(|inner| DecodeError::InResponse { inner, header })?
{
encoded
} else {
let header = header.strip_prefix("X-").unwrap();
let Some(encoded) = t
.get_header(header)
.map_err(|inner| DecodeError::InResponse { inner, header })?
else {
return Ok(None);
};
encoded
},
))
}
fn set_header_and_legacy<T: SetHeader>(t: T, name: &'static str, value: &str) -> T {
let t = t.set_header(name, value);
let name = name.strip_prefix("X-").unwrap();
t.set_header(name, value)
}
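// Fallback sketch: every header is written twice, with and without the "X-"
// prefix, and read with the prefixed name first. For example (hypothetical
// value), "X-Meili-Proxy-Origin-Remote: ms-0" is also sent as
// "Meili-Proxy-Origin-Remote: ms-0" so peers using the legacy names still match.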
}

View File

@@ -1,52 +0,0 @@
use std::collections::BTreeMap;
use milli::DocumentId;
use crate::network::Remote;
use crate::tasks::network::{ImportState, InRemote, NetworkTopologyChange, ReceiveTaskError};
impl NetworkTopologyChange {
pub fn export_to_process(&self) -> Option<(&BTreeMap<String, Remote>, &str)> {
None
}
pub fn set_moved(&mut self, _moved_documents: u64) {}
pub fn update_state(&mut self) {}
pub fn receive_remote_task(
&mut self,
_remote_name: &str,
_index_name: Option<&str>,
_task_key: Option<DocumentId>,
_document_count: u64,
_total_indexes: u64,
_total_index_documents: u64,
) -> Result<(), ReceiveTaskError> {
Ok(())
}
pub fn process_remote_tasks(
&mut self,
_remote_name: &str,
_index_name: &str,
_document_count: u64,
) {
}
pub fn is_import_finished(&self) -> bool {
true
}
}
impl InRemote {
pub fn is_finished(&self) -> bool {
matches!(self.import_state, ImportState::Finished { .. })
}
}
impl Default for InRemote {
fn default() -> Self {
Self::new()
}
}
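// Note: in the community edition these are inert stubs. There is never an
// export target, received remote tasks are accepted without bookkeeping, and
// `is_import_finished` is always true, so a `NetworkTopologyChange` task
// completes without moving any documents.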

View File

@@ -1,239 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use std::collections::BTreeMap;
use milli::DocumentId;
use roaring::RoaringBitmap;
use super::TaskKeys;
use crate::network::Remote;
use crate::tasks::network::{
ImportIndexState, ImportState, InRemote, NetworkTopologyChange, NetworkTopologyState,
ReceiveTaskError,
};
impl NetworkTopologyChange {
pub fn export_to_process(&self) -> Option<(&BTreeMap<String, Remote>, &str)> {
if self.state != NetworkTopologyState::ExportingDocuments {
return None;
}
if self.out_remotes.is_empty() {
return None;
}
let out_name = self.out_name()?;
Some((&self.out_remotes, out_name))
}
pub fn set_moved(&mut self, moved_documents: u64) {
self.stats.moved_documents = moved_documents;
}
/// Compute the next state from the current state of the task.
pub fn update_state(&mut self) {
self.state = match self.state {
NetworkTopologyState::WaitingForOlderTasks => {
// no more older tasks, so finished waiting
NetworkTopologyState::ExportingDocuments
}
NetworkTopologyState::ExportingDocuments => {
// processed all exported documents
if self.is_import_finished() {
NetworkTopologyState::Finished
} else {
NetworkTopologyState::ImportingDocuments
}
}
NetworkTopologyState::ImportingDocuments => {
if self.is_import_finished() {
NetworkTopologyState::Finished
} else {
NetworkTopologyState::ImportingDocuments
}
}
NetworkTopologyState::Finished => NetworkTopologyState::Finished,
};
}
pub fn receive_remote_task(
&mut self,
remote_name: &str,
index_name: Option<&str>,
task_key: Option<DocumentId>,
document_count: u64,
total_indexes: u64,
total_index_documents: u64,
) -> Result<(), ReceiveTaskError> {
let remote = self
.in_remotes
.get_mut(remote_name)
.ok_or_else(|| ReceiveTaskError::UnknownRemote(remote_name.to_string()))?;
remote.import_state = match std::mem::take(&mut remote.import_state) {
ImportState::WaitingForInitialTask => {
if total_indexes == 0 {
ImportState::Finished { total_indexes, total_documents: 0 }
} else {
let mut task_keys = RoaringBitmap::new();
if let Some(index_name) = index_name {
if let Some(task_key) = task_key {
task_keys.insert(task_key);
}
let mut import_index_state = BTreeMap::new();
import_index_state.insert(
index_name.to_owned(),
ImportIndexState::Ongoing {
total_documents: total_index_documents,
received_documents: document_count,
task_keys: TaskKeys(task_keys),
processed_documents: 0,
},
);
ImportState::Ongoing { import_index_state, total_indexes }
} else {
ImportState::WaitingForInitialTask
}
}
}
ImportState::Ongoing { mut import_index_state, total_indexes } => {
if let Some(index_name) = index_name {
if let Some((index_name, mut index_state)) =
import_index_state.remove_entry(index_name)
{
index_state = match index_state {
ImportIndexState::Ongoing {
total_documents,
received_documents: previously_received,
processed_documents,
mut task_keys,
} => {
if let Some(task_key) = task_key {
if !task_keys.0.insert(task_key) {
return Err(ReceiveTaskError::DuplicateTask(task_key));
}
}
ImportIndexState::Ongoing {
total_documents,
received_documents: previously_received + document_count,
processed_documents,
task_keys,
}
}
ImportIndexState::Finished { total_documents } => {
ImportIndexState::Finished { total_documents }
}
};
import_index_state.insert(index_name, index_state);
} else {
let mut task_keys = RoaringBitmap::new();
if let Some(task_key) = task_key {
task_keys.insert(task_key);
}
let state = ImportIndexState::Ongoing {
total_documents: total_index_documents,
received_documents: document_count,
processed_documents: 0,
task_keys: TaskKeys(task_keys),
};
import_index_state.insert(index_name.to_string(), state);
}
ImportState::Ongoing { import_index_state, total_indexes }
} else {
ImportState::Ongoing { import_index_state, total_indexes }
}
}
ImportState::Finished { total_indexes, total_documents } => {
ImportState::Finished { total_indexes, total_documents }
}
};
Ok(())
}
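// Per-remote state machine sketch (hypothetical counts): the first task moves a
// remote from `WaitingForInitialTask` to `Ongoing` (or straight to `Finished`
// when it advertises zero indexes); each later task bumps `received_documents`
// for its index, and a duplicate task key is rejected with
// `ReceiveTaskError::DuplicateTask`.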
pub fn process_remote_tasks(
&mut self,
remote_name: &str,
index_name: &str,
document_count: u64,
) {
let remote = self
.in_remotes
.get_mut(remote_name)
.expect("process_remote_tasks called on a remote that is not in `in_remotes`");
remote.import_state = match std::mem::take(&mut remote.import_state) {
ImportState::WaitingForInitialTask => panic!("a task was processed before any was received"),
ImportState::Ongoing { mut import_index_state, total_indexes } => {
let (index_name, mut index_state) =
import_index_state.remove_entry(index_name).unwrap();
index_state = match index_state {
ImportIndexState::Ongoing {
total_documents,
received_documents,
processed_documents: previously_processed,
task_keys,
} => {
let newly_processed_documents = previously_processed + document_count;
if newly_processed_documents >= total_documents {
ImportIndexState::Finished { total_documents }
} else {
ImportIndexState::Ongoing {
total_documents,
received_documents,
processed_documents: newly_processed_documents,
task_keys,
}
}
}
ImportIndexState::Finished { total_documents } => {
ImportIndexState::Finished { total_documents }
}
};
import_index_state.insert(index_name, index_state);
if import_index_state.len() as u64 == total_indexes
&& import_index_state.values().all(|index| index.is_finished())
{
let total_documents =
import_index_state.values().map(|index| index.total_documents()).sum();
ImportState::Finished { total_indexes, total_documents }
} else {
ImportState::Ongoing { import_index_state, total_indexes }
}
}
ImportState::Finished { total_indexes, total_documents } => {
ImportState::Finished { total_indexes, total_documents }
}
}
}
pub fn is_import_finished(&self) -> bool {
self.in_remotes.values().all(|remote| remote.is_finished())
}
}
impl InRemote {
pub fn is_finished(&self) -> bool {
matches!(self.import_state, ImportState::Finished { .. })
}
}
impl Default for InRemote {
fn default() -> Self {
Self::new()
}
}
impl ImportIndexState {
pub fn is_finished(&self) -> bool {
matches!(self, ImportIndexState::Finished { .. })
}
fn total_documents(&self) -> u64 {
match *self {
ImportIndexState::Ongoing { total_documents, .. }
| ImportIndexState::Finished { total_documents } => total_documents,
}
}
}

View File

@@ -14,91 +14,91 @@ default-run = "meilisearch"
[dependencies]
actix-cors = "0.7.1"
actix-http = { version = "3.11.2", default-features = false, features = [
actix-http = { version = "3.11.0", default-features = false, features = [
"compress-brotli",
"compress-gzip",
"rustls-0_23",
] }
actix-utils = "3.0.1"
actix-web = { version = "4.12.0", default-features = false, features = [
actix-web = { version = "4.11.0", default-features = false, features = [
"macros",
"compress-brotli",
"compress-gzip",
"cookies",
"rustls-0_23",
] }
anyhow = { version = "1.0.100", features = ["backtrace"] }
bstr = "1.12.1"
anyhow = { version = "1.0.98", features = ["backtrace"] }
bstr = "1.12.0"
byte-unit = { version = "5.1.6", features = ["serde"] }
bytes = "1.11.0"
bumpalo = "3.19.0"
clap = { version = "4.5.52", features = ["derive", "env"] }
bytes = "1.10.1"
bumpalo = "3.18.1"
clap = { version = "4.5.40", features = ["derive", "env"] }
crossbeam-channel = "0.5.15"
deserr = { version = "0.6.4", features = ["actix-web"] }
deserr = { version = "0.6.3", features = ["actix-web"] }
dump = { path = "../dump" }
either = "1.15.0"
file-store = { path = "../file-store" }
flate2 = "1.1.5"
flate2 = "1.1.2"
fst = "0.4.7"
futures = "0.3.31"
futures-util = "0.3.31"
index-scheduler = { path = "../index-scheduler" }
indexmap = { version = "2.12.0", features = ["serde"] }
is-terminal = "0.4.17"
indexmap = { version = "2.9.0", features = ["serde"] }
is-terminal = "0.4.16"
itertools = "0.14.0"
jsonwebtoken = "9.3.1"
lazy_static = "1.5.0"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
memmap2 = "0.9.9"
mimalloc = { version = "0.1.48", default-features = false }
memmap2 = "0.9.7"
mimalloc = { version = "0.1.47", default-features = false }
mime = "0.3.17"
num_cpus = "1.17.0"
obkv = "0.3.0"
once_cell = "1.21.3"
ordered-float = "5.1.0"
parking_lot = "0.12.5"
ordered-float = "5.0.0"
parking_lot = "0.12.4"
permissive-json-pointer = { path = "../permissive-json-pointer" }
pin-project-lite = "0.2.16"
platform-dirs = "0.3.0"
prometheus = { version = "0.14.0", features = ["process"] }
rand = "0.8.5"
rayon = "1.11.0"
regex = "1.12.2"
reqwest = { version = "0.12.24", features = [
rayon = "1.10.0"
regex = "1.11.1"
reqwest = { version = "0.12.20", features = [
"rustls-tls",
"json",
], default-features = false }
rustls = { version = "0.23.35", features = ["ring"], default-features = false }
rustls-pki-types = { version = "1.13.0", features = ["alloc"] }
rustls = { version = "0.23.28", features = ["ring"], default-features = false }
rustls-pki-types = { version = "1.12.0", features = ["alloc"] }
rustls-pemfile = "2.2.0"
segment = { version = "0.2.6" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9"
siphasher = "1.0.1"
slice-group-by = "0.3.1"
static-files = { version = "0.3.1", optional = true }
sysinfo = "0.37.2"
static-files = { version = "0.2.5", optional = true }
sysinfo = "0.35.2"
tar = "0.4.44"
tempfile = "3.23.0"
thiserror = "2.0.17"
time = { version = "0.3.44", features = [
tempfile = "3.20.0"
thiserror = "2.0.12"
time = { version = "0.3.41", features = [
"serde-well-known",
"formatting",
"parsing",
"macros",
] }
tokio = { version = "1.48.0", features = ["full"] }
toml = "0.9.8"
uuid = { version = "1.18.1", features = ["serde", "v4", "v7"] }
tokio = { version = "1.45.1", features = ["full"] }
toml = "0.8.23"
uuid = { version = "1.18.0", features = ["serde", "v4", "v7"] }
serde_urlencoded = "0.7.1"
termcolor = "1.4.1"
url = { version = "2.5.7", features = ["serde"] }
url = { version = "2.5.4", features = ["serde"] }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = ["json"] }
tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
tracing-actix-web = "0.7.19"
tracing-actix-web = "0.7.18"
build-info = { version = "1.7.0", path = "../build-info" }
roaring = "0.10.12"
mopa-maintained = "0.2.3"
@@ -114,35 +114,35 @@ utoipa = { version = "5.4.0", features = [
utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }
async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" }
secrecy = "0.10.3"
actix-web-lab = { version = "0.24.3", default-features = false }
actix-web-lab = { version = "0.24.1", default-features = false }
urlencoding = "2.1.3"
backoff = { version = "0.4.0", features = ["tokio"] }
humantime = { version = "2.3.0", default-features = false }
[dev-dependencies]
actix-rt = "2.11.0"
brotli = "8.0.2"
actix-rt = "2.10.0"
brotli = "8.0.1"
# fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["redactions"] }
manifest-dir-macros = "0.1.18"
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }
temp-env = "0.3.6"
wiremock = "0.6.5"
wiremock = "0.6.3"
yaup = "0.3.1"
[build-dependencies]
anyhow = { version = "1.0.100", optional = true }
cargo_toml = { version = "0.22.3", optional = true }
anyhow = { version = "1.0.98", optional = true }
cargo_toml = { version = "0.22.1", optional = true }
hex = { version = "0.4.3", optional = true }
reqwest = { version = "0.12.24", features = [
reqwest = { version = "0.12.20", features = [
"blocking",
"rustls-tls",
], default-features = false, optional = true }
sha-1 = { version = "0.10.1", optional = true }
static-files = { version = "0.3.1", optional = true }
tempfile = { version = "3.23.0", optional = true }
zip = { version = "6.0.0", optional = true }
static-files = { version = "0.2.5", optional = true }
tempfile = { version = "3.20.0", optional = true }
zip = { version = "4.1.0", optional = true }
[features]
default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]
@@ -160,7 +160,6 @@ mini-dashboard = [
]
chinese = ["meilisearch-types/chinese"]
chinese-pinyin = ["meilisearch-types/chinese-pinyin"]
enterprise = ["meilisearch-types/enterprise", "index-scheduler/enterprise"]
hebrew = ["meilisearch-types/hebrew"]
japanese = ["meilisearch-types/japanese"]
korean = ["meilisearch-types/korean"]

View File

@@ -1,7 +1,7 @@
use std::any::TypeId;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, Instant};
@@ -195,7 +195,7 @@ struct Infos {
experimental_enable_logs_route: bool,
experimental_reduce_indexing_memory_usage: bool,
experimental_max_number_of_batched_tasks: usize,
experimental_limit_batched_tasks_total_size: Option<u64>,
experimental_limit_batched_tasks_total_size: u64,
experimental_network: bool,
experimental_multimodal: bool,
experimental_chat_completions: bool,
@@ -344,14 +344,14 @@ impl Infos {
experimental_no_edition_2024_for_dumps,
experimental_vector_store_setting: vector_store_setting,
gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(),
db_path: db_path != Path::new("./data.ms"),
db_path: db_path != PathBuf::from("./data.ms"),
import_dump: import_dump.is_some(),
dump_dir: dump_dir != Path::new("dumps/"),
dump_dir: dump_dir != PathBuf::from("dumps/"),
ignore_missing_dump,
ignore_dump_if_db_exists,
import_snapshot: import_snapshot.is_some(),
schedule_snapshot,
snapshot_dir: snapshot_dir != Path::new("snapshots/"),
snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
uses_s3_snapshots: s3_snapshot_options.is_some(),
ignore_missing_snapshot,
ignore_snapshot_if_db_exists,
@@ -359,7 +359,7 @@ impl Infos {
http_payload_size_limit,
experimental_max_number_of_batched_tasks,
experimental_limit_batched_tasks_total_size:
experimental_limit_batched_tasks_total_size.map(|size| size.as_u64()),
experimental_limit_batched_tasks_total_size.into(),
task_queue_webhook: task_webhook_url.is_some(),
task_webhook_authorization_header: task_webhook_authorization_header.is_some(),
log_level: log_level.to_string(),

View File

@@ -6,14 +6,10 @@ use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
use meilisearch_types::milli;
use meilisearch_types::milli::OrderBy;
use meilisearch_types::tasks::network::headers::{
PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
};
use serde_json::Value;
use tokio::task::JoinError;
use uuid::Uuid;
use crate::routes::indexes::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
#[derive(Debug, thiserror::Error)]
#[allow(clippy::large_enum_variant)]
@@ -97,58 +93,8 @@ pub enum MeilisearchHttpError {
} else { PROXY_ORIGIN_TASK_UID_HEADER }
)]
InconsistentOriginHeaders { is_remote_missing: bool },
#[error("Inconsistent `Import` headers: {remote}: {remote_status}, {index}: {index_status}, {docs}: {docs_status}.\n - Hint: either all three headers should be provided, or none of them",
remote = PROXY_IMPORT_REMOTE_HEADER,
remote_status = if *is_remote_missing { "missing" } else { "provided" },
index = PROXY_IMPORT_INDEX_HEADER,
index_status = if *is_index_missing { "missing" } else { "provided" },
docs = PROXY_IMPORT_DOCS_HEADER,
docs_status = if *is_docs_missing { "missing" } else { "provided" }
)]
InconsistentImportHeaders {
is_remote_missing: bool,
is_index_missing: bool,
is_docs_missing: bool,
},
#[error("Inconsistent `Import-Metadata` headers: {index_count}: {index_count_status}, {task_key}: {task_key_status}, {total_index_documents}: {total_index_documents_status}.\n - Hint: either all three headers should be provided, or none of them",
index_count = PROXY_IMPORT_INDEX_COUNT_HEADER,
index_count_status = if *is_index_count_missing { "missing" } else { "provided" },
task_key = PROXY_IMPORT_TASK_KEY_HEADER,
task_key_status = if *is_task_key_missing { "missing" } else { "provided" },
total_index_documents = PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
total_index_documents_status = if *is_total_index_documents_missing { "missing" } else { "provided" },
)]
InconsistentImportMetadataHeaders {
is_index_count_missing: bool,
is_task_key_missing: bool,
is_total_index_documents_missing: bool,
},
#[error(
"Inconsistent task network headers: origin headers: {origin_status}, import headers: {import_status}, import metadata: {import_metadata_status}",
origin_status = if *is_missing_origin { "missing" } else { "present" },
import_status = if *is_missing_import { "missing" } else { "present" },
import_metadata_status = if *is_missing_import_metadata { "missing" } else { "present" })]
InconsistentTaskNetworkHeaders {
is_missing_origin: bool,
is_missing_import: bool,
is_missing_import_metadata: bool,
},
#[error("Invalid value for header `{header_name}`: {msg}")]
#[error("Invalid value for header {header_name}: {msg}")]
InvalidHeaderValue { header_name: &'static str, msg: String },
#[error("This remote is not the leader of the network.\n - Note: only the leader `{leader}` can receive new tasks.")]
NotLeader { leader: String },
#[error("Unexpected `previousRemotes` in network call.\n - Note: `previousRemote` is reserved for internal use.")]
UnexpectedNetworkPreviousRemotes,
#[error("The network version in request is too old.\n - Received: {received}\n - Expected at least: {expected_at_least}")]
NetworkVersionTooOld { received: Uuid, expected_at_least: Uuid },
#[error("Remote `{remote}` encountered an error: {error}")]
RemoteIndexScheduler { remote: String, error: index_scheduler::Error },
#[error("{if_remote}Already has a pending network task with uid {task_uid}.\n - Note: No network task can be registered while any previous network task is not done processing.\n - Hint: Wait for task {task_uid} to complete or cancel it.",
if_remote = if let Some(remote) = remote {
format!("Remote `{remote}` encountered an error: ")
} else { "".into() })]
UnprocessedNetworkTask { remote: Option<String>, task_uid: meilisearch_types::tasks::TaskId },
}
impl MeilisearchHttpError {
@@ -176,7 +122,6 @@ impl ErrorCode for MeilisearchHttpError {
MeilisearchHttpError::SerdeJson(_) => Code::Internal,
MeilisearchHttpError::HeedError(_) => Code::Internal,
MeilisearchHttpError::IndexScheduler(e) => e.error_code(),
MeilisearchHttpError::RemoteIndexScheduler { error, .. } => error.error_code(),
MeilisearchHttpError::Milli { error, .. } => error.error_code(),
MeilisearchHttpError::Payload(e) => e.error_code(),
MeilisearchHttpError::FileStore(_) => Code::Internal,
@@ -197,19 +142,10 @@ impl ErrorCode for MeilisearchHttpError {
MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
Code::InvalidMultiSearchQueryPersonalization
}
MeilisearchHttpError::InconsistentOriginHeaders { .. }
| MeilisearchHttpError::InconsistentImportHeaders { .. }
| MeilisearchHttpError::InconsistentImportMetadataHeaders { .. }
| MeilisearchHttpError::InconsistentTaskNetworkHeaders { .. } => {
MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
Code::InconsistentDocumentChangeHeaders
}
MeilisearchHttpError::InvalidHeaderValue { .. } => Code::InvalidHeaderValue,
MeilisearchHttpError::NotLeader { .. } => Code::NotLeader,
MeilisearchHttpError::UnexpectedNetworkPreviousRemotes => {
Code::UnexpectedNetworkPreviousRemotes
}
MeilisearchHttpError::NetworkVersionTooOld { .. } => Code::NetworkVersionTooOld,
MeilisearchHttpError::UnprocessedNetworkTask { .. } => Code::UnprocessedNetworkTask,
}
}
}
@@ -233,14 +169,6 @@ impl From<aweb::error::PayloadError> for MeilisearchHttpError {
}
}
impl<T: meilisearch_types::tasks::network::headers::GetHeader>
From<meilisearch_types::tasks::network::headers::DecodeError<T>> for MeilisearchHttpError
{
fn from(value: meilisearch_types::tasks::network::headers::DecodeError<T>) -> Self {
Self::InvalidHeaderValue { header_name: value.header(), msg: value.to_string() }
}
}
#[derive(Debug, thiserror::Error)]
pub enum ActixPayloadError {
#[error("The provided payload is incomplete and cannot be parsed")]

View File

@@ -12,7 +12,6 @@ pub mod option;
#[cfg(test)]
mod option_test;
pub mod personalization;
pub mod proxy;
pub mod routes;
pub mod search;
pub mod search_queue;
@@ -230,19 +229,8 @@ pub fn setup_meilisearch(
autobatching_enabled: true,
cleanup_enabled: !opt.experimental_replication_parameters,
max_number_of_tasks: 1_000_000,
export_default_payload_size_bytes: almost_as_big_as(opt.http_payload_size_limit),
max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.map_or_else(
|| {
opt.indexer_options
.max_indexing_memory
// By default, we use half of the available memory to determine the size of batched tasks
.map_or(u64::MAX, |mem| mem.as_u64() / 2)
// And never exceed 10 GiB when we infer the limit
.min(10 * 1024 * 1024 * 1024)
},
|size| size.as_u64(),
),
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.into(),
index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
index_count: DEFAULT_INDEX_COUNT,
instance_features: opt.to_instance_features(),
@@ -341,13 +329,6 @@ pub fn setup_meilisearch(
Ok((index_scheduler, auth_controller))
}
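For concreteness, a quick sketch (not part of the diff) of the inference performed by the removed `map_or_else` default above; the closure is a hypothetical stand-in operating on raw byte counts:
// half of the indexing memory, clamped to 10 GiB when inferred
let infer_limit = |max_indexing_memory: Option<u64>| {
    max_indexing_memory.map_or(u64::MAX, |mem| mem / 2).min(10 * 1024 * 1024 * 1024)
};
assert_eq!(infer_limit(Some(8 * 1024 * 1024 * 1024)), 4 * 1024 * 1024 * 1024); // 8 GiB -> 4 GiB
assert_eq!(infer_limit(None), 10 * 1024 * 1024 * 1024); // unset -> capped at 10 GiB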
/// Returns the input - 1MiB, or at least 20MiB
fn almost_as_big_as(input: byte_unit::Byte) -> byte_unit::Byte {
let with_margin = input.subtract(byte_unit::Byte::MEBIBYTE);
let at_least = byte_unit::Byte::MEBIBYTE.multiply(20).unwrap();
with_margin.unwrap_or(at_least).max(at_least)
}
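Two worked values for the helper above (a sketch reusing the `byte_unit` APIs the function itself calls):
use byte_unit::Byte;
// a 100 MiB payload limit yields 99 MiB: the input minus the 1 MiB margin
assert_eq!(almost_as_big_as(Byte::MEBIBYTE.multiply(100).unwrap()), Byte::MEBIBYTE.multiply(99).unwrap());
// anything at or below 21 MiB floors at the 20 MiB minimum
assert_eq!(almost_as_big_as(Byte::MEBIBYTE.multiply(5).unwrap()), Byte::MEBIBYTE.multiply(20).unwrap());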
/// Try to start the IndexScheduler and AuthController without checking the VERSION file or anything.
fn open_or_create_database_unchecked(
opt: &Opt,

View File

@@ -1,8 +1,7 @@
use lazy_static::lazy_static;
use prometheus::{
opts, register_gauge, register_gauge_vec, register_histogram_vec, register_int_counter_vec,
register_int_gauge, register_int_gauge_vec, Gauge, GaugeVec, HistogramVec, IntCounterVec,
IntGauge, IntGaugeVec,
opts, register_gauge, register_histogram_vec, register_int_counter_vec, register_int_gauge,
register_int_gauge_vec, Gauge, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
};
lazy_static! {
@@ -74,20 +73,6 @@ lazy_static! {
&["kind", "value"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_BATCH_RUNNING_PROGRESS_TRACE: GaugeVec = register_gauge_vec!(
opts!("meilisearch_batch_running_progress_trace", "The currently running progress trace"),
&["batch_uid", "step_name"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS: IntGaugeVec =
register_int_gauge_vec!(
opts!(
"meilisearch_last_finished_batches_progress_trace_ms",
"The last few batches progress trace in milliseconds"
),
&["batch_uid", "step_name"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_LAST_UPDATE: IntGauge =
register_int_gauge!(opts!("meilisearch_last_update", "Meilisearch Last Update"))
.expect("Can't create a metric");

View File

@@ -473,14 +473,11 @@ pub struct Opt {
#[serde(default = "default_limit_batched_tasks")]
pub experimental_max_number_of_batched_tasks: usize,
/// Experimentally controls the maximum total size, in bytes, of tasks that will be processed
/// simultaneously. When unspecified, defaults to half of the maximum indexing memory,
/// clamped to 10 GiB.
///
/// See: <https://github.com/orgs/meilisearch/discussions/801>
#[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE)]
#[serde(default)]
pub experimental_limit_batched_tasks_total_size: Option<Byte>,
/// Experimentally reduces the maximum total size, in bytes, of tasks that will be processed at once,
/// see: <https://github.com/orgs/meilisearch/discussions/801>
#[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE, default_value_t = default_limit_batched_tasks_total_size())]
#[serde(default = "default_limit_batched_tasks_total_size")]
pub experimental_limit_batched_tasks_total_size: Byte,
/// Enables experimental caching of search query embeddings. The value represents the maximal number of entries in the cache of each
/// distinct embedder.
@@ -704,12 +701,10 @@ impl Opt {
MEILI_EXPERIMENTAL_MAX_NUMBER_OF_BATCHED_TASKS,
experimental_max_number_of_batched_tasks.to_string(),
);
if let Some(limit) = experimental_limit_batched_tasks_total_size {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE,
limit.to_string(),
);
}
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE,
experimental_limit_batched_tasks_total_size.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES,
experimental_embedding_cache_entries.to_string(),
@@ -1278,6 +1273,10 @@ fn default_limit_batched_tasks() -> usize {
usize::MAX
}
fn default_limit_batched_tasks_total_size() -> Byte {
Byte::from_u64(u64::MAX)
}
fn default_embedding_cache_entries() -> usize {
0
}
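As an aside, `byte_unit::Byte` parses the human-readable values this option accepts, just as the codebase already does for "10GiB"; a minimal sketch:
use std::str::FromStr;
use byte_unit::Byte;
// e.g. MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE=5GiB
let limit = Byte::from_str("5GiB").unwrap();
assert_eq!(limit.as_u64(), 5 * 1024 * 1024 * 1024);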

View File

@@ -1,14 +1,14 @@
use std::time::Duration;
use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::milli::TimeBudget;
use crate::search::{Personalize, SearchResult};
use meilisearch_types::{
error::{Code, ErrorCode, ResponseError},
milli::TimeBudget,
};
use rand::Rng;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::{debug, info, warn};
use crate::search::{Personalize, SearchResult};
const COHERE_API_URL: &str = "https://api.cohere.ai/v1/rerank";
const MAX_RETRIES: u32 = 10;

View File

@@ -1,43 +0,0 @@
use std::fs::File;
use meilisearch_types::network::Remote;
pub enum Body<T, F>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
NdJsonPayload(File),
Inline(T),
Generated(T, F),
None,
}
impl Body<(), fn(&str, &Remote, &mut ())> {
pub fn with_ndjson_payload(file: File) -> Self {
Self::NdJsonPayload(file)
}
pub fn none() -> Self {
Self::None
}
}
impl<T> Body<T, fn(&str, &Remote, &mut T)>
where
T: serde::Serialize,
{
pub fn inline(payload: T) -> Self {
Self::Inline(payload)
}
}
impl<T, F> Body<T, F>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
pub fn generated(initial: T, f: F) -> Self {
Self::Generated(initial, f)
}
}
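A short usage sketch of the constructors above (the JSON payload and closure are illustrative, not from the diff):
// no payload at all
let no_body = Body::none();
// a fixed JSON payload, sent as-is to every remote
let fixed = Body::inline(serde_json::json!({ "q": "shoes" }));
// a payload recomputed per remote before serialization
let per_remote = Body::generated(serde_json::json!({}), |name, _remote, payload| {
    payload["remote"] = serde_json::Value::from(name);
});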

View File

@@ -1,31 +0,0 @@
use actix_web::HttpRequest;
use index_scheduler::IndexScheduler;
use meilisearch_types::network::{Network, Remote};
use meilisearch_types::tasks::network::{DbTaskNetwork, TaskNetwork};
use meilisearch_types::tasks::Task;
use crate::error::MeilisearchHttpError;
use crate::proxy::Body;
pub fn task_network_and_check_leader_and_version(
_req: &HttpRequest,
_network: &Network,
) -> Result<Option<TaskNetwork>, MeilisearchHttpError> {
Ok(None)
}
pub async fn proxy<T, F>(
_index_scheduler: &IndexScheduler,
_index_uid: Option<&str>,
_req: &HttpRequest,
_task_network: DbTaskNetwork,
_network: Network,
_body: Body<T, F>,
task: &Task,
) -> Result<Task, MeilisearchHttpError>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
Ok(task.clone())
}

View File

@@ -1,618 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use std::collections::BTreeMap;
use actix_web::http::header::CONTENT_TYPE;
use actix_web::HttpRequest;
use bytes::Bytes;
use index_scheduler::IndexScheduler;
use meilisearch_types::error::ResponseError;
use meilisearch_types::network::Remote;
use meilisearch_types::tasks::network::headers::{GetHeader, SetHeader};
use meilisearch_types::tasks::network::{
DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
};
use meilisearch_types::tasks::{Task, TaskId};
use reqwest::{RequestBuilder, StatusCode};
use serde::de::DeserializeOwned;
use serde_json::Value;
use uuid::Uuid;
use crate::error::MeilisearchHttpError;
use crate::proxy::{Body, ProxyError, ReqwestErrorWithoutUrl};
use crate::routes::SummarizedTaskView;
mod timeouts {
use std::sync::LazyLock;
pub static CONNECT_SECONDS: LazyLock<u64> =
LazyLock::new(|| fetch_or_default("MEILI_EXPERIMENTAL_PROXY_CONNECT_TIMEOUT_SECONDS", 3));
pub static BACKOFF_SECONDS: LazyLock<u64> =
LazyLock::new(|| fetch_or_default("MEILI_EXPERIMENTAL_PROXY_BACKOFF_TIMEOUT_SECONDS", 25));
pub static REQUEST_SECONDS: LazyLock<u64> =
LazyLock::new(|| fetch_or_default("MEILI_EXPERIMENTAL_PROXY_REQUEST_TIMEOUT_SECONDS", 30));
fn fetch_or_default(key: &str, default: u64) -> u64 {
match std::env::var(key) {
Ok(timeout) => timeout.parse().unwrap_or_else(|_| {
panic!("`{key}` environment variable is not parseable as an integer: {timeout}")
}),
Err(std::env::VarError::NotPresent) => default,
Err(std::env::VarError::NotUnicode(_)) => {
panic!("`{key}` environment variable is not set to a integer")
}
}
}
}
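These three knobs are read once, on first access of each `LazyLock`. A sketch of overriding one in a test, assuming the variable is set before anything touches `timeouts` (on recent toolchains `std::env::set_var` must additionally be wrapped in `unsafe`):
std::env::set_var("MEILI_EXPERIMENTAL_PROXY_CONNECT_TIMEOUT_SECONDS", "10");
assert_eq!(*timeouts::CONNECT_SECONDS, 10); // cached for the rest of the process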
impl<T, F> Body<T, F>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
pub fn into_bytes_iter(
self,
remotes: impl IntoIterator<Item = (String, Remote)>,
) -> Result<
impl Iterator<Item = (Option<Bytes>, (String, Remote))>,
meilisearch_types::milli::Error,
> {
let bytes = match self {
Body::NdJsonPayload(file) => {
Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
}
Body::Inline(payload) => {
Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
}
Body::None => None,
Body::Generated(mut initial, mut f) => {
return Ok(either::Right(remotes.into_iter().map(move |(name, remote)| {
f(&name, &remote, &mut initial);
let bytes =
Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()));
(bytes, (name, remote))
})));
}
};
Ok(either::Left(std::iter::repeat(bytes).zip(remotes)))
}
pub fn into_bytes(
self,
remote_name: &str,
remote: &Remote,
) -> Result<Option<Bytes>, meilisearch_types::milli::Error> {
Ok(match self {
Body::NdJsonPayload(file) => {
Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
}
Body::Inline(payload) => {
Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
}
Body::None => None,
Body::Generated(mut initial, mut f) => {
f(remote_name, remote, &mut initial);
Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()))
}
})
}
}
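Worth noting for both conversions above: `Bytes::from_owner` keeps the `Mmap` alive for as long as any `Bytes` handle exists, and every clone is a reference-counted view of the same mapping, which is what makes the per-remote `body.clone()` in the retry loops cheap. A minimal sketch (hypothetical file path):
let file = std::fs::File::open("payload.ndjson")?;
// SAFETY: as in the code above, the file must not be truncated while mapped
let body = bytes::Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? });
let view = body.clone(); // no copy: shares the same mapping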
/// Parses the headers to determine whether this task is a duplicate that originates with a remote.
///
/// If not, checks whether this remote is the leader and returns `MeilisearchHttpError::NotLeader` if it is not.
///
/// If there is no leader, returns `Ok(None)`
///
/// # Errors
///
- `MeilisearchHttpError::NotLeader`: if the following are true simultaneously:
/// 1. The task originates with the current node
/// 2. There's a declared `leader`
/// 3. The declared leader is **not** the current node
/// - `MeilisearchHttpError::InvalidHeaderValue`: if headers cannot be parsed as a task network.
- `MeilisearchHttpError::InconsistentTaskNetworkHeaders`: if only some of the headers are present.
pub fn task_network_and_check_leader_and_version(
req: &HttpRequest,
network: &meilisearch_types::network::Network,
) -> Result<Option<TaskNetwork>, MeilisearchHttpError> {
let task_network =
match (origin_from_req(req)?, import_data_from_req(req)?, import_metadata_from_req(req)?) {
(Some(network_change), Some(import_from), Some(metadata)) => {
TaskNetwork::Import { import_from, network_change, metadata }
}
(Some(origin), None, None) => TaskNetwork::Origin { origin },
(None, None, None) => {
match (network.leader.as_deref(), network.local.as_deref()) {
// 1. Always allowed if there is no leader
(None, _) => return Ok(None),
// 2. Allowed if the leader is self
(Some(leader), Some(this)) if leader == this => (),
// 3. Any other change is disallowed
(Some(leader), _) => {
return Err(MeilisearchHttpError::NotLeader { leader: leader.to_string() })
}
}
TaskNetwork::Remotes {
remote_tasks: Default::default(),
network_version: network.version,
}
}
// all good cases were matched, so this is always an error
(origin, import_from, metadata) => {
return Err(MeilisearchHttpError::InconsistentTaskNetworkHeaders {
is_missing_origin: origin.is_none(),
is_missing_import: import_from.is_none(),
is_missing_import_metadata: metadata.is_none(),
})
}
};
if task_network.network_version() < network.version {
return Err(MeilisearchHttpError::NetworkVersionTooOld {
received: task_network.network_version(),
expected_at_least: network.version,
});
}
Ok(Some(task_network))
}
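The leader check in the `(None, None, None)` arm boils down to the following rule, extracted here as a standalone sketch over plain strings:
fn may_accept_local_task(leader: Option<&str>, this: Option<&str>) -> Result<(), String> {
    match (leader, this) {
        // 1. no declared leader: any node accepts new tasks
        (None, _) => Ok(()),
        // 2. this node is the leader
        (Some(leader), Some(this)) if leader == this => Ok(()),
        // 3. someone else is the leader: refuse
        (Some(leader), _) => Err(format!("only the leader `{leader}` can receive new tasks")),
    }
}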
/// Updates the task description and, if necessary, proxies the passed request to the rest of the network.
///
/// This function reads the custom headers from the request to determine whether it must proxy the request or whether the request
/// has already been proxied.
///
/// - when it must proxy the request, the endpoint, method and query params are retrieved from the passed `req`, then the `body` is
/// sent to all remotes of the `network` (except `self`). The responses from the remotes are collected to update the passed `task`
/// with the task ids from the task queues of the remotes.
/// - when the request has already been proxied, the custom headers contain information about the remote that created the initial task.
/// This information is copied to the passed task.
///
/// # Returns
///
/// The updated task. The task is read back from the database to avoid erasing concurrent changes.
pub async fn proxy<T, F>(
index_scheduler: &IndexScheduler,
index_uid: Option<&str>,
req: &HttpRequest,
mut task_network: DbTaskNetwork,
network: meilisearch_types::network::Network,
body: Body<T, F>,
task: &Task,
) -> Result<Task, MeilisearchHttpError>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
if let DbTaskNetwork::Remotes { remote_tasks, network_version } = &mut task_network {
let network_version = *network_version;
let this = network
.local
.as_deref()
.expect("inconsistent `network.leader` and `network.self`")
.to_owned();
let content_type = match &body {
// for file bodies, force x-ndjson
Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
// otherwise get content type from request
_ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
};
let mut in_flight_remote_queries = BTreeMap::new();
let client = reqwest::ClientBuilder::new()
.connect_timeout(std::time::Duration::from_secs(*timeouts::CONNECT_SECONDS))
.build()
.unwrap();
let method = from_old_http_method(req.method());
// send payload to all remotes
for (body, (node_name, node)) in body
.into_bytes_iter(network.remotes.into_iter().filter(|(name, _)| name.as_str() != this))
.map_err(|err| {
MeilisearchHttpError::from_milli(err, index_uid.map(ToOwned::to_owned))
})?
{
tracing::trace!(node_name, "proxying task to remote");
let client = client.clone();
let api_key = node.write_api_key;
let this = this.clone();
let task_uid = task.uid;
let method = method.clone();
let path_and_query = req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");
in_flight_remote_queries.insert(
node_name,
tokio::spawn({
let url = format!("{}{}", node.url, path_and_query);
let content_type = content_type.map(|b| b.to_owned());
let backoff = backoff::ExponentialBackoffBuilder::new()
.with_max_elapsed_time(Some(std::time::Duration::from_secs(
*timeouts::BACKOFF_SECONDS,
)))
.build();
backoff::future::retry(backoff, move || {
let url = url.clone();
let client = client.clone();
let this = this.clone();
let content_type = content_type.clone();
let body = body.clone();
let api_key = api_key.clone();
let method = method.clone();
async move {
try_proxy(
method,
&url,
content_type.as_deref(),
network_version,
api_key.as_deref(),
&client,
&this,
task_uid,
body,
)
.await
}
})
}),
);
}
// wait for all in-flight queries to finish and collect their results
for (node_name, handle) in in_flight_remote_queries {
match handle.await {
Ok(Ok(res)) => {
let task_uid = res.task_uid;
remote_tasks.insert(node_name, Ok(task_uid).into());
}
Ok(Err(error)) => {
remote_tasks.insert(node_name, Err(error.as_response_error()).into());
}
Err(panic) => match panic.try_into_panic() {
Ok(panic) => {
let msg = match panic.downcast_ref::<&'static str>() {
Some(s) => *s,
None => match panic.downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
remote_tasks.insert(
node_name,
Err(ResponseError::from_msg(
msg.to_string(),
meilisearch_types::error::Code::Internal,
))
.into(),
);
}
Err(_) => {
tracing::error!("proxy task was unexpectedly cancelled")
}
},
}
}
}
Ok(index_scheduler.set_task_network(task.uid, task_network)?)
}
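The `downcast_ref` chain in the join-error branch is the standard idiom for recovering a panic message, since panics carry either a `&'static str` or a `String`; extracted as a standalone sketch:
fn panic_message(panic: Box<dyn std::any::Any + Send>) -> String {
    match panic.downcast_ref::<&'static str>() {
        Some(s) => s.to_string(),
        None => match panic.downcast_ref::<String>() {
            Some(s) => s.clone(),
            None => "Box<dyn Any>".to_string(),
        },
    }
}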
pub async fn send_request<T, F, U>(
path_and_query: &str,
method: reqwest::Method,
content_type: Option<String>,
body: Body<T, F>,
remote_name: &str,
remote: &Remote,
) -> Result<U, ProxyError>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
U: DeserializeOwned,
{
let content_type = match &body {
// for file bodies, force x-ndjson
Body::NdJsonPayload(_) => Some("application/x-ndjson".into()),
// otherwise get content type from request
_ => content_type,
};
let body = body.into_bytes(remote_name, remote).map_err(Box::new)?;
let client = reqwest::ClientBuilder::new()
.connect_timeout(std::time::Duration::from_secs(*timeouts::CONNECT_SECONDS))
.build()
.unwrap();
let url = format!("{}{}", remote.url, path_and_query);
// send payload to remote
tracing::trace!(remote_name, "sending request to remote");
let api_key = remote.write_api_key.clone();
let backoff = backoff::ExponentialBackoffBuilder::new()
.with_max_elapsed_time(Some(std::time::Duration::from_secs(*timeouts::BACKOFF_SECONDS)))
.build();
backoff::future::retry(backoff, move || {
let url = url.clone();
let client = client.clone();
let content_type = content_type.clone();
let body = body.clone();
let api_key = api_key.clone();
let method = method.clone();
async move {
let request = client
.request(method, url)
.timeout(std::time::Duration::from_secs(*timeouts::REQUEST_SECONDS));
let request = if let Some(body) = body { request.body(body) } else { request };
let request =
if let Some(api_key) = api_key { request.bearer_auth(api_key) } else { request };
let request = if let Some(content_type) = content_type {
request.header(CONTENT_TYPE.as_str(), content_type)
} else {
request
};
let response = request.send().await;
let response = match response {
Ok(response) => response,
Err(error) if error.is_timeout() => {
return Err(backoff::Error::transient(ProxyError::Timeout))
}
Err(error) => {
return Err(backoff::Error::transient(ProxyError::CouldNotSendRequest(
ReqwestErrorWithoutUrl::new(error),
)))
}
};
handle_response(response).await
}
})
.await
}
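A usage sketch for `send_request`; the path, remote name, and `remote` binding are hypothetical, and the response type `U` is inferred from the annotation:
// assuming `remote: &Remote` points at another Meilisearch instance
let summarized: SummarizedTaskView = send_request(
    "/indexes/movies/settings",
    reqwest::Method::PATCH,
    Some("application/json".to_string()),
    Body::inline(serde_json::json!({ "searchableAttributes": ["title"] })),
    "ms-1",
    remote,
)
.await?;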
async fn handle_response<U>(response: reqwest::Response) -> Result<U, backoff::Error<ProxyError>>
where
U: DeserializeOwned,
{
match response.status() {
status_code if status_code.is_success() => (),
StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
return Err(backoff::Error::Permanent(ProxyError::AuthenticationError))
}
status_code if status_code.is_client_error() => {
let response = parse_error(response).await;
return Err(backoff::Error::Permanent(ProxyError::BadRequest {
status_code,
response,
}));
}
status_code if status_code.is_server_error() => {
let response = parse_error(response).await;
return Err(backoff::Error::transient(ProxyError::RemoteError {
status_code,
response,
}));
}
status_code => {
tracing::warn!(
status_code = status_code.as_u16(),
"remote replied with unexpected status code"
);
}
}
let response: U = match parse_response(response).await {
Ok(response) => response,
Err(response) => {
return Err(backoff::Error::permanent(ProxyError::CouldNotParseResponse { response }))
}
};
Ok(response)
}
fn from_old_http_method(method: &actix_http::Method) -> reqwest::Method {
match method {
&actix_http::Method::CONNECT => reqwest::Method::CONNECT,
&actix_http::Method::DELETE => reqwest::Method::DELETE,
&actix_http::Method::GET => reqwest::Method::GET,
&actix_http::Method::HEAD => reqwest::Method::HEAD,
&actix_http::Method::OPTIONS => reqwest::Method::OPTIONS,
&actix_http::Method::PATCH => reqwest::Method::PATCH,
&actix_http::Method::POST => reqwest::Method::POST,
&actix_http::Method::PUT => reqwest::Method::PUT,
&actix_http::Method::TRACE => reqwest::Method::TRACE,
method => reqwest::Method::from_bytes(method.as_str().as_bytes()).unwrap(),
}
}
#[allow(clippy::too_many_arguments)]
async fn try_proxy(
method: reqwest::Method,
url: &str,
content_type: Option<&[u8]>,
network_version: Uuid,
api_key: Option<&str>,
client: &reqwest::Client,
this: &str,
task_uid: TaskId,
body: Option<Bytes>,
) -> Result<SummarizedTaskView, backoff::Error<ProxyError>> {
let request = client
.request(method, url)
.timeout(std::time::Duration::from_secs(*timeouts::REQUEST_SECONDS));
let request = if let Some(body) = body { request.body(body) } else { request };
let request = if let Some(api_key) = api_key { request.bearer_auth(api_key) } else { request };
let RequestWrapper(request) = RequestWrapper(request)
.set_origin_task_uid(task_uid)
.set_origin_network_version(network_version)
.set_origin_remote(this);
let request = if let Some(content_type) = content_type {
request.header(CONTENT_TYPE.as_str(), content_type)
} else {
request
};
let response = request.send().await;
let response = match response {
Ok(response) => response,
Err(error) if error.is_timeout() => {
return Err(backoff::Error::transient(ProxyError::Timeout))
}
Err(error) => {
return Err(backoff::Error::transient(ProxyError::CouldNotSendRequest(
ReqwestErrorWithoutUrl::new(error),
)))
}
};
handle_response(response).await
}
struct RequestWrapper(RequestBuilder);
impl meilisearch_types::tasks::network::headers::SetHeader for RequestWrapper {
fn set_header(self, name: &str, value: &str) -> Self {
Self(self.0.header(name, value))
}
}
async fn parse_error(response: reqwest::Response) -> Result<String, ReqwestErrorWithoutUrl> {
let bytes = match response.bytes().await {
Ok(bytes) => bytes,
Err(error) => return Err(ReqwestErrorWithoutUrl::new(error)),
};
Ok(parse_bytes_as_error(&bytes))
}
fn parse_bytes_as_error(bytes: &[u8]) -> String {
match serde_json::from_slice::<Value>(bytes) {
Ok(value) => value.to_string(),
Err(_) => String::from_utf8_lossy(bytes).into_owned(),
}
}
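Two worked values for the fallback above: valid JSON is normalized through `serde_json::Value`, anything else is passed through lossily:
assert_eq!(parse_bytes_as_error(br#"{"message":"oops"}"#), r#"{"message":"oops"}"#);
assert_eq!(parse_bytes_as_error(b"plain failure"), "plain failure");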
async fn parse_response<T: DeserializeOwned>(
response: reqwest::Response,
) -> Result<T, Result<String, ReqwestErrorWithoutUrl>> {
let bytes = match response.bytes().await {
Ok(bytes) => bytes,
Err(error) => return Err(Err(ReqwestErrorWithoutUrl::new(error))),
};
match serde_json::from_slice::<T>(&bytes) {
Ok(value) => Ok(value),
Err(_) => Err(Ok(parse_bytes_as_error(&bytes))),
}
}
struct ResponseWrapper<'a>(&'a HttpRequest);
impl<'a> meilisearch_types::tasks::network::headers::GetHeader for ResponseWrapper<'a> {
type Error = actix_http::header::ToStrError;
fn get_header(&self, name: &str) -> Result<Option<&str>, Self::Error> {
self.0.headers().get(name).map(|value| value.to_str()).transpose()
}
}
pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchHttpError> {
let req = ResponseWrapper(req);
let (remote_name, task_uid, network_version) = match (
req.get_origin_remote()?,
req.get_origin_task_uid()?,
req.get_origin_network_version()?,
) {
(None, None, _) => return Ok(None),
(None, Some(_), _) => {
return Err(MeilisearchHttpError::InconsistentOriginHeaders { is_remote_missing: true })
}
(Some(_), None, _) => {
return Err(MeilisearchHttpError::InconsistentOriginHeaders {
is_remote_missing: false,
})
}
(Some(remote_name), Some(task_uid), network_version) => {
(remote_name, task_uid, network_version)
}
};
let network_version = network_version.unwrap_or_else(Uuid::nil);
Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid, network_version }))
}
pub fn import_data_from_req(req: &HttpRequest) -> Result<Option<ImportData>, MeilisearchHttpError> {
let req = ResponseWrapper(req);
let (remote_name, index_name, document_count) =
match (req.get_import_remote()?, req.get_import_index()?, req.get_import_docs()?) {
(None, None, None) => return Ok(None),
(Some(remote_name), index_name, Some(documents)) => {
(remote_name, index_name, documents)
}
// catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some(_), _, Some(_))
(remote_name, index_name, documents) => {
return Err(MeilisearchHttpError::InconsistentImportHeaders {
is_remote_missing: remote_name.is_none(),
is_index_missing: index_name.is_none(),
is_docs_missing: documents.is_none(),
})
}
};
Ok(Some(ImportData {
remote_name: remote_name.to_string(),
index_name: index_name.map(|index_name| index_name.to_string()),
document_count,
}))
}
pub fn import_metadata_from_req(
req: &HttpRequest,
) -> Result<Option<ImportMetadata>, MeilisearchHttpError> {
let req = ResponseWrapper(req);
let (index_count, task_key, total_index_documents) = match (
req.get_import_index_count()?,
req.get_import_task_key()?,
req.get_import_index_docs()?,
) {
(None, None, None) => return Ok(None),
(Some(index_count), task_key, Some(total_index_documents)) => {
(index_count, task_key, total_index_documents)
}
// catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some(_), _, Some(_))
(index_count, task_key, total_index_documents) => {
return Err(MeilisearchHttpError::InconsistentImportMetadataHeaders {
is_index_count_missing: index_count.is_none(),
is_task_key_missing: task_key.is_none(),
is_total_index_documents_missing: total_index_documents.is_none(),
})
}
};
Ok(Some(ImportMetadata { index_count, task_key, total_index_documents }))
}
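Both extractors above enforce the same shape: the two outer headers must be present together while the middle one may be omitted, and any other mix is inconsistent. As a standalone sketch:
fn outer_pair<A, B, C>(a: Option<A>, b: Option<B>, c: Option<C>) -> Result<Option<(A, Option<B>, C)>, &'static str> {
    match (a, b, c) {
        (None, None, None) => Ok(None),               // no headers at all
        (Some(a), b, Some(c)) => Ok(Some((a, b, c))), // outer pair present, middle optional
        _ => Err("inconsistent headers"),
    }
}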

Some files were not shown because too many files have changed in this diff.