Compare commits

..

17 Commits

Author SHA1 Message Date
Clément Renault
5b7bdf3459 Only provide the last batch info 2025-11-20 12:03:11 +01:00
Kerollmops
6e5b42e924 remove-me: Introduce the MEILI_MAX_INDEX_SIZE env variable to limit the size of an index 2025-11-20 11:47:59 +01:00
Kerollmops
03ced42ad3 Fix some errors 2025-11-20 11:47:59 +01:00
Kerollmops
4706440610 Use a NonZeroUsize to store the prefix buffer length 2025-11-20 11:47:58 +01:00
Clément Renault
6700df1319 Fix the retrieval of the doc in scope 2025-11-20 11:47:58 +01:00
Kerollmops
a068931bd9 Allow users to delete documents 2025-11-20 11:47:58 +01:00
Clément Renault
ce87e14aa5 Erase a document if the AST is set 2025-11-20 11:47:58 +01:00
Clément Renault
7ff28985a0 Use the script in the edit documents 2025-11-20 11:47:58 +01:00
Clément Renault
a70aba8b68 Introduce a new executeAfterUpdate index setting 2025-11-20 11:47:58 +01:00
Clément Renault
2d8a61b53a Seems to work great, still need to read function from settings 2025-11-20 11:47:58 +01:00
Kerollmops
bd1fdc10e6 Expose the three last batches timings 2025-11-20 11:47:58 +01:00
Kerollmops
d1d6e219b3 Update utoipa 2025-11-20 11:47:58 +01:00
Kerollmops
b6a4f917f8 Reset metrics values to keep current steps only 2025-11-20 11:47:58 +01:00
Kerollmops
5c2a431d57 Expose the step currently running on the metrics route 2025-11-20 11:47:58 +01:00
Kerollmops
5781c16957 Simplify the auth filters 2025-11-20 11:47:58 +01:00
Kerollmops
246c44aeb7 Expose the metrics for the last finished batch and not the processing one 2025-11-20 11:47:58 +01:00
Kerollmops
b2ed891f84 Expose batch progress traces on the metrics route 2025-11-20 11:47:57 +01:00
188 changed files with 2686 additions and 6692 deletions


@@ -24,11 +24,6 @@ TBD
 - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
 - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
-### Reminders when adding features
-- [ ] Write unit tests using insta
-- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
 ### Reminders when modifying the API
 - [ ] Update the openAPI file with utoipa:


@@ -18,7 +18,7 @@ jobs:
 timeout-minutes: 180 # 3h
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 with:
 profile: minimal


@@ -66,7 +66,9 @@ jobs:
 fetch-depth: 0 # fetch full history to be able to get main commit sha
 ref: ${{ steps.comment-branch.outputs.head_ref }}
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
+with:
+profile: minimal
 - name: Run benchmarks on PR ${{ github.event.issue.id }}
 run: |


@@ -12,7 +12,9 @@ jobs:
 timeout-minutes: 180 # 3h
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
+with:
+profile: minimal
 # Run benchmarks
 - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}


@@ -18,7 +18,7 @@ jobs:
 timeout-minutes: 4320 # 72h
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 with:
 profile: minimal


@@ -44,7 +44,7 @@ jobs:
 exit 1
 fi
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 with:
 profile: minimal


@@ -16,7 +16,7 @@ jobs:
 timeout-minutes: 4320 # 72h
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 with:
 profile: minimal


@@ -15,7 +15,7 @@ jobs:
 runs-on: benchmarks
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 with:
 profile: minimal


@@ -15,7 +15,7 @@ jobs:
 runs-on: benchmarks
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 with:
 profile: minimal


@@ -15,7 +15,7 @@ jobs:
 runs-on: benchmarks
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 with:
 profile: minimal


@@ -19,7 +19,6 @@ env:
 - [ ] Detail the change to the DB format and why they are forward compatible
 - [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
-- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
 ## This PR makes breaking changes
@@ -36,7 +35,8 @@ env:
 - [ ] Write the code to go from the old database to the new one
 - If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
 - If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
-- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
+- [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
 jobs:
 add-comment:


@@ -3,7 +3,7 @@ name: Look for flaky tests
 on:
 workflow_dispatch:
 schedule:
-- cron: "0 4 * * *" # Every day at 4:00AM
+- cron: '0 4 * * *' # Every day at 4:00AM
 jobs:
 flaky:
@@ -13,17 +13,11 @@ jobs:
 image: ubuntu:22.04
 steps:
 - uses: actions/checkout@v5
-- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-run: |
-sudo rm -rf "/opt/ghc" || true
-sudo rm -rf "/usr/share/dotnet" || true
-sudo rm -rf "/usr/local/lib/android" || true
-sudo rm -rf "/usr/local/share/boost" || true
 - name: Install needed dependencies
 run: |
 apt-get update && apt-get install -y curl
 apt-get install build-essential -y
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 - name: Install cargo-flaky
 run: cargo install cargo-flaky
 - name: Run cargo flaky in the dumps


@@ -12,7 +12,9 @@ jobs:
 timeout-minutes: 4320 # 72h
 steps:
 - uses: actions/checkout@v5
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
+with:
+profile: minimal
 # Run benchmarks
 - name: Run the fuzzer


@@ -25,13 +25,7 @@ jobs:
 run: |
 apt-get update && apt-get install -y curl
 apt-get install build-essential -y
-- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-run: |
-sudo rm -rf "/opt/ghc" || true
-sudo rm -rf "/usr/share/dotnet" || true
-sudo rm -rf "/usr/local/lib/android" || true
-sudo rm -rf "/usr/local/share/boost" || true
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
 - name: Install cargo-deb
 run: cargo install cargo-deb
 - uses: actions/checkout@v5


@@ -14,105 +14,10 @@ on:
workflow_dispatch: workflow_dispatch:
jobs: jobs:
build: docker:
runs-on: ${{ matrix.runner }} runs-on: docker
strategy:
matrix:
platform: [amd64, arm64]
edition: [community, enterprise]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
- edition: community
registry: getmeili/meilisearch
feature-flag: ""
- edition: enterprise
registry: getmeili/meilisearch-enterprise
feature-flag: "--features enterprise"
permissions: {}
steps:
- uses: actions/checkout@v5
- name: Prepare
run: |
platform=linux/${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
platforms: linux/${{ matrix.platform }}
install: true
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ matrix.registry }}
# Prevent `latest` to be updated for each new tag pushed.
# We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
flavor: latest=false
tags: |
type=ref,event=tag
type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
- name: Build and push by digest
uses: docker/build-push-action@v6
id: build-and-push
with:
platforms: linux/${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ matrix.registry }}
outputs: type=image,push-by-digest=true,name-canonical=true,push=true
build-args: |
COMMIT_SHA=${{ github.sha }}
COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
GIT_TAG=${{ github.ref_name }}
EXTRA_ARGS=${{ matrix.feature-flag }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build-and-push.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ matrix.edition }}-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge:
runs-on: ubuntu-latest
strategy:
matrix:
edition: [community, enterprise]
include:
- edition: community
registry: getmeili/meilisearch
- edition: enterprise
registry: getmeili/meilisearch-enterprise
needs:
- build
permissions: permissions:
id-token: write # This is needed to use Cosign in keyless mode id-token: write # This is needed to use Cosign in keyless mode
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
@@ -153,30 +58,26 @@ jobs:
echo "date=$commit_date" >> $GITHUB_OUTPUT echo "date=$commit_date" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Install cosign - name: Install cosign
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0 uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
- name: Download digests
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: digests-${{ matrix.edition }}-*
merge-multiple: true
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
username: ${{ secrets.DOCKERHUB_USERNAME }} username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Docker meta - name: Docker meta
id: meta id: meta
uses: docker/metadata-action@v5 uses: docker/metadata-action@v5
with: with:
images: ${{ matrix.registry }} images: getmeili/meilisearch
# Prevent `latest` to be updated for each new tag pushed. # Prevent `latest` to be updated for each new tag pushed.
# We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases. # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
flavor: latest=false flavor: latest=false
@@ -187,31 +88,33 @@ jobs:
type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }} type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }} type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
- name: Create manifest list and push - name: Build and push
working-directory: ${{ runner.temp }}/digests uses: docker/build-push-action@v6
run: | id: build-and-push
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ with:
$(printf '${{ matrix.registry }}@sha256:%s ' *) push: true
platforms: linux/amd64,linux/arm64
- name: Inspect image to fetch digest to sign tags: ${{ steps.meta.outputs.tags }}
run: | build-args: |
digest=$(docker buildx imagetools inspect --format='{{ json .Manifest }}' ${{ matrix.registry }}:${{ steps.meta.outputs.version }} | jq -r '.digest') COMMIT_SHA=${{ github.sha }}
echo "DIGEST=${digest}" >> $GITHUB_ENV COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
GIT_TAG=${{ github.ref_name }}
- name: Sign the images with GitHub OIDC Token - name: Sign the images with GitHub OIDC Token
env: env:
DIGEST: ${{ steps.build-and-push.outputs.digest }}
TAGS: ${{ steps.meta.outputs.tags }} TAGS: ${{ steps.meta.outputs.tags }}
run: | run: |
images="" images=""
for tag in ${TAGS}; do for tag in ${TAGS}; do
images+="${tag}@${{ env.DIGEST }} " images+="${tag}@${DIGEST} "
done done
cosign sign --yes ${images} cosign sign --yes ${images}
# /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel # /!\ Don't touch this without checking with Cloud team
- name: Notify meilisearch-cloud - name: Send CI information to Cloud team
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event) # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }} if: github.event_name == 'push'
uses: peter-evans/repository-dispatch@v3 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.MEILI_BOT_GH_PAT }} token: ${{ secrets.MEILI_BOT_GH_PAT }}
@@ -219,13 +122,21 @@ jobs:
event-type: cloud-docker-build event-type: cloud-docker-build
client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }' client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
# /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel # Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
- name: Notify meilisearch-kubernetes # - name: 'Setup jq'
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable # uses: dcarbone/install-jq-action
if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }} # - name: Send deployment to Swarmia
uses: peter-evans/repository-dispatch@v3 # if: github.event_name == 'push' && success()
with: # run: |
token: ${{ secrets.MEILI_BOT_GH_PAT }} # JSON_STRING=$( jq --null-input --compact-output \
repository: meilisearch/meilisearch-kubernetes # --arg version "${{ github.ref_name }}" \
event-type: meilisearch-release # --arg appName "meilisearch" \
client-payload: '{ "version": "${{ github.ref_name }}" }' # --arg environment "production" \
# --arg commitSha "${{ github.sha }}" \
# --arg repositoryFullName "${{ github.repository }}" \
# '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
# curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
# -H "Content-Type: application/json" \
# -d "$JSON_STRING" \
# https://hook.swarmia.com/deployments


@@ -32,61 +32,157 @@ jobs:
if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true' if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
run: bash .github/scripts/check-release.sh run: bash .github/scripts/check-release.sh
publish-binaries: publish-linux:
name: Publish binary for ${{ matrix.release }} ${{ matrix.edition }} edition name: Publish binary for Linux
runs-on: ${{ matrix.os }} runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
edition: [community, enterprise]
release:
[macos-amd64, macos-aarch64, windows, linux-amd64, linux-aarch64]
include:
- edition: "community"
feature-flag: ""
edition-suffix: ""
- edition: "enterprise"
feature-flag: "--features enterprise"
edition-suffix: "enterprise-"
- release: macos-amd64
os: macos-15-intel
binary_path: release/meilisearch
asset_name: macos-amd64
extra-args: ""
- release: macos-aarch64
os: macos-14
binary_path: aarch64-apple-darwin/release/meilisearch
asset_name: macos-apple-silicon
extra-args: "--target aarch64-apple-darwin"
- release: windows
os: windows-2022
binary_path: release/meilisearch.exe
asset_name: windows-amd64.exe
extra-args: ""
- release: linux-amd64
os: ubuntu-22.04
binary_path: x86_64-unknown-linux-gnu/release/meilisearch
asset_name: linux-amd64
extra-args: "--target x86_64-unknown-linux-gnu"
- release: linux-aarch64
os: ubuntu-22.04-arm
binary_path: aarch64-unknown-linux-gnu/release/meilisearch
asset_name: linux-aarch64
extra-args: "--target aarch64-unknown-linux-gnu"
needs: check-version needs: check-version
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.91.1 - name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.89
- name: Build - name: Build
run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }} run: cargo build --release --locked
# No need to upload binaries for dry run (cron or workflow_dispatch) # No need to upload binaries for dry run (cron or workflow_dispatch)
- name: Upload binaries to release - name: Upload binaries to release
if: github.event_name == 'release' if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2 uses: svenstaro/upload-release-action@2.11.2
with: with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }} repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.binary_path }} file: target/release/meilisearch
asset_name: meilisearch-${{ matrix.edition-suffix }}${{ matrix.asset_name }} asset_name: meilisearch-linux-amd64
tag: ${{ github.ref }}
publish-macos-windows:
name: Publish binary for ${{ matrix.os }}
runs-on: ${{ matrix.os }}
needs: check-version
strategy:
fail-fast: false
matrix:
os: [macos-14, windows-2022]
include:
- os: macos-14
artifact_name: meilisearch
asset_name: meilisearch-macos-amd64
- os: windows-2022
artifact_name: meilisearch.exe
asset_name: meilisearch-windows-amd64.exe
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron or workflow_dispatch)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/release/${{ matrix.artifact_name }}
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
publish-macos-apple-silicon:
name: Publish binary for macOS silicon
runs-on: macos-14
needs: check-version
strategy:
matrix:
include:
- target: aarch64-apple-darwin
asset_name: meilisearch-macos-apple-silicon
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Installing Rust toolchain
uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
target: ${{ matrix.target }}
- name: Cargo build
uses: actions-rs/cargo@v1
with:
command: build
args: --release --target ${{ matrix.target }}
- name: Upload the binary to release
# No need to upload binaries for dry run (cron or workflow_dispatch)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
publish-aarch64:
name: Publish binary for aarch64
runs-on: ubuntu-latest
needs: check-version
env:
DEBIAN_FRONTEND: noninteractive
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
strategy:
matrix:
include:
- target: aarch64-unknown-linux-gnu
asset_name: meilisearch-linux-aarch64
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Install needed dependencies
run: |
apt-get update -y && apt upgrade -y
apt-get install -y curl build-essential gcc-aarch64-linux-gnu
- name: Set up Docker for cross compilation
run: |
apt-get install -y curl apt-transport-https ca-certificates software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update -y && apt-get install -y docker-ce
- name: Installing Rust toolchain
uses: dtolnay/rust-toolchain@1.89
with:
profile: minimal
target: ${{ matrix.target }}
- name: Configure target aarch64 GNU
## Environment variable is not passed using env:
## LD gold won't work with MUSL
# env:
# JEMALLOC_SYS_WITH_LG_PAGE: 16
# RUSTFLAGS: '-Clink-arg=-fuse-ld=gold'
run: |
echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
- name: Install a default toolchain that will be used to build cargo cross
run: |
rustup default stable
- name: Cargo build
uses: actions-rs/cargo@v1
with:
command: build
use-cross: true
args: --release --target ${{ matrix.target }}
env:
CROSS_DOCKER_IN_DOCKER: true
- name: List target output files
run: ls -lR ./target
- name: Upload the binary to release
# No need to upload binaries for dry run (cron or workflow_dispatch)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }} tag: ${{ github.ref }}
publish-openapi-file: publish-openapi-file:


@@ -68,7 +68,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -92,7 +92,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -122,7 +122,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -149,7 +149,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -184,7 +184,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -213,7 +213,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -238,7 +238,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -263,7 +263,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -284,7 +284,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -307,7 +307,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -338,7 +338,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -370,7 +370,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
meilisearch: meilisearch:
image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }} image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env: env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }} MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}


@@ -15,40 +15,31 @@ env:
jobs: jobs:
test-linux: test-linux:
name: Tests on Ubuntu name: Tests on ubuntu-22.04
runs-on: ${{ matrix.runner }} runs-on: ubuntu-latest
strategy: container:
matrix: # Use ubuntu-22.04 to compile with glibc 2.35
runner: [ubuntu-22.04, ubuntu-22.04-arm] image: ubuntu:22.04
features: ["", "--features enterprise"]
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: check free space before - name: Install needed dependencies
run: df -h
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update && apt-get install -y curl
sudo rm -rf "/usr/share/dotnet" || true apt-get install build-essential -y
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: check free space after
run: df -h
- name: Setup test with Rust stable - name: Setup test with Rust stable
uses: dtolnay/rust-toolchain@1.91.1 uses: dtolnay/rust-toolchain@1.89
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
with: - name: Run cargo check without any default features
key: ${{ matrix.features }}
- name: Run cargo build without any default features
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: build command: build
args: --locked --no-default-features --all args: --locked --release --no-default-features --all
- name: Run cargo test - name: Run cargo test
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: test command: test
args: --locked --all ${{ matrix.features }} args: --locked --release --all
test-others: test-others:
name: Tests on ${{ matrix.os }} name: Tests on ${{ matrix.os }}
@@ -57,57 +48,50 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
os: [macos-14, windows-2022] os: [macos-14, windows-2022]
features: ["", "--features enterprise"]
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
- uses: dtolnay/rust-toolchain@1.91.1 - uses: dtolnay/rust-toolchain@1.89
- name: Run cargo build without any default features - name: Run cargo check without any default features
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: build command: build
args: --locked --no-default-features --all args: --locked --release --no-default-features --all
- name: Run cargo test - name: Run cargo test
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: test command: test
args: --locked --all ${{ matrix.features }} args: --locked --release --all
test-all-features: test-all-features:
name: Tests almost all features name: Tests almost all features
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - name: Install needed dependencies
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update
sudo rm -rf "/usr/share/dotnet" || true apt-get install --assume-yes build-essential curl
sudo rm -rf "/usr/local/lib/android" || true - uses: dtolnay/rust-toolchain@1.89
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- name: Run cargo build with almost all features - name: Run cargo build with almost all features
run: | run: |
cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)" cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
- name: Run cargo test with almost all features - name: Run cargo test with almost all features
run: | run: |
cargo test --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)" cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
ollama-ubuntu: ollama-ubuntu:
name: Test with Ollama name: Test with Ollama
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
env: env:
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434" MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- name: Install Ollama - name: Install Ollama
run: | run: |
curl -fsSL https://ollama.com/install.sh | sudo -E sh curl -fsSL https://ollama.com/install.sh | sudo -E sh
@@ -131,21 +115,21 @@ jobs:
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: test command: test
args: --locked -p meilisearch --features test-ollama ollama args: --locked --release --all --features test-ollama ollama
test-disabled-tokenization: test-disabled-tokenization:
name: Test disabled tokenization name: Test disabled tokenization
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
container:
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - name: Install needed dependencies
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update
sudo rm -rf "/usr/share/dotnet" || true apt-get install --assume-yes build-essential curl
sudo rm -rf "/usr/local/lib/android" || true - uses: dtolnay/rust-toolchain@1.89
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- name: Run cargo tree without default features and check lindera is not present - name: Run cargo tree without default features and check lindera is not present
run: | run: |
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
@@ -156,39 +140,36 @@ jobs:
run: | run: |
cargo tree -f '{p} {f}' -e normal | grep lindera -qz cargo tree -f '{p} {f}' -e normal | grep lindera -qz
build: # We run tests in debug also, to make sure that the debug_assertions are hit
name: Build in release test-debug:
runs-on: ubuntu-22.04 name: Run tests in debug
runs-on: ubuntu-latest
container:
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - name: Install needed dependencies
run: | run: |
sudo rm -rf "/opt/ghc" || true apt-get update && apt-get install -y curl
sudo rm -rf "/usr/share/dotnet" || true apt-get install build-essential -y
sudo rm -rf "/usr/local/lib/android" || true - uses: dtolnay/rust-toolchain@1.89
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
- name: Build - name: Run tests in debug
run: cargo build --release --locked --target x86_64-unknown-linux-gnu uses: actions-rs/cargo@v1
with:
command: test
args: --locked --all
clippy: clippy:
name: Run Clippy name: Run Clippy
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
strategy:
matrix:
features: ["", "--features enterprise"]
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - uses: dtolnay/rust-toolchain@1.89
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
with: with:
profile: minimal
components: clippy components: clippy
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
@@ -196,21 +177,18 @@ jobs:
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: clippy command: clippy
args: --all-targets ${{ matrix.features }} -- --deny warnings args: --all-targets -- --deny warnings
fmt: fmt:
name: Run Rustfmt name: Run Rustfmt
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709 - uses: dtolnay/rust-toolchain@1.89
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
with: with:
profile: minimal
toolchain: nightly-2024-07-09
override: true
components: rustfmt components: rustfmt
- name: Cache dependencies - name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0 uses: Swatinem/rust-cache@v2.8.0
@@ -221,23 +199,3 @@ jobs:
run: | run: |
echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
cargo fmt --all -- --check cargo fmt --all -- --check
declarative-tests:
name: Run declarative tests
runs-on: ubuntu-22.04-arm
permissions:
contents: read
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- name: Run declarative tests
run: |
cargo xtask test workloads/tests/*.json


@@ -18,13 +18,9 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v5
-- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-run: |
-sudo rm -rf "/opt/ghc" || true
-sudo rm -rf "/usr/share/dotnet" || true
-sudo rm -rf "/usr/local/lib/android" || true
-sudo rm -rf "/usr/local/share/boost" || true
-- uses: dtolnay/rust-toolchain@1.91.1
+- uses: dtolnay/rust-toolchain@1.89
+with:
+profile: minimal
 - name: Install sd
 run: cargo install sd
 - name: Update Cargo.toml file


@@ -124,7 +124,6 @@ They are JSON files with the following structure (comments are not actually supp
 {
 // Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
 "name": "hackernews.ndjson_1M,no-threads",
-"type": "bench",
 // Number of consecutive runs of the commands that should be performed.
 // Each run uses a fresh instance of Meilisearch and a fresh database.
 // Each run produces its own report file.

Cargo.lock (generated, 1176 lines changed)

File diff suppressed because it is too large


@@ -23,7 +23,7 @@ members = [
 ]
 [workspace.package]
-version = "1.29.0"
+version = "1.26.0"
 authors = [
 "Quentin de Quelen <quentin@dequelen.me>",
 "Clément Renault <clement@meilisearch.com>",
@@ -50,5 +50,3 @@ opt-level = 3
 opt-level = 3
 [profile.dev.package.roaring]
 opt-level = 3
-[profile.dev.package.gemm-f16]
-opt-level = 3

Cross.toml (new file, 7 lines)

@@ -0,0 +1,7 @@
[build.env]
passthrough = [
"RUST_BACKTRACE",
"CARGO_TERM_COLOR",
"RUSTFLAGS",
"JEMALLOC_SYS_WITH_LG_PAGE"
]


@@ -8,14 +8,16 @@ WORKDIR /
 ARG COMMIT_SHA
 ARG COMMIT_DATE
 ARG GIT_TAG
-ARG EXTRA_ARGS
 ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG}
 ENV RUSTFLAGS="-C target-feature=-crt-static"
 COPY . .
 RUN set -eux; \
 apkArch="$(apk --print-arch)"; \
-cargo build --release -p meilisearch -p meilitool ${EXTRA_ARGS}
+if [ "$apkArch" = "aarch64" ]; then \
+export JEMALLOC_SYS_WITH_LG_PAGE=16; \
+fi && \
+cargo build --release -p meilisearch -p meilitool
 # Run
 FROM alpine:3.22


@@ -1,326 +0,0 @@
# Declarative tests
Declarative tests ensure that Meilisearch features remain stable across versions.
While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.
Declarative tests instead **simulate the lifetime of a database**: they chain together commands and requests to change the binary, verifying that database state and API responses remain consistent.
## Basic example
```jsonc
{
"type": "test",
"name": "api-keys",
"binary": { // the first command will run on the binary following this specification.
"source": "release", // get the binary as a release from GitHub
"version": "1.19.0", // version to fetch
"edition": "community" // edition to fetch
},
"commands": []
}
```
This example defines a no-op test (it does nothing).
If the file is saved at `workloads/tests/example.json`, you can run it with:
```bash
cargo xtask test workloads/tests/example.json
```
## Commands
Commands represent API requests sent to Meilisearch endpoints during a test.
They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
}
}
```
This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.
### Using assets in commands
To keep tests concise and reusable, you can define **assets** at the root of the workload file.
Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0",
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
}
]
}
```
In this example:
- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.
This makes the test much cleaner than inlining a large dataset directly into the command.
For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).
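If you are adding a new asset, the `sha256` checksum can be computed locally before committing the workload, for instance with `sha256sum movies.json` (or `shasum -a 256 movies.json` on macOS), and pasted into the asset definition.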
### Asserting responses
Commands can specify both the **expected status code** and the **expected response body**.
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
},
"expectedStatus": 202,
"expectedResponse": {
"enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
"indexUid": "movies",
"status": "enqueued",
"taskUid": 1,
"type": "documentAdditionOrUpdate"
},
"synchronous": "WaitForTask"
}
```
Manually writing `expectedResponse` fields can be tedious.
Instead, you can let the test runner populate them automatically:
```bash
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
cargo xtask test workloads/tests/example.json --add-missing-responses
# OR
# Run the workload to populate expected fields. Updates all fields including existing ones
cargo xtask test workloads/tests/example.json --update-responses
```
This workflow is recommended:
1. Write the test without expected fields.
2. Run it with `--add-missing-responses` to capture the actual responses.
3. Review and commit the generated expectations.
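In practice, step 1 can be little more than the request itself. A minimal sketch, reusing the `movies.json` asset from above:
```jsonc
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": {
    "asset": "movies.json"
  },
  "expectedStatus": 202
  // "expectedResponse" is intentionally omitted; --add-missing-responses fills it in on the first run
}
```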
## Changing binary
It is possible to insert an instruction to change the current Meilisearch instance from one binary specification to another during a test.
When executed, such an instruction will:
1. Stop the current Meilisearch instance.
2. Fetch the binary specified by the instruction.
3. Restart the server with the specified binary on the same database.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0", // start with version v1.19.0
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
// setup some data
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
},
// switch binary to v1.24.0
{
"binary": {
"source": "release",
"version": "1.24.0",
"edition": "community"
}
}
]
}
```
### Typical Usage
In most cases, the change binary instruction will be used to update a database.
- **Set up** some data using commands on an older version.
- **Upgrade** to the latest version.
- **Assert** that the data and API behavior remain correct after the upgrade.
To properly test the dumpless upgrade, one should typically:
1. Open the database without processing the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments
2. Check that the search, stats and task queue still work.
3. Open the database and process the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish.
4. Check that the indexing, search, stats, and task queue still work.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.12.0",
"edition": "community"
},
"commands": [
// 0. Run commands to populate the database
{
// ..
},
// 1. Open the database with new MS without processing the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade", // allows to open with a newer MS
"--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
]
}
},
// 2. Check the search etc.
{
// ..
},
// 3. Open the database with new MS and processing the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade" // allows to open with a newer MS
// no `--experimental-max-number-of-batched-tasks=0`
]
}
},
// 4. Check the indexing, search, etc.
{
// ..
}
]
}
```
This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.
## Variables
Sometimes a command needs to use a value returned by a **previous response**.
These values can be captured and reused using the register field.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
},
"expectedResponse": {
"key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
// ...
},
"register": {
"key": "/key"
},
"synchronous": "WaitForResponse"
}
```
The `register` field captures the value at the JSON path `/key` from the response.
Paths follow the **JavaScript Object Notation Pointer (RFC 6901)** format.
Registered variables are available for all subsequent commands.
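For instance, a task identifier could be captured from the enqueued-task response seen earlier and reused by later commands. A minimal sketch, assuming the `taskUid` field shown in that response:
```jsonc
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": {
    "asset": "movies.json"
  },
  "register": {
    "task_id": "/taskUid" // RFC 6901 pointer into the response body
  },
  "synchronous": "WaitForTask"
}
```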
Registered variables can be referenced by wrapping their name in double curly braces:
In the route/path:
```jsonc
{
"route": "tasks/{{ task_id }}",
"method": "GET"
}
```
In the request body:
```jsonc
{
"route": "indexes/movies/documents",
"method": "PATCH",
"body": {
"inline": {
"id": "{{ document_id }}",
"overview": "Shazam turns evil and the world is in danger.",
}
}
}
```
Or they can be referenced by their name (**without curly braces**) as an API key:
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": { /* ... */ },
"apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
}
```


@@ -11,27 +11,27 @@ edition.workspace = true
 license.workspace = true
 [dependencies]
-anyhow = "1.0.100"
-bumpalo = "3.19.0"
-csv = "1.4.0"
-memmap2 = "0.9.9"
+anyhow = "1.0.98"
+bumpalo = "3.18.1"
+csv = "1.3.1"
+memmap2 = "0.9.7"
 milli = { path = "../milli" }
-mimalloc = { version = "0.1.48", default-features = false }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
-tempfile = "3.23.0"
+mimalloc = { version = "0.1.47", default-features = false }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
+tempfile = "3.20.0"
 [dev-dependencies]
-criterion = { version = "0.7.0", features = ["html_reports"] }
+criterion = { version = "0.6.0", features = ["html_reports"] }
 rand = "0.8.5"
 rand_chacha = "0.3.1"
 roaring = "0.10.12"
 [build-dependencies]
-anyhow = "1.0.100"
-bytes = "1.11.0"
-convert_case = "0.9.0"
-flate2 = "1.1.5"
-reqwest = { version = "0.12.24", features = ["blocking", "rustls-tls"], default-features = false }
+anyhow = "1.0.98"
+bytes = "1.10.1"
+convert_case = "0.8.0"
+flate2 = "1.1.2"
+reqwest = { version = "0.12.20", features = ["blocking", "rustls-tls"], default-features = false }
 [features]
 default = ["milli/all-tokenizations"]


@@ -21,10 +21,6 @@ use roaring::RoaringBitmap;
#[global_allocator] #[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc; static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
fn no_cancel() -> bool {
false
}
const BENCHMARK_ITERATION: usize = 10; const BENCHMARK_ITERATION: usize = 10;
fn setup_dir(path: impl AsRef<Path>) { fn setup_dir(path: impl AsRef<Path>) {
@@ -69,7 +65,7 @@ fn setup_settings<'t>(
let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect(); let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
builder.set_sortable_fields(sortable_fields); builder.set_sortable_fields(sortable_fields);
builder.execute(&no_cancel, &Progress::default(), Default::default()).unwrap(); builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
} }
fn setup_index_with_settings( fn setup_index_with_settings(
@@ -156,7 +152,7 @@ fn indexing_songs_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -172,7 +168,7 @@ fn indexing_songs_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -224,7 +220,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -240,7 +236,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -270,7 +266,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -286,7 +282,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -340,7 +336,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -356,7 +352,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -418,7 +414,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -434,7 +430,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -464,7 +460,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -480,7 +476,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -506,7 +502,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -522,7 +518,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -575,7 +571,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -591,7 +587,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -643,7 +639,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -659,7 +655,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -711,7 +707,7 @@ fn indexing_wiki(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -727,7 +723,7 @@ fn indexing_wiki(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -778,7 +774,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -794,7 +790,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -824,7 +820,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -840,7 +836,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -893,7 +889,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -909,7 +905,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -971,7 +967,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -987,7 +983,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1018,7 +1014,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1034,7 +1030,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1061,7 +1057,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1077,7 +1073,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1129,7 +1125,7 @@ fn indexing_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1145,7 +1141,7 @@ fn indexing_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1196,7 +1192,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1212,7 +1208,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1242,7 +1238,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1258,7 +1254,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1311,7 +1307,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1327,7 +1323,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1376,7 +1372,7 @@ fn delete_documents_from_ids(index: Index, document_ids_to_delete: Vec<RoaringBi
Some(primary_key), Some(primary_key),
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1426,7 +1422,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1442,7 +1438,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1472,7 +1468,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1488,7 +1484,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1514,7 +1510,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1530,7 +1526,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1605,7 +1601,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1621,7 +1617,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1697,7 +1693,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1713,7 +1709,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1781,7 +1777,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1797,7 +1793,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1849,7 +1845,7 @@ fn indexing_geo(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1865,7 +1861,7 @@ fn indexing_geo(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1916,7 +1912,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1932,7 +1928,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -1962,7 +1958,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -1978,7 +1974,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )
@@ -2031,7 +2027,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
&rtxn, &rtxn,
None, None,
&mut new_fields_ids_map, &mut new_fields_ids_map,
&no_cancel, &|| false,
Progress::default(), Progress::default(),
None, None,
) )
@@ -2047,7 +2043,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
primary_key, primary_key,
&document_changes, &document_changes,
RuntimeEmbedders::default(), RuntimeEmbedders::default(),
&no_cancel, &|| false,
&Progress::default(), &Progress::default(),
&Default::default(), &Default::default(),
) )

View File

@@ -11,8 +11,8 @@ license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
time = { version = "0.3.44", features = ["parsing"] } time = { version = "0.3.41", features = ["parsing"] }
[build-dependencies] [build-dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
vergen-gitcl = "1.0.8" vergen-git2 = "1.0.7"

View File

@@ -15,7 +15,7 @@ fn emit_git_variables() -> anyhow::Result<()> {
// Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them // Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
// in the corresponding GitHub workflow (publish_docker.yml). // in the corresponding GitHub workflow (publish_docker.yml).
// This is due to the Dockerfile building the binary outside of the git directory. // This is due to the Dockerfile building the binary outside of the git directory.
let mut builder = vergen_gitcl::GitclBuilder::default(); let mut builder = vergen_git2::Git2Builder::default();
builder.branch(true); builder.branch(true);
builder.commit_timestamp(true); builder.commit_timestamp(true);
@@ -25,5 +25,5 @@ fn emit_git_variables() -> anyhow::Result<()> {
let git2 = builder.build()?; let git2 = builder.build()?;
vergen_gitcl::Emitter::default().fail_on_error().add_instructions(&git2)?.emit() vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
} }
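Both backends (`vergen-gitcl` on one side, `vergen-git2` on the other) emit the same `VERGEN_GIT_*` build-time variables once `branch` and `commit_timestamp` are enabled. An illustrative sketch of how such variables are typically read at compile time (not this crate's exact code):

// The values may be absent when the binary is built outside a git checkout,
// e.g. the Dockerfile case mentioned in the comment above.
fn git_build_info() -> (Option<&'static str>, Option<&'static str>) {
    let branch = option_env!("VERGEN_GIT_BRANCH");
    let commit_timestamp = option_env!("VERGEN_GIT_COMMIT_TIMESTAMP");
    (branch, commit_timestamp)
}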

View File

@@ -1,6 +0,0 @@
use build_info::BuildInfo;
fn main() {
let info = BuildInfo::from_build();
dbg!(info);
}

View File

@@ -11,27 +11,24 @@ readme.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
flate2 = "1.1.5" flate2 = "1.1.2"
http = "1.3.1" http = "1.3.1"
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
once_cell = "1.21.3" once_cell = "1.21.3"
regex = "1.12.2" regex = "1.11.1"
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44" tar = "0.4.44"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] } time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tracing = "0.1.41" tracing = "0.1.41"
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }
[dev-dependencies] [dev-dependencies]
big_s = "1.0.2" big_s = "1.0.2"
maplit = "1.0.2" maplit = "1.0.2"
meili-snap = { path = "../meili-snap" } meili-snap = { path = "../meili-snap" }
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
[features]
enterprise = ["meilisearch-types/enterprise"]

View File

@@ -262,13 +262,13 @@ pub(crate) mod test {
use big_s::S; use big_s::S;
use maplit::{btreemap, btreeset}; use maplit::{btreemap, btreeset};
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats}; use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
use meilisearch_types::enterprise_edition::network::{Network, Remote};
use meilisearch_types::facet_values_sort::FacetValuesSort; use meilisearch_types::facet_values_sort::FacetValuesSort;
use meilisearch_types::features::RuntimeTogglableFeatures; use meilisearch_types::features::RuntimeTogglableFeatures;
use meilisearch_types::index_uid_pattern::IndexUidPattern; use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::keys::{Action, Key}; use meilisearch_types::keys::{Action, Key};
use meilisearch_types::milli::update::Setting; use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::{self, FilterableAttributesRule}; use meilisearch_types::milli::{self, FilterableAttributesRule};
use meilisearch_types::network::{Network, Remote};
use meilisearch_types::settings::{Checked, FacetingSettings, Settings}; use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
use meilisearch_types::task_view::DetailsView; use meilisearch_types::task_view::DetailsView;
use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status}; use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
@@ -341,6 +341,7 @@ pub(crate) mod test {
prefix_search: Setting::NotSet, prefix_search: Setting::NotSet,
chat: Setting::NotSet, chat: Setting::NotSet,
vector_store: Setting::NotSet, vector_store: Setting::NotSet,
execute_after_update: Setting::NotSet,
_kind: std::marker::PhantomData, _kind: std::marker::PhantomData,
}; };
settings.check() settings.check()

View File

@@ -423,6 +423,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
prefix_search: v6::Setting::NotSet, prefix_search: v6::Setting::NotSet,
chat: v6::Setting::NotSet, chat: v6::Setting::NotSet,
vector_store: v6::Setting::NotSet, vector_store: v6::Setting::NotSet,
execute_after_update: v6::Setting::NotSet,
_kind: std::marker::PhantomData, _kind: std::marker::PhantomData,
} }
} }

View File

@@ -107,14 +107,19 @@ impl Settings<Unchecked> {
} }
} }
#[derive(Default, Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub enum Setting<T> { pub enum Setting<T> {
Set(T), Set(T),
Reset, Reset,
#[default]
NotSet, NotSet,
} }
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> { impl<T> Setting<T> {
pub const fn is_not_set(&self) -> bool { pub const fn is_not_set(&self) -> bool {
matches!(self, Self::NotSet) matches!(self, Self::NotSet)
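The only difference between the two sides of this `Setting<T>` hunk (and of the identical hunks in the other dump versions below) is how the default is spelled: a derived `Default` with the `#[default]` attribute on `NotSet` versus a hand-written `impl Default`. The observable behavior is the same; a quick check, assuming the type is in scope:

#[test]
fn setting_defaults_to_not_set() {
    assert!(matches!(Setting::<String>::default(), Setting::NotSet));
    assert!(Setting::<String>::default().is_not_set());
}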

View File

@@ -161,14 +161,19 @@ pub struct Facets {
pub min_level_size: Option<NonZeroUsize>, pub min_level_size: Option<NonZeroUsize>,
} }
#[derive(Default, Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub enum Setting<T> { pub enum Setting<T> {
Set(T), Set(T),
Reset, Reset,
#[default]
NotSet, NotSet,
} }
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> { impl<T> Setting<T> {
pub fn map<U, F>(self, f: F) -> Setting<U> pub fn map<U, F>(self, f: F) -> Setting<U>
where where

View File

@@ -1,7 +1,9 @@
use std::fmt::{self, Display, Formatter}; use std::fmt::{self, Display, Formatter};
use std::marker::PhantomData;
use std::str::FromStr; use std::str::FromStr;
use serde::Deserialize; use serde::de::Visitor;
use serde::{Deserialize, Deserializer};
use uuid::Uuid; use uuid::Uuid;
use super::settings::{Settings, Unchecked}; use super::settings::{Settings, Unchecked};
@@ -80,3 +82,59 @@ impl Display for IndexUidFormatError {
} }
impl std::error::Error for IndexUidFormatError {} impl std::error::Error for IndexUidFormatError {}
/// A type that tries to match either a star (*) or
/// any other thing that implements `FromStr`.
#[derive(Debug)]
#[cfg_attr(test, derive(serde::Serialize))]
pub enum StarOr<T> {
Star,
Other(T),
}
impl<'de, T, E> Deserialize<'de> for StarOr<T>
where
T: FromStr<Err = E>,
E: Display,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
/// Serde can't differentiate between `StarOr::Star` and `StarOr::Other` without a tag.
/// Simply using `#[serde(untagged)]` + `#[serde(rename="*")]` will lead to attempting to
/// deserialize everything as a `StarOr::Other`, including "*".
/// [`#[serde(other)]`](https://serde.rs/variant-attrs.html#other) might have helped but is
/// not supported on untagged enums.
struct StarOrVisitor<T>(PhantomData<T>);
impl<T, FE> Visitor<'_> for StarOrVisitor<T>
where
T: FromStr<Err = FE>,
FE: Display,
{
type Value = StarOr<T>;
fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result {
formatter.write_str("a string")
}
fn visit_str<SE>(self, v: &str) -> Result<Self::Value, SE>
where
SE: serde::de::Error,
{
match v {
"*" => Ok(StarOr::Star),
v => {
let other = FromStr::from_str(v).map_err(|e: T::Err| {
SE::custom(format!("Invalid `other` value: {}", e))
})?;
Ok(StarOr::Other(other))
}
}
}
}
deserializer.deserialize_str(StarOrVisitor(PhantomData))
}
}
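A minimal usage sketch for the visitor above, with a hypothetical `Tag` type and assuming `serde_json` is available (as it is elsewhere in this crate's tests):

use std::convert::Infallible;
use std::str::FromStr;

#[derive(Debug)]
struct Tag(String);

impl FromStr for Tag {
    type Err = Infallible;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Tag(s.to_string()))
    }
}

#[test]
fn star_or_deserialization() {
    // "*" becomes the Star variant; anything else goes through FromStr.
    let star: StarOr<Tag> = serde_json::from_str(r#""*""#).unwrap();
    let other: StarOr<Tag> = serde_json::from_str(r#""movies""#).unwrap();
    assert!(matches!(star, StarOr::Star));
    assert!(matches!(other, StarOr::Other(Tag(s)) if s == "movies"));
}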

View File

@@ -192,14 +192,19 @@ pub struct Facets {
pub min_level_size: Option<NonZeroUsize>, pub min_level_size: Option<NonZeroUsize>,
} }
#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)] #[derive(Debug, Clone, PartialEq, Eq, Copy)]
pub enum Setting<T> { pub enum Setting<T> {
Set(T), Set(T),
Reset, Reset,
#[default]
NotSet, NotSet,
} }
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> { impl<T> Setting<T> {
pub fn set(self) -> Option<T> { pub fn set(self) -> Option<T> {
match self { match self {

View File

@@ -47,15 +47,20 @@ pub struct Settings<T> {
pub _kind: PhantomData<T>, pub _kind: PhantomData<T>,
} }
#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)] #[derive(Debug, Clone, PartialEq, Eq, Copy)]
#[cfg_attr(test, derive(serde::Serialize))] #[cfg_attr(test, derive(serde::Serialize))]
pub enum Setting<T> { pub enum Setting<T> {
Set(T), Set(T),
Reset, Reset,
#[default]
NotSet, NotSet,
} }
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> { impl<T> Setting<T> {
pub fn set(self) -> Option<T> { pub fn set(self) -> Option<T> {
match self { match self {

View File

@@ -322,7 +322,7 @@ impl From<Task> for TaskView {
_ => None, _ => None,
}); });
let duration = finished_at.zip(started_at).map(|(tf, ts)| tf - ts); let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));
Self { Self {
uid: id, uid: id,

View File

@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
pub type Key = meilisearch_types::keys::Key; pub type Key = meilisearch_types::keys::Key;
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings; pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures; pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
pub type Network = meilisearch_types::network::Network; pub type Network = meilisearch_types::enterprise_edition::network::Network;
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView; pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
// ===== Other types to clarify the code of the compat module // ===== Other types to clarify the code of the compat module

View File

@@ -5,9 +5,9 @@ use std::path::PathBuf;
use flate2::write::GzEncoder; use flate2::write::GzEncoder;
use flate2::Compression; use flate2::Compression;
use meilisearch_types::batches::Batch; use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures}; use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
use meilisearch_types::keys::Key; use meilisearch_types::keys::Key;
use meilisearch_types::network::Network;
use meilisearch_types::settings::{Checked, Settings}; use meilisearch_types::settings::{Checked, Settings};
use meilisearch_types::webhooks::WebhooksDumpView; use meilisearch_types::webhooks::WebhooksDumpView;
use serde_json::{Map, Value}; use serde_json::{Map, Value};

View File

@@ -11,7 +11,7 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
tracing = "0.1.41" tracing = "0.1.41"
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }

View File

@@ -16,7 +16,7 @@ license.workspace = true
serde_json = "1.0" serde_json = "1.0"
[dev-dependencies] [dev-dependencies]
criterion = { version = "0.7.0", features = ["html_reports"] } criterion = { version = "0.6.0", features = ["html_reports"] }
[[bench]] [[bench]]
name = "benchmarks" name = "benchmarks"

View File

@@ -11,12 +11,12 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
arbitrary = { version = "1.4.2", features = ["derive"] } arbitrary = { version = "1.4.1", features = ["derive"] }
bumpalo = "3.19.0" bumpalo = "3.18.1"
clap = { version = "4.5.52", features = ["derive"] } clap = { version = "4.5.40", features = ["derive"] }
either = "1.15.0" either = "1.15.0"
fastrand = "2.3.0" fastrand = "2.3.0"
milli = { path = "../milli" } milli = { path = "../milli" }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tempfile = "3.23.0" tempfile = "3.20.0"

View File

@@ -11,33 +11,33 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
anyhow = "1.0.100" anyhow = "1.0.98"
bincode = "1.3.3" bincode = "1.3.3"
byte-unit = "5.1.6" byte-unit = "5.1.6"
bytes = "1.11.0" bytes = "1.10.1"
bumpalo = "3.19.0" bumpalo = "3.18.1"
bumparaw-collections = "0.1.4" bumparaw-collections = "0.1.4"
convert_case = "0.9.0" convert_case = "0.8.0"
csv = "1.4.0" csv = "1.3.1"
derive_builder = "0.20.2" derive_builder = "0.20.2"
dump = { path = "../dump" } dump = { path = "../dump" }
enum-iterator = "2.3.0" enum-iterator = "2.1.0"
file-store = { path = "../file-store" } file-store = { path = "../file-store" }
flate2 = "1.1.5" flate2 = "1.1.2"
indexmap = "2.12.0" indexmap = "2.9.0"
meilisearch-auth = { path = "../meilisearch-auth" } meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
memmap2 = "0.9.9" memmap2 = "0.9.7"
page_size = "0.6.0" page_size = "0.6.0"
rayon = "1.11.0" rayon = "1.10.0"
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44" tar = "0.4.44"
synchronoise = "1.0.1" synchronoise = "1.0.1"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = [ time = { version = "0.3.41", features = [
"serde-well-known", "serde-well-known",
"formatting", "formatting",
"parsing", "parsing",
@@ -45,11 +45,11 @@ time = { version = "0.3.44", features = [
] } ] }
tracing = "0.1.41" tracing = "0.1.41"
ureq = "2.12.1" ureq = "2.12.1"
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }
backoff = "0.4.0" backoff = "0.4.0"
reqwest = { version = "0.12.24", features = ["rustls-tls", "http2"], default-features = false } reqwest = { version = "0.12.23", features = ["rustls-tls", "http2"], default-features = false }
rusty-s3 = "0.8.1" rusty-s3 = "0.8.1"
tokio = { version = "1.48.0", features = ["full"] } tokio = { version = "1.47.1", features = ["full"] }
[dev-dependencies] [dev-dependencies]
big_s = "1.0.2" big_s = "1.0.2"

View File

@@ -1,9 +1,9 @@
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures}; use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
use meilisearch_types::heed::types::{SerdeJson, Str}; use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls}; use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
use meilisearch_types::network::Network;
use crate::error::FeatureNotEnabledError; use crate::error::FeatureNotEnabledError;
use crate::Result; use crate::Result;

View File

@@ -306,6 +306,18 @@ fn create_or_open_index(
) -> Result<Index> { ) -> Result<Index> {
let options = EnvOpenOptions::new(); let options = EnvOpenOptions::new();
let mut options = options.read_txn_without_tls(); let mut options = options.read_txn_without_tls();
let map_size = match std::env::var("MEILI_MAX_INDEX_SIZE") {
Ok(max_size) => {
let max_size = max_size.parse().unwrap();
map_size.min(max_size)
}
Err(VarError::NotPresent) => map_size,
Err(VarError::NotUnicode(e)) => {
panic!("Non unicode max index size in `MEILI_MAX_INDEX_SIZE`: {e:?}")
}
};
options.map_size(clamp_to_page_size(map_size)); options.map_size(clamp_to_page_size(map_size));
// You can find more details about this experimental // You can find more details about this experimental
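A sketch of the clamping rule added above, under the assumption that `MEILI_MAX_INDEX_SIZE` holds a plain byte count (the hunk parses it without any unit suffix and panics on anything else):

// Illustrative only: the effective LMDB map size becomes the smaller of the
// scheduler-provided size and the environment override, when one is set.
fn effective_map_size(map_size: usize, env_value: Option<&str>) -> usize {
    match env_value {
        Some(raw) => {
            let max: usize =
                raw.parse().expect("MEILI_MAX_INDEX_SIZE must be an integer byte count");
            map_size.min(max)
        }
        None => map_size,
    }
}

// e.g. MEILI_MAX_INDEX_SIZE=1073741824 caps every index at 1 GiB:
// effective_map_size(100 * 1024 * 1024 * 1024, Some("1073741824")) == 1_073_741_824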

View File

@@ -6,7 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{Database, RoTxn}; use meilisearch_types::heed::{Database, RoTxn};
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32}; use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::{Details, Kind, Status, Task}; use meilisearch_types::tasks::{Details, Kind, Status, Task};
use meilisearch_types::versioning::{self, VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH}; use meilisearch_types::versioning;
use roaring::RoaringBitmap; use roaring::RoaringBitmap;
use crate::index_mapper::IndexMapper; use crate::index_mapper::IndexMapper;
@@ -320,12 +320,8 @@ fn snapshot_details(d: &Details) -> String {
format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}") format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
} }
Details::UpgradeDatabase { from, to } => { Details::UpgradeDatabase { from, to } => {
if to == &(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) {
format!("{{ from: {from:?}, to: [current version] }}")
} else {
format!("{{ from: {from:?}, to: {to:?} }}") format!("{{ from: {from:?}, to: {to:?} }}")
} }
}
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => { Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}") format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
} }
@@ -404,21 +400,7 @@ pub fn snapshot_batch(batch: &Batch) -> String {
snap.push('{'); snap.push('{');
snap.push_str(&format!("uid: {uid}, ")); snap.push_str(&format!("uid: {uid}, "));
let details = if let Some(upgrade_to) = &details.upgrade_to { snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
if upgrade_to.as_str()
== format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}").as_str()
{
let mut details = details.clone();
details.upgrade_to = Some("[current version]".into());
serde_json::to_string(&details).unwrap()
} else {
serde_json::to_string(details).unwrap()
}
} else {
serde_json::to_string(details).unwrap()
};
snap.push_str(&format!("details: {details}, "));
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap())); snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
if !embedder_stats.skip_serializing() { if !embedder_stats.skip_serializing() {
snap.push_str(&format!( snap.push_str(&format!(
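One side of these two hunks normalizes the upgrade target in test snapshots: when the recorded version equals the version being built, it is replaced by a `[current version]` placeholder so the `.snap` files (several of which differ below for exactly this reason) do not churn on every release. Roughly:

// Sketch of the normalization rule (assumed helper, not the crate's API):
fn normalize_upgrade_to(upgrade_to: &str, current: &str) -> String {
    if upgrade_to == current {
        "[current version]".to_string()
    } else {
        upgrade_to.to_string()
    }
}

// normalize_upgrade_to("v1.26.0", "v1.26.0") == "[current version]"
// normalize_upgrade_to("v1.12.0", "v1.26.0") == "v1.12.0"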

View File

@@ -54,6 +54,7 @@ pub use features::RoFeatures;
use flate2::bufread::GzEncoder; use flate2::bufread::GzEncoder;
use flate2::Compression; use flate2::Compression;
use meilisearch_types::batches::Batch; use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{ use meilisearch_types::features::{
ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures, ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
}; };
@@ -66,7 +67,6 @@ use meilisearch_types::milli::vector::{
Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment, Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment,
}; };
use meilisearch_types::milli::{self, Index}; use meilisearch_types::milli::{self, Index};
use meilisearch_types::network::Network;
use meilisearch_types::task_view::TaskView; use meilisearch_types::task_view::TaskView;
use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork}; use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView}; use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};

View File

@@ -502,11 +502,13 @@ impl Queue {
*before_finished_at, *before_finished_at,
)?; )?;
if let Some(limit) = limit {
batches = if query.reverse.unwrap_or_default() { batches = if query.reverse.unwrap_or_default() {
batches.into_iter().take(*limit).collect() batches.into_iter().take(*limit as usize).collect()
} else { } else {
batches.into_iter().rev().take(*limit).collect() batches.into_iter().rev().take(*limit as usize).collect()
}; };
}
Ok(batches) Ok(batches)
} }
@@ -600,8 +602,11 @@ impl Queue {
Box::new(batches.into_iter().rev()) as Box<dyn Iterator<Item = u32>> Box::new(batches.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
}; };
let batches = let batches = self.batches.get_existing_batches(
self.batches.get_existing_batches(rtxn, batches.take(query.limit), processing)?; rtxn,
batches.take(query.limit.unwrap_or(u32::MAX) as usize),
processing,
)?;
Ok((batches, total)) Ok((batches, total))
} }

View File

@@ -28,21 +28,21 @@ fn query_batches_from_and_limit() {
let proc = index_scheduler.processing_tasks.read().unwrap().clone(); let proc = index_scheduler.processing_tasks.read().unwrap().clone();
let rtxn = index_scheduler.env.read_txn().unwrap(); let rtxn = index_scheduler.env.read_txn().unwrap();
let query = Query { limit: 0, ..Default::default() }; let query = Query { limit: Some(0), ..Default::default() };
let (batches, _) = index_scheduler let (batches, _) = index_scheduler
.queue .queue
.get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc) .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&batches), @"[]"); snapshot!(snapshot_bitmap(&batches), @"[]");
let query = Query { limit: 1, ..Default::default() }; let query = Query { limit: Some(1), ..Default::default() };
let (batches, _) = index_scheduler let (batches, _) = index_scheduler
.queue .queue
.get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc) .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&batches), @"[2,]"); snapshot!(snapshot_bitmap(&batches), @"[2,]");
let query = Query { limit: 2, ..Default::default() }; let query = Query { limit: Some(2), ..Default::default() };
let (batches, _) = index_scheduler let (batches, _) = index_scheduler
.queue .queue
.get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc) .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
@@ -63,14 +63,14 @@ fn query_batches_from_and_limit() {
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&batches), @"[0,1,2,]"); snapshot!(snapshot_bitmap(&batches), @"[0,1,2,]");
let query = Query { from: Some(1), limit: 1, ..Default::default() }; let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
let (batches, _) = index_scheduler let (batches, _) = index_scheduler
.queue .queue
.get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc) .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&batches), @"[1,]"); snapshot!(snapshot_bitmap(&batches), @"[1,]");
let query = Query { from: Some(1), limit: 2, ..Default::default() }; let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
let (batches, _) = index_scheduler let (batches, _) = index_scheduler
.queue .queue
.get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc) .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)

View File

@@ -31,9 +31,6 @@ use crate::{Error, IndexSchedulerOptions, Result, TaskId};
/// The number of database used by queue itself /// The number of database used by queue itself
const NUMBER_OF_DATABASES: u32 = 1; const NUMBER_OF_DATABASES: u32 = 1;
/// The default limit for pagination
const DEFAULT_LIMIT: usize = 20;
/// Database const names for the `IndexScheduler`. /// Database const names for the `IndexScheduler`.
mod db_name { mod db_name {
pub const BATCH_TO_TASKS_MAPPING: &str = "batch-to-tasks-mapping"; pub const BATCH_TO_TASKS_MAPPING: &str = "batch-to-tasks-mapping";
@@ -43,11 +40,11 @@ mod db_name {
/// ///
/// An empty/default query (where each field is set to `None`) matches all tasks. /// An empty/default query (where each field is set to `None`) matches all tasks.
/// Each non-null field restricts the set of tasks further. /// Each non-null field restricts the set of tasks further.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct Query { pub struct Query {
/// The maximum number of tasks to be matched. Defaults to 20. /// The maximum number of tasks to be matched
pub limit: usize, pub limit: Option<u32>,
/// The minimum [task id](`meilisearch_types::tasks::Task::uid`) to be matched. Defaults to 0. /// The minimum [task id](`meilisearch_types::tasks::Task::uid`) to be matched
pub from: Option<u32>, pub from: Option<u32>,
/// The order used to return the tasks. By default the newest tasks are returned first and the boolean is `false`. /// The order used to return the tasks. By default the newest tasks are returned first and the boolean is `false`.
pub reverse: Option<bool>, pub reverse: Option<bool>,
@@ -86,29 +83,32 @@ pub struct Query {
pub after_finished_at: Option<OffsetDateTime>, pub after_finished_at: Option<OffsetDateTime>,
} }
impl Default for Query {
fn default() -> Self {
Self {
limit: DEFAULT_LIMIT,
from: Default::default(),
reverse: Default::default(),
uids: Default::default(),
batch_uids: Default::default(),
statuses: Default::default(),
types: Default::default(),
index_uids: Default::default(),
canceled_by: Default::default(),
before_enqueued_at: Default::default(),
after_enqueued_at: Default::default(),
before_started_at: Default::default(),
after_started_at: Default::default(),
before_finished_at: Default::default(),
after_finished_at: Default::default(),
}
}
}
impl Query { impl Query {
/// Return `true` if every field of the query is set to `None`, such that the query
/// matches all tasks.
pub fn is_empty(&self) -> bool {
matches!(
self,
Query {
limit: None,
from: None,
reverse: None,
uids: None,
batch_uids: None,
statuses: None,
types: None,
index_uids: None,
canceled_by: None,
before_enqueued_at: None,
after_enqueued_at: None,
before_started_at: None,
after_started_at: None,
before_finished_at: None,
after_finished_at: None,
}
)
}
/// Add an [index id](meilisearch_types::tasks::Task::index_uid) to the list of permitted indexes. /// Add an [index id](meilisearch_types::tasks::Task::index_uid) to the list of permitted indexes.
pub fn with_index(self, index_uid: String) -> Self { pub fn with_index(self, index_uid: String) -> Self {
let mut index_vec = self.index_uids.unwrap_or_default(); let mut index_vec = self.index_uids.unwrap_or_default();
@@ -119,7 +119,7 @@ impl Query {
// Removes the `from` and `limit` restrictions from the query. // Removes the `from` and `limit` restrictions from the query.
// Useful to get the total number of tasks matching a filter. // Useful to get the total number of tasks matching a filter.
pub fn without_limits(self) -> Self { pub fn without_limits(self) -> Self {
Query { limit: usize::MAX, from: None, ..self } Query { limit: None, from: None, ..self }
} }
} }
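In the `Option<u32>` variant of `Query` shown in this hunk, the default query no longer caps results at 20: an unset `limit` means unbounded, `is_empty` detects the match-everything query, and `without_limits` clears pagination. A small sketch of those semantics:

#[test]
fn query_limit_semantics() {
    // With `limit: Option<u32>` and a derived Default, the default query
    // matches every task.
    let all = Query::default();
    assert!(all.is_empty());

    // A capped query, then the same query with pagination limits removed.
    let first_two = Query { limit: Some(2), ..Default::default() };
    let unbounded = first_two.clone().without_limits();
    assert_eq!(unbounded.limit, None);
    assert_eq!(unbounded.from, None);
}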

View File

@@ -465,11 +465,13 @@ impl Queue {
*before_finished_at, *before_finished_at,
)?; )?;
if let Some(limit) = limit {
tasks = if query.reverse.unwrap_or_default() { tasks = if query.reverse.unwrap_or_default() {
tasks.into_iter().take(*limit).collect() tasks.into_iter().take(*limit as usize).collect()
} else { } else {
tasks.into_iter().rev().take(*limit).collect() tasks.into_iter().rev().take(*limit as usize).collect()
}; };
}
Ok(tasks) Ok(tasks)
} }
@@ -527,7 +529,9 @@ impl Queue {
} else { } else {
Box::new(tasks.into_iter().rev()) as Box<dyn Iterator<Item = u32>> Box::new(tasks.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
}; };
let tasks = self.tasks.get_existing_tasks(rtxn, tasks.take(query.limit))?; let tasks = self
.tasks
.get_existing_tasks(rtxn, tasks.take(query.limit.unwrap_or(u32::MAX) as usize))?;
let ProcessingTasks { batch, processing, progress: _ } = processing_tasks; let ProcessingTasks { batch, processing, progress: _ } = processing_tasks;

View File

@@ -28,21 +28,21 @@ fn query_tasks_from_and_limit() {
let rtxn = index_scheduler.env.read_txn().unwrap(); let rtxn = index_scheduler.env.read_txn().unwrap();
let processing = index_scheduler.processing_tasks.read().unwrap(); let processing = index_scheduler.processing_tasks.read().unwrap();
let query = Query { limit: 0, ..Default::default() }; let query = Query { limit: Some(0), ..Default::default() };
let (tasks, _) = index_scheduler let (tasks, _) = index_scheduler
.queue .queue
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing) .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[]"); snapshot!(snapshot_bitmap(&tasks), @"[]");
let query = Query { limit: 1, ..Default::default() }; let query = Query { limit: Some(1), ..Default::default() };
let (tasks, _) = index_scheduler let (tasks, _) = index_scheduler
.queue .queue
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing) .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[2,]"); snapshot!(snapshot_bitmap(&tasks), @"[2,]");
let query = Query { limit: 2, ..Default::default() }; let query = Query { limit: Some(2), ..Default::default() };
let (tasks, _) = index_scheduler let (tasks, _) = index_scheduler
.queue .queue
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing) .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
@@ -63,14 +63,14 @@ fn query_tasks_from_and_limit() {
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]"); snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]");
let query = Query { from: Some(1), limit: 1, ..Default::default() }; let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
let (tasks, _) = index_scheduler let (tasks, _) = index_scheduler
.queue .queue
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing) .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
.unwrap(); .unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[1,]"); snapshot!(snapshot_bitmap(&tasks), @"[1,]");
let query = Query { from: Some(1), limit: 2, ..Default::default() }; let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
let (tasks, _) = index_scheduler let (tasks, _) = index_scheduler
.queue .queue
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing) .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,] [timestamp] [4,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", } 1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", } 2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", } 3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Status: ### Status:
enqueued [0,] enqueued [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Status: ### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Status: ### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,] [timestamp] [0,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Batch to tasks mapping: ### Batch to tasks mapping:
0 [0,] 0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
---------------------------------------------------------------------- ----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,] [timestamp] [0,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Batch to tasks mapping: ### Batch to tasks mapping:
0 [0,] 0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[] []
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Tasks: ### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }} 0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }} 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }} 3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,] [timestamp] [0,]
---------------------------------------------------------------------- ----------------------------------------------------------------------
### All Batches: ### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", } 0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
---------------------------------------------------------------------- ----------------------------------------------------------------------
### Batch to tasks mapping: ### Batch to tasks mapping:
0 [0,] 0 [0,]

View File

@@ -1,7 +1,7 @@
use anyhow::bail; use anyhow::bail;
use meilisearch_types::heed::{Env, RwTxn, WithoutTls}; use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task}; use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
use meilisearch_types::versioning; use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use time::OffsetDateTime; use time::OffsetDateTime;
use tracing::info; use tracing::info;
@@ -9,82 +9,81 @@ use crate::queue::TaskQueue;
use crate::versioning::Versioning; use crate::versioning::Versioning;
trait UpgradeIndexScheduler { trait UpgradeIndexScheduler {
fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()>; fn upgrade(
/// Whether the migration should be applied, depending on the initial version of the index scheduler before &self,
/// any migration was applied env: &Env<WithoutTls>,
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool; wtxn: &mut RwTxn,
/// A progress-centric description of the migration original: (u32, u32, u32),
fn description(&self) -> &'static str; ) -> anyhow::Result<()>;
fn target_version(&self) -> (u32, u32, u32);
} }
/// Upgrade the index scheduler to the binary version.
///
/// # Warning
///
/// The current implementation uses a single wtxn to the index scheduler for the whole duration of the upgrade.
/// If migrations start taking a long time, it might prevent tasks from being registered.
/// If this issue manifests, then it can be mitigated by adding a `fn target_version` to `UpgradeIndexScheduler`,
/// to be able to write intermediate versions and drop the wtxn between applying migrations.
pub fn upgrade_index_scheduler( pub fn upgrade_index_scheduler(
env: &Env<WithoutTls>, env: &Env<WithoutTls>,
versioning: &Versioning, versioning: &Versioning,
initial_version: (u32, u32, u32), from: (u32, u32, u32),
to: (u32, u32, u32),
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let target_major: u32 = versioning::VERSION_MAJOR; let current_major = to.0;
let target_minor: u32 = versioning::VERSION_MINOR; let current_minor = to.1;
let target_patch: u32 = versioning::VERSION_PATCH; let current_patch = to.2;
let target_version = (target_major, target_minor, target_patch);
if initial_version == target_version {
return Ok(());
}
let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[ let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
// List all upgrade functions to apply in order here. // This is the last upgrade function, it will be called when the index is up to date.
// any other upgrade function should be added before this one.
&ToCurrentNoOp {},
]; ];
let (initial_major, initial_minor, initial_patch) = initial_version; let start = match from {
(1, 12, _) => 0,
if initial_version > target_version { (1, 13, _) => 0,
(1, 14, _) => 0,
(1, 15, _) => 0,
(1, 16, _) => 0,
(1, 17, _) => 0,
(1, 18, _) => 0,
(1, 19, _) => 0,
(1, 20, _) => 0,
(1, 21, _) => 0,
(1, 22, _) => 0,
(1, 23, _) => 0,
(1, 24, _) => 0,
(1, 25, _) => 0,
(1, 26, _) => 0,
(major, minor, patch) => {
if major > current_major
|| (major == current_major && minor > current_minor)
|| (major == current_major && minor == current_minor && patch > current_patch)
{
bail!( bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is higher than the Meilisearch version {target_major}.{target_minor}.{target_patch}. Downgrade is not supported", "Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
); );
} } else if major < 1 || (major == current_major && minor < 12) {
if initial_version < (1, 12, 0) {
bail!( bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{initial_major}.{initial_minor}.{initial_patch} and import it in the v{target_major}.{target_minor}.{target_patch}", "Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
); );
} else {
bail!("Unknown database version: v{major}.{minor}.{patch}");
} }
}
};
info!("Upgrading the task queue"); info!("Upgrading the task queue");
let mut wtxn = env.write_txn()?; let mut local_from = from;
let migration_count = upgrade_functions.len(); for upgrade in upgrade_functions[start..].iter() {
for (migration_index, upgrade) in upgrade_functions.iter().enumerate() { let target = upgrade.target_version();
if upgrade.must_upgrade(initial_version) {
info!( info!(
"[{migration_index}/{migration_count}]Applying migration: {}", "Upgrading from v{}.{}.{} to v{}.{}.{}",
upgrade.description() local_from.0, local_from.1, local_from.2, target.0, target.1, target.2
); );
let mut wtxn = env.write_txn()?;
upgrade.upgrade(env, &mut wtxn)?; upgrade.upgrade(env, &mut wtxn, local_from)?;
versioning.set_version(&mut wtxn, target)?;
info!( wtxn.commit()?;
"[{}/{migration_count}]Migration applied: {}", local_from = target;
migration_index + 1,
upgrade.description()
)
} else {
info!(
"[{migration_index}/{migration_count}]Skipping unnecessary migration: {}",
upgrade.description()
)
}
} }
versioning.set_version(&mut wtxn, target_version)?; let mut wtxn = env.write_txn()?;
info!("Task queue upgraded, spawning the upgrade database task");
let queue = TaskQueue::new(env, &mut wtxn)?; let queue = TaskQueue::new(env, &mut wtxn)?;
let uid = queue.next_task_id(&wtxn)?; let uid = queue.next_task_id(&wtxn)?;
queue.register( queue.register(
@@ -97,9 +96,9 @@ pub fn upgrade_index_scheduler(
finished_at: None, finished_at: None,
error: None, error: None,
canceled_by: None, canceled_by: None,
details: Some(Details::UpgradeDatabase { from: initial_version, to: target_version }), details: Some(Details::UpgradeDatabase { from, to }),
status: Status::Enqueued, status: Status::Enqueued,
kind: KindWithContent::UpgradeDatabase { from: initial_version }, kind: KindWithContent::UpgradeDatabase { from },
network: None, network: None,
custom_metadata: None, custom_metadata: None,
}, },
@@ -108,3 +107,21 @@ pub fn upgrade_index_scheduler(
Ok(()) Ok(())
} }
#[allow(non_camel_case_types)]
struct ToCurrentNoOp {}
impl UpgradeIndexScheduler for ToCurrentNoOp {
fn upgrade(
&self,
_env: &Env<WithoutTls>,
_wtxn: &mut RwTxn,
_original: (u32, u32, u32),
) -> anyhow::Result<()> {
Ok(())
}
fn target_version(&self) -> (u32, u32, u32) {
(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
}
}
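
A minimal sketch (not part of this diff) of what a concrete migration could look like under the trait variant that exposes must_upgrade and description; the struct name, version bound, and description string are illustrative assumptions, and the trait itself is the one shown above.

use meilisearch_types::heed::{Env, RwTxn, WithoutTls};

// Hypothetical migration, for illustration only.
struct RenameSomeInternalDatabase;

impl UpgradeIndexScheduler for RenameSomeInternalDatabase {
    fn upgrade(&self, _env: &Env<WithoutTls>, _wtxn: &mut RwTxn) -> anyhow::Result<()> {
        // Perform the actual schema change inside the shared write transaction.
        Ok(())
    }

    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
        // Only databases created before 1.13.0 need this migration.
        initial_version < (1, 13, 0)
    }

    fn description(&self) -> &'static str {
        "renaming an internal index-scheduler database"
    }
}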

View File

@@ -64,7 +64,14 @@ impl Versioning {
}; };
wtxn.commit()?; wtxn.commit()?;
upgrade_index_scheduler(env, &this, from)?; let bin_major: u32 = versioning::VERSION_MAJOR;
let bin_minor: u32 = versioning::VERSION_MINOR;
let bin_patch: u32 = versioning::VERSION_PATCH;
let to = (bin_major, bin_minor, bin_patch);
if from != to {
upgrade_index_scheduler(env, &this, from, to)?;
}
// Once we reach this point it means the upgrade process, if there was one is entirely finished // Once we reach this point it means the upgrade process, if there was one is entirely finished
// we can safely say we reached the latest version of the index scheduler // we can safely say we reached the latest version of the index scheduler

View File

@@ -15,7 +15,7 @@ license.workspace = true
serde_json = "1.0" serde_json = "1.0"
[dev-dependencies] [dev-dependencies]
criterion = "0.7.0" criterion = "0.6.0"
[[bench]] [[bench]]
name = "depth" name = "depth"

View File

@@ -13,7 +13,7 @@ license.workspace = true
[dependencies] [dependencies]
# fixed version due to format breakages in v1.40 # fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["json", "redactions"] } insta = { version = "=1.39.0", features = ["json", "redactions"] }
md5 = "0.8.0" md5 = "0.7.0"
once_cell = "1.21" once_cell = "1.21"
regex-lite = "0.1.8" regex-lite = "0.1.6"
uuid = { version = "1.18.1", features = ["v4"] } uuid = { version = "1.17.0", features = ["v4"] }

View File

@@ -12,15 +12,15 @@ license.workspace = true
[dependencies] [dependencies]
base64 = "0.22.1" base64 = "0.22.1"
enum-iterator = "2.3.0" enum-iterator = "2.1.0"
hmac = "0.12.1" hmac = "0.12.1"
maplit = "1.0.2" maplit = "1.0.2"
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
rand = "0.8.5" rand = "0.8.5"
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9" sha2 = "0.10.9"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] } time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }

View File

@@ -11,38 +11,38 @@ edition.workspace = true
license.workspace = true license.workspace = true
[dependencies] [dependencies]
actix-web = { version = "4.12.0", default-features = false } actix-web = { version = "4.11.0", default-features = false }
anyhow = "1.0.100" anyhow = "1.0.98"
bumpalo = "3.19.0" bumpalo = "3.18.1"
bumparaw-collections = "0.1.4" bumparaw-collections = "0.1.4"
byte-unit = { version = "5.1.6", features = ["serde"] } byte-unit = { version = "5.1.6", features = ["serde"] }
convert_case = "0.9.0" convert_case = "0.8.0"
csv = "1.4.0" csv = "1.3.1"
deserr = { version = "0.6.4", features = ["actix-web"] } deserr = { version = "0.6.3", features = ["actix-web"] }
either = { version = "1.15.0", features = ["serde"] } either = { version = "1.15.0", features = ["serde"] }
enum-iterator = "2.3.0" enum-iterator = "2.1.0"
file-store = { path = "../file-store" } file-store = { path = "../file-store" }
flate2 = "1.1.5" flate2 = "1.1.2"
fst = "0.4.7" fst = "0.4.7"
memmap2 = "0.9.9" memmap2 = "0.9.7"
milli = { path = "../milli" } milli = { path = "../milli" }
roaring = { version = "0.10.12", features = ["serde"] } roaring = { version = "0.10.12", features = ["serde"] }
rustc-hash = "2.1.1" rustc-hash = "2.1.1"
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde-cs = "0.2.4" serde-cs = "0.2.4"
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44" tar = "0.4.44"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = [ time = { version = "0.3.41", features = [
"serde-well-known", "serde-well-known",
"formatting", "formatting",
"parsing", "parsing",
"macros", "macros",
] } ] }
tokio = "1.48" tokio = "1.45"
utoipa = { version = "5.4.0", features = ["macros"] } utoipa = { version = "5.4.0", features = ["macros"] }
uuid = { version = "1.18.1", features = ["serde", "v4"] } uuid = { version = "1.17.0", features = ["serde", "v4"] }
[dev-dependencies] [dev-dependencies]
# fixed version due to format breakages in v1.40 # fixed version due to format breakages in v1.40
@@ -56,9 +56,6 @@ all-tokenizations = ["milli/all-tokenizations"]
# chinese specialized tokenization # chinese specialized tokenization
chinese = ["milli/chinese"] chinese = ["milli/chinese"]
chinese-pinyin = ["milli/chinese-pinyin"] chinese-pinyin = ["milli/chinese-pinyin"]
enterprise = ["milli/enterprise"]
# hebrew specialized tokenization # hebrew specialized tokenization
hebrew = ["milli/hebrew"] hebrew = ["milli/hebrew"]
# japanese specialized tokenization # japanese specialized tokenization

View File

@@ -1,16 +0,0 @@
pub mod network {
use milli::update::new::indexer::current_edition::sharding::Shards;
use crate::network::Network;
impl Network {
pub fn shards(&self) -> Option<Shards> {
None
}
pub fn sharding(&self) -> bool {
// always false in CE
false
}
}
}

View File

@@ -3,9 +3,21 @@
// Use of this source code is governed by the Business Source License 1.1, // Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11> // as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use milli::update::new::indexer::enterprise_edition::sharding::Shards; use std::collections::BTreeMap;
use crate::network::Network; use milli::update::new::indexer::enterprise_edition::sharding::Shards;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub sharding: bool,
}
impl Network { impl Network {
pub fn shards(&self) -> Option<Shards> { pub fn shards(&self) -> Option<Shards> {
@@ -22,8 +34,14 @@ impl Network {
None None
} }
} }
}
pub fn sharding(&self) -> bool {
self.sharding #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
} #[serde(rename_all = "camelCase")]
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
} }

View File

@@ -324,6 +324,7 @@ InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQU
InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ; InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
InvalidSettingsProximityPrecision , InvalidRequest , BAD_REQUEST ; InvalidSettingsProximityPrecision , InvalidRequest , BAD_REQUEST ;
InvalidSettingsFacetSearch , InvalidRequest , BAD_REQUEST ; InvalidSettingsFacetSearch , InvalidRequest , BAD_REQUEST ;
InvalidSettingsexecuteAfterUpdate , InvalidRequest , BAD_REQUEST ;
InvalidSettingsPrefixSearch , InvalidRequest , BAD_REQUEST ; InvalidSettingsPrefixSearch , InvalidRequest , BAD_REQUEST ;
InvalidSettingsFaceting , InvalidRequest , BAD_REQUEST ; InvalidSettingsFaceting , InvalidRequest , BAD_REQUEST ;
InvalidSettingsFilterableAttributes , InvalidRequest , BAD_REQUEST ; InvalidSettingsFilterableAttributes , InvalidRequest , BAD_REQUEST ;
@@ -433,7 +434,6 @@ InvalidChatCompletionSearchQueryParamPrompt , InvalidRequest , BAD_REQU
InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ; InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ; InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ; InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ;
RequiresEnterpriseEdition , InvalidRequest , UNAVAILABLE_FOR_LEGAL_REASONS ;
// Webhooks // Webhooks
InvalidWebhooks , InvalidRequest , BAD_REQUEST ; InvalidWebhooks , InvalidRequest , BAD_REQUEST ;
InvalidWebhookUrl , InvalidRequest , BAD_REQUEST ; InvalidWebhookUrl , InvalidRequest , BAD_REQUEST ;

View File

@@ -2,17 +2,10 @@
pub mod batch_view; pub mod batch_view;
pub mod batches; pub mod batches;
#[cfg(not(feature = "enterprise"))]
pub mod community_edition;
pub mod compression; pub mod compression;
pub mod deserr; pub mod deserr;
pub mod document_formats; pub mod document_formats;
#[cfg(feature = "enterprise")]
pub mod enterprise_edition; pub mod enterprise_edition;
#[cfg(not(feature = "enterprise"))]
pub use community_edition as current_edition;
#[cfg(feature = "enterprise")]
pub use enterprise_edition as current_edition;
pub mod error; pub mod error;
pub mod facet_values_sort; pub mod facet_values_sort;
pub mod features; pub mod features;
@@ -20,7 +13,6 @@ pub mod index_uid;
pub mod index_uid_pattern; pub mod index_uid_pattern;
pub mod keys; pub mod keys;
pub mod locales; pub mod locales;
pub mod network;
pub mod settings; pub mod settings;
pub mod star_or; pub mod star_or;
pub mod task_view; pub mod task_view;

View File

@@ -1,24 +0,0 @@
use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub sharding: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,
#[serde(default)]
pub write_api_key: Option<String>,
}
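
A hedged sketch of the payload shape the Network and Remote structs above accept, assuming their serde derives (rename_all = "camelCase", rename = "self") are in scope; the values and the parse_network helper are made up for illustration.

use serde_json::json;

// Illustrative only: turn a network description into the `Network` struct above.
fn parse_network(value: serde_json::Value) -> serde_json::Result<Network> {
    serde_json::from_value(value)
}

fn main() -> serde_json::Result<()> {
    let network = parse_network(json!({
        "self": "ms-00",
        "remotes": {
            "ms-01": { "url": "http://ms-01:7700", "searchApiKey": "a-key" }
        },
        "sharding": true
    }))?;
    assert_eq!(network.local.as_deref(), Some("ms-00"));
    assert!(network.sharding);
    Ok(())
}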

View File

@@ -326,6 +326,12 @@ pub struct Settings<T> {
#[schema(value_type = Option<VectorStoreBackend>)] #[schema(value_type = Option<VectorStoreBackend>)]
pub vector_store: Setting<VectorStoreBackend>, pub vector_store: Setting<VectorStoreBackend>,
/// Function to execute after an update
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
#[deserr(default, error = DeserrJsonError<InvalidSettingsexecuteAfterUpdate>)]
#[schema(value_type = Option<String>, example = json!("doc.likes += 1"))]
pub execute_after_update: Setting<String>,
#[serde(skip)] #[serde(skip)]
#[deserr(skip)] #[deserr(skip)]
pub _kind: PhantomData<T>, pub _kind: PhantomData<T>,
@@ -395,6 +401,7 @@ impl Settings<Checked> {
prefix_search: Setting::Reset, prefix_search: Setting::Reset,
chat: Setting::Reset, chat: Setting::Reset,
vector_store: Setting::Reset, vector_store: Setting::Reset,
execute_after_update: Setting::Reset,
_kind: PhantomData, _kind: PhantomData,
} }
} }
@@ -423,6 +430,7 @@ impl Settings<Checked> {
prefix_search, prefix_search,
chat, chat,
vector_store, vector_store,
execute_after_update,
_kind, _kind,
} = self; } = self;
@@ -449,6 +457,7 @@ impl Settings<Checked> {
prefix_search, prefix_search,
vector_store, vector_store,
chat, chat,
execute_after_update,
_kind: PhantomData, _kind: PhantomData,
} }
} }
@@ -501,6 +510,7 @@ impl Settings<Unchecked> {
prefix_search: self.prefix_search, prefix_search: self.prefix_search,
chat: self.chat, chat: self.chat,
vector_store: self.vector_store, vector_store: self.vector_store,
execute_after_update: self.execute_after_update,
_kind: PhantomData, _kind: PhantomData,
} }
} }
@@ -582,6 +592,10 @@ impl Settings<Unchecked> {
prefix_search: other.prefix_search.or(self.prefix_search), prefix_search: other.prefix_search.or(self.prefix_search),
chat: other.chat.clone().or(self.chat.clone()), chat: other.chat.clone().or(self.chat.clone()),
vector_store: other.vector_store.or(self.vector_store), vector_store: other.vector_store.or(self.vector_store),
execute_after_update: other
.execute_after_update
.clone()
.or(self.execute_after_update.clone()),
_kind: PhantomData, _kind: PhantomData,
} }
} }
@@ -622,6 +636,7 @@ pub fn apply_settings_to_builder(
prefix_search, prefix_search,
chat, chat,
vector_store, vector_store,
execute_after_update,
_kind, _kind,
} = settings; } = settings;
@@ -845,6 +860,14 @@ pub fn apply_settings_to_builder(
Setting::Reset => builder.reset_vector_store(), Setting::Reset => builder.reset_vector_store(),
Setting::NotSet => (), Setting::NotSet => (),
} }
match execute_after_update {
Setting::Set(execute_after_update) => {
builder.set_execute_after_update(execute_after_update.clone())
}
Setting::Reset => builder.reset_execute_after_update(),
Setting::NotSet => (),
}
} }
pub enum SecretPolicy { pub enum SecretPolicy {
@@ -944,13 +967,13 @@ pub fn settings(
.collect(); .collect();
let vector_store = index.get_vector_store(rtxn)?; let vector_store = index.get_vector_store(rtxn)?;
let embedders = Setting::Set(embedders); let embedders = Setting::Set(embedders);
let search_cutoff_ms = index.search_cutoff(rtxn)?; let search_cutoff_ms = index.search_cutoff(rtxn)?;
let localized_attributes_rules = index.localized_attributes_rules(rtxn)?; let localized_attributes_rules = index.localized_attributes_rules(rtxn)?;
let prefix_search = index.prefix_search(rtxn)?.map(PrefixSearchSettings::from); let prefix_search = index.prefix_search(rtxn)?.map(PrefixSearchSettings::from);
let facet_search = index.facet_search(rtxn)?; let facet_search = index.facet_search(rtxn)?;
let chat = index.chat_config(rtxn).map(ChatSettings::from)?; let chat = index.chat_config(rtxn).map(ChatSettings::from)?;
let execute_after_update = index.execute_after_update(rtxn)?;
let mut settings = Settings { let mut settings = Settings {
displayed_attributes: match displayed_attributes { displayed_attributes: match displayed_attributes {
@@ -995,6 +1018,10 @@ pub fn settings(
Some(vector_store) => Setting::Set(vector_store), Some(vector_store) => Setting::Set(vector_store),
None => Setting::Reset, None => Setting::Reset,
}, },
execute_after_update: match execute_after_update {
Some(function) => Setting::Set(function.to_string()),
None => Setting::NotSet,
},
_kind: PhantomData, _kind: PhantomData,
}; };
@@ -1225,6 +1252,7 @@ pub(crate) mod test {
prefix_search: Setting::NotSet, prefix_search: Setting::NotSet,
chat: Setting::NotSet, chat: Setting::NotSet,
vector_store: Setting::NotSet, vector_store: Setting::NotSet,
execute_after_update: Setting::NotSet,
_kind: PhantomData::<Unchecked>, _kind: PhantomData::<Unchecked>,
}; };
@@ -1258,7 +1286,7 @@ pub(crate) mod test {
prefix_search: Setting::NotSet, prefix_search: Setting::NotSet,
chat: Setting::NotSet, chat: Setting::NotSet,
vector_store: Setting::NotSet, vector_store: Setting::NotSet,
execute_after_update: Setting::NotSet,
_kind: PhantomData::<Unchecked>, _kind: PhantomData::<Unchecked>,
}; };
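
A simplified stand-in (not milli's actual Setting type) to illustrate the merge rule applied to execute_after_update above: the incoming update wins whenever it says anything, and a field it does not mention keeps its previous value.

// Illustrative stand-in only; names are not from the codebase.
#[derive(Clone, Debug, PartialEq)]
enum MiniSetting<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> MiniSetting<T> {
    fn or(self, fallback: Self) -> Self {
        match self {
            MiniSetting::NotSet => fallback,
            decided => decided,
        }
    }
}

fn main() {
    let previous = MiniSetting::Set("doc.likes += 1".to_string());
    // An update that does not mention executeAfterUpdate keeps the previous value...
    assert_eq!(MiniSetting::NotSet.or(previous.clone()), previous);
    // ...while an explicit reset erases it.
    assert_eq!(MiniSetting::<String>::Reset.or(previous), MiniSetting::Reset);
}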

View File

@@ -14,91 +14,91 @@ default-run = "meilisearch"
[dependencies] [dependencies]
actix-cors = "0.7.1" actix-cors = "0.7.1"
actix-http = { version = "3.11.2", default-features = false, features = [ actix-http = { version = "3.11.0", default-features = false, features = [
"compress-brotli", "compress-brotli",
"compress-gzip", "compress-gzip",
"rustls-0_23", "rustls-0_23",
] } ] }
actix-utils = "3.0.1" actix-utils = "3.0.1"
actix-web = { version = "4.12.0", default-features = false, features = [ actix-web = { version = "4.11.0", default-features = false, features = [
"macros", "macros",
"compress-brotli", "compress-brotli",
"compress-gzip", "compress-gzip",
"cookies", "cookies",
"rustls-0_23", "rustls-0_23",
] } ] }
anyhow = { version = "1.0.100", features = ["backtrace"] } anyhow = { version = "1.0.98", features = ["backtrace"] }
bstr = "1.12.1" bstr = "1.12.0"
byte-unit = { version = "5.1.6", features = ["serde"] } byte-unit = { version = "5.1.6", features = ["serde"] }
bytes = "1.11.0" bytes = "1.10.1"
bumpalo = "3.19.0" bumpalo = "3.18.1"
clap = { version = "4.5.52", features = ["derive", "env"] } clap = { version = "4.5.40", features = ["derive", "env"] }
crossbeam-channel = "0.5.15" crossbeam-channel = "0.5.15"
deserr = { version = "0.6.4", features = ["actix-web"] } deserr = { version = "0.6.3", features = ["actix-web"] }
dump = { path = "../dump" } dump = { path = "../dump" }
either = "1.15.0" either = "1.15.0"
file-store = { path = "../file-store" } file-store = { path = "../file-store" }
flate2 = "1.1.5" flate2 = "1.1.2"
fst = "0.4.7" fst = "0.4.7"
futures = "0.3.31" futures = "0.3.31"
futures-util = "0.3.31" futures-util = "0.3.31"
index-scheduler = { path = "../index-scheduler" } index-scheduler = { path = "../index-scheduler" }
indexmap = { version = "2.12.0", features = ["serde"] } indexmap = { version = "2.9.0", features = ["serde"] }
is-terminal = "0.4.17" is-terminal = "0.4.16"
itertools = "0.14.0" itertools = "0.14.0"
jsonwebtoken = "9.3.1" jsonwebtoken = "9.3.1"
lazy_static = "1.5.0" lazy_static = "1.5.0"
meilisearch-auth = { path = "../meilisearch-auth" } meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" } meilisearch-types = { path = "../meilisearch-types" }
memmap2 = "0.9.9" memmap2 = "0.9.7"
mimalloc = { version = "0.1.48", default-features = false } mimalloc = { version = "0.1.47", default-features = false }
mime = "0.3.17" mime = "0.3.17"
num_cpus = "1.17.0" num_cpus = "1.17.0"
obkv = "0.3.0" obkv = "0.3.0"
once_cell = "1.21.3" once_cell = "1.21.3"
ordered-float = "5.1.0" ordered-float = "5.0.0"
parking_lot = "0.12.5" parking_lot = "0.12.4"
permissive-json-pointer = { path = "../permissive-json-pointer" } permissive-json-pointer = { path = "../permissive-json-pointer" }
pin-project-lite = "0.2.16" pin-project-lite = "0.2.16"
platform-dirs = "0.3.0" platform-dirs = "0.3.0"
prometheus = { version = "0.14.0", features = ["process"] } prometheus = { version = "0.14.0", features = ["process"] }
rand = "0.8.5" rand = "0.8.5"
rayon = "1.11.0" rayon = "1.10.0"
regex = "1.12.2" regex = "1.11.1"
reqwest = { version = "0.12.24", features = [ reqwest = { version = "0.12.20", features = [
"rustls-tls", "rustls-tls",
"json", "json",
], default-features = false } ], default-features = false }
rustls = { version = "0.23.35", features = ["ring"], default-features = false } rustls = { version = "0.23.28", features = ["ring"], default-features = false }
rustls-pki-types = { version = "1.13.0", features = ["alloc"] } rustls-pki-types = { version = "1.12.0", features = ["alloc"] }
rustls-pemfile = "2.2.0" rustls-pemfile = "2.2.0"
segment = { version = "0.2.6" } segment = { version = "0.2.6" }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["preserve_order"] } serde_json = { version = "1.0.140", features = ["preserve_order"] }
sha2 = "0.10.9" sha2 = "0.10.9"
siphasher = "1.0.1" siphasher = "1.0.1"
slice-group-by = "0.3.1" slice-group-by = "0.3.1"
static-files = { version = "0.3.1", optional = true } static-files = { version = "0.2.5", optional = true }
sysinfo = "0.37.2" sysinfo = "0.35.2"
tar = "0.4.44" tar = "0.4.44"
tempfile = "3.23.0" tempfile = "3.20.0"
thiserror = "2.0.17" thiserror = "2.0.12"
time = { version = "0.3.44", features = [ time = { version = "0.3.41", features = [
"serde-well-known", "serde-well-known",
"formatting", "formatting",
"parsing", "parsing",
"macros", "macros",
] } ] }
tokio = { version = "1.48.0", features = ["full"] } tokio = { version = "1.45.1", features = ["full"] }
toml = "0.9.8" toml = "0.8.23"
uuid = { version = "1.18.1", features = ["serde", "v4", "v7"] } uuid = { version = "1.18.0", features = ["serde", "v4", "v7"] }
serde_urlencoded = "0.7.1" serde_urlencoded = "0.7.1"
termcolor = "1.4.1" termcolor = "1.4.1"
url = { version = "2.5.7", features = ["serde"] } url = { version = "2.5.4", features = ["serde"] }
tracing = "0.1.41" tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = ["json"] } tracing-subscriber = { version = "0.3.20", features = ["json"] }
tracing-trace = { version = "0.1.0", path = "../tracing-trace" } tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
tracing-actix-web = "0.7.19" tracing-actix-web = "0.7.18"
build-info = { version = "1.7.0", path = "../build-info" } build-info = { version = "1.7.0", path = "../build-info" }
roaring = "0.10.12" roaring = "0.10.12"
mopa-maintained = "0.2.3" mopa-maintained = "0.2.3"
@@ -114,35 +114,35 @@ utoipa = { version = "5.4.0", features = [
utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] } utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }
async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" } async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" }
secrecy = "0.10.3" secrecy = "0.10.3"
actix-web-lab = { version = "0.24.3", default-features = false } actix-web-lab = { version = "0.24.1", default-features = false }
urlencoding = "2.1.3" urlencoding = "2.1.3"
backoff = { version = "0.4.0", features = ["tokio"] } backoff = { version = "0.4.0", features = ["tokio"] }
humantime = { version = "2.3.0", default-features = false } humantime = { version = "2.3.0", default-features = false }
[dev-dependencies] [dev-dependencies]
actix-rt = "2.11.0" actix-rt = "2.10.0"
brotli = "8.0.2" brotli = "8.0.1"
# fixed version due to format breakages in v1.40 # fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["redactions"] } insta = { version = "=1.39.0", features = ["redactions"] }
manifest-dir-macros = "0.1.18" manifest-dir-macros = "0.1.18"
maplit = "1.0.2" maplit = "1.0.2"
meili-snap = { path = "../meili-snap" } meili-snap = { path = "../meili-snap" }
temp-env = "0.3.6" temp-env = "0.3.6"
wiremock = "0.6.5" wiremock = "0.6.3"
yaup = "0.3.1" yaup = "0.3.1"
[build-dependencies] [build-dependencies]
anyhow = { version = "1.0.100", optional = true } anyhow = { version = "1.0.98", optional = true }
cargo_toml = { version = "0.22.3", optional = true } cargo_toml = { version = "0.22.1", optional = true }
hex = { version = "0.4.3", optional = true } hex = { version = "0.4.3", optional = true }
reqwest = { version = "0.12.24", features = [ reqwest = { version = "0.12.20", features = [
"blocking", "blocking",
"rustls-tls", "rustls-tls",
], default-features = false, optional = true } ], default-features = false, optional = true }
sha-1 = { version = "0.10.1", optional = true } sha-1 = { version = "0.10.1", optional = true }
static-files = { version = "0.3.1", optional = true } static-files = { version = "0.2.5", optional = true }
tempfile = { version = "3.23.0", optional = true } tempfile = { version = "3.20.0", optional = true }
zip = { version = "6.0.0", optional = true } zip = { version = "4.1.0", optional = true }
[features] [features]
default = ["meilisearch-types/all-tokenizations", "mini-dashboard"] default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]
@@ -160,7 +160,6 @@ mini-dashboard = [
] ]
chinese = ["meilisearch-types/chinese"] chinese = ["meilisearch-types/chinese"]
chinese-pinyin = ["meilisearch-types/chinese-pinyin"] chinese-pinyin = ["meilisearch-types/chinese-pinyin"]
enterprise = ["meilisearch-types/enterprise"]
hebrew = ["meilisearch-types/hebrew"] hebrew = ["meilisearch-types/hebrew"]
japanese = ["meilisearch-types/japanese"] japanese = ["meilisearch-types/japanese"]
korean = ["meilisearch-types/korean"] korean = ["meilisearch-types/korean"]

View File

@@ -1,7 +1,7 @@
use std::any::TypeId; use std::any::TypeId;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::fs; use std::fs;
use std::path::Path; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
@@ -344,14 +344,14 @@ impl Infos {
experimental_no_edition_2024_for_dumps, experimental_no_edition_2024_for_dumps,
experimental_vector_store_setting: vector_store_setting, experimental_vector_store_setting: vector_store_setting,
gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(), gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(),
db_path: db_path != Path::new("./data.ms"), db_path: db_path != PathBuf::from("./data.ms"),
import_dump: import_dump.is_some(), import_dump: import_dump.is_some(),
dump_dir: dump_dir != Path::new("dumps/"), dump_dir: dump_dir != PathBuf::from("dumps/"),
ignore_missing_dump, ignore_missing_dump,
ignore_dump_if_db_exists, ignore_dump_if_db_exists,
import_snapshot: import_snapshot.is_some(), import_snapshot: import_snapshot.is_some(),
schedule_snapshot, schedule_snapshot,
snapshot_dir: snapshot_dir != Path::new("snapshots/"), snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
uses_s3_snapshots: s3_snapshot_options.is_some(), uses_s3_snapshots: s3_snapshot_options.is_some(),
ignore_missing_snapshot, ignore_missing_snapshot,
ignore_snapshot_if_db_exists, ignore_snapshot_if_db_exists,

View File

@@ -231,14 +231,8 @@ pub fn setup_meilisearch(
max_number_of_tasks: 1_000_000, max_number_of_tasks: 1_000_000,
max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks, max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.map_or_else( batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.map_or_else(
|| {
opt.indexer_options
.max_indexing_memory
// By default, we use half of the available memory to determine the size of batched tasks // By default, we use half of the available memory to determine the size of batched tasks
.map_or(u64::MAX, |mem| mem.as_u64() / 2) || opt.indexer_options.max_indexing_memory.map_or(u64::MAX, |mem| mem.as_u64() / 2),
// And never exceed 10 GiB when we infer the limit
.min(10 * 1024 * 1024 * 1024)
},
|size| size.as_u64(), |size| size.as_u64(),
), ),
index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize, index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
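
The inferred default above can be read as a small pure function. This is a hedged restatement of the variant with the 10 GiB clamp, using raw byte counts instead of the byte-unit types the real option goes through.

// Illustrative restatement only: half of the max indexing memory when one is
// configured, and never more than 10 GiB when the limit has to be inferred.
fn inferred_batched_tasks_size_limit(max_indexing_memory_bytes: Option<u64>) -> u64 {
    max_indexing_memory_bytes
        .map_or(u64::MAX, |mem| mem / 2)
        .min(10 * 1024 * 1024 * 1024)
}

fn main() {
    const GIB: u64 = 1024 * 1024 * 1024;
    assert_eq!(inferred_batched_tasks_size_limit(Some(4 * GIB)), 2 * GIB);
    // With no configured memory limit, the clamp alone decides.
    assert_eq!(inferred_batched_tasks_size_limit(None), 10 * GIB);
}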

View File

@@ -474,8 +474,7 @@ pub struct Opt {
pub experimental_max_number_of_batched_tasks: usize, pub experimental_max_number_of_batched_tasks: usize,
/// Experimentally controls the maximum total size, in bytes, of tasks that will be processed /// Experimentally controls the maximum total size, in bytes, of tasks that will be processed
/// simultaneously. When unspecified, defaults to half of the maximum indexing memory and /// simultaneously. When unspecified, defaults to half of the maximum indexing memory.
/// clamped to 10 GiB.
/// ///
/// See: <https://github.com/orgs/meilisearch/discussions/801> /// See: <https://github.com/orgs/meilisearch/discussions/801>
#[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE)] #[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE)]

View File

@@ -1,39 +0,0 @@
pub mod proxy {
use std::fs::File;
use actix_web::HttpRequest;
use index_scheduler::IndexScheduler;
use crate::error::MeilisearchHttpError;
pub enum Body<T: serde::Serialize> {
NdJsonPayload,
Inline(T),
None,
}
impl Body<()> {
pub fn with_ndjson_payload(_file: File) -> Self {
Self::NdJsonPayload
}
pub fn none() -> Self {
Self::None
}
}
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
pub async fn proxy<T: serde::Serialize>(
_index_scheduler: &IndexScheduler,
_index_uid: &str,
_req: &HttpRequest,
_network: meilisearch_types::network::Network,
_body: Body<T>,
_task: &meilisearch_types::tasks::Task,
) -> Result<(), MeilisearchHttpError> {
Ok(())
}
}

View File

@@ -45,7 +45,7 @@ use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData; use crate::extractors::authentication::GuardedData;
use crate::extractors::payload::Payload; use crate::extractors::payload::Payload;
use crate::extractors::sequential_extractor::SeqHandler; use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::current_edition::proxy::{proxy, Body}; use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
use crate::routes::indexes::search::fix_sort_query_parameters; use crate::routes::indexes::search::fix_sort_query_parameters;
use crate::routes::{ use crate::routes::{
get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT, get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
@@ -367,7 +367,7 @@ pub async fn delete_document(
.await?? .await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?; proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
} }
@@ -1098,7 +1098,7 @@ async fn document_addition(
} }
}; };
if network.sharding() { if network.sharding {
if let Some(file) = file { if let Some(file) = file {
proxy( proxy(
&index_scheduler, &index_scheduler,
@@ -1222,7 +1222,7 @@ pub async fn delete_documents_batch(
.await?? .await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?; proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
} }
@@ -1320,7 +1320,7 @@ pub async fn delete_documents_by_filter(
.await?? .await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?; proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
} }
@@ -1475,7 +1475,7 @@ pub async fn edit_documents_by_function(
.await?? .await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?; proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
} }
@@ -1549,7 +1549,7 @@ pub async fn clear_all_documents(
.await?? .await??
}; };
if network.sharding() && !dry_run { if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?; proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
} }

View File

@@ -52,7 +52,7 @@ pub async fn proxy<T: serde::Serialize>(
index_scheduler: &IndexScheduler, index_scheduler: &IndexScheduler,
index_uid: &str, index_uid: &str,
req: &HttpRequest, req: &HttpRequest,
network: meilisearch_types::network::Network, network: meilisearch_types::enterprise_edition::network::Network,
body: Body<T>, body: Body<T>,
task: &meilisearch_types::tasks::Task, task: &meilisearch_types::tasks::Task,
) -> Result<(), MeilisearchHttpError> { ) -> Result<(), MeilisearchHttpError> {

View File

@@ -30,16 +30,7 @@ use crate::Opt;
pub mod compact; pub mod compact;
pub mod documents; pub mod documents;
#[cfg(not(feature = "enterprise"))]
mod community_edition;
#[cfg(feature = "enterprise")]
mod enterprise_edition; mod enterprise_edition;
#[cfg(not(feature = "enterprise"))]
use community_edition as current_edition;
#[cfg(feature = "enterprise")]
use enterprise_edition as current_edition;
pub mod facet_search; pub mod facet_search;
pub mod search; pub mod search;
mod search_analytics; mod search_analytics;
@@ -50,7 +41,7 @@ mod settings_analytics;
pub mod similar; pub mod similar;
mod similar_analytics; mod similar_analytics;
pub use current_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER}; pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
#[derive(OpenApi)] #[derive(OpenApi)]
#[openapi( #[openapi(

View File

@@ -498,6 +498,17 @@ make_setting_routes!(
camelcase_attr: "facetSearch", camelcase_attr: "facetSearch",
analytics: FacetSearchAnalytics analytics: FacetSearchAnalytics
}, },
{
route: "/execute-after-update",
update_verb: put,
value_type: String,
err_type: meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsexecuteAfterUpdate,
>,
attr: execute_after_update,
camelcase_attr: "executeAfterUpdate",
analytics: ExecuteAfterUpdateAnalytics
},
{ {
route: "/prefix-search", route: "/prefix-search",
update_verb: put, update_verb: put,
@@ -619,6 +630,9 @@ pub async fn update_all(
new_settings.non_separator_tokens.as_ref().set(), new_settings.non_separator_tokens.as_ref().set(),
), ),
facet_search: FacetSearchAnalytics::new(new_settings.facet_search.as_ref().set()), facet_search: FacetSearchAnalytics::new(new_settings.facet_search.as_ref().set()),
execute_after_update: ExecuteAfterUpdateAnalytics::new(
new_settings.execute_after_update.as_ref().set(),
),
prefix_search: PrefixSearchAnalytics::new(new_settings.prefix_search.as_ref().set()), prefix_search: PrefixSearchAnalytics::new(new_settings.prefix_search.as_ref().set()),
chat: ChatAnalytics::new(new_settings.chat.as_ref().set()), chat: ChatAnalytics::new(new_settings.chat.as_ref().set()),
vector_store: VectorStoreAnalytics::new(new_settings.vector_store.as_ref().set()), vector_store: VectorStoreAnalytics::new(new_settings.vector_store.as_ref().set()),
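
To make the new route concrete, a hedged sketch of the request bodies it implies: the dedicated sub-route takes a bare string, while the general settings route nests the same value under its camelCase key. The value reuses the doc.likes += 1 example from the schema annotation; this is not a tested client.

use serde_json::json;

fn main() {
    // PUT /indexes/<uid>/settings/execute-after-update: the body is a plain JSON string.
    let sub_route_body = json!("doc.likes += 1");

    // PATCH /indexes/<uid>/settings: the same value under its camelCase key.
    let settings_body = json!({ "executeAfterUpdate": "doc.likes += 1" });

    println!("{sub_route_body}\n{settings_body}");
}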

View File

@@ -42,6 +42,7 @@ pub struct SettingsAnalytics {
pub prefix_search: PrefixSearchAnalytics, pub prefix_search: PrefixSearchAnalytics,
pub chat: ChatAnalytics, pub chat: ChatAnalytics,
pub vector_store: VectorStoreAnalytics, pub vector_store: VectorStoreAnalytics,
pub execute_after_update: ExecuteAfterUpdateAnalytics,
} }
impl Aggregate for SettingsAnalytics { impl Aggregate for SettingsAnalytics {
@@ -197,6 +198,9 @@ impl Aggregate for SettingsAnalytics {
set: new.facet_search.set | self.facet_search.set, set: new.facet_search.set | self.facet_search.set,
value: new.facet_search.value.or(self.facet_search.value), value: new.facet_search.value.or(self.facet_search.value),
}, },
execute_after_update: ExecuteAfterUpdateAnalytics {
set: new.execute_after_update.set | self.execute_after_update.set,
},
prefix_search: PrefixSearchAnalytics { prefix_search: PrefixSearchAnalytics {
set: new.prefix_search.set | self.prefix_search.set, set: new.prefix_search.set | self.prefix_search.set,
value: new.prefix_search.value.or(self.prefix_search.value), value: new.prefix_search.value.or(self.prefix_search.value),
@@ -669,6 +673,21 @@ impl FacetSearchAnalytics {
} }
} }
#[derive(Serialize, Default)]
pub struct ExecuteAfterUpdateAnalytics {
pub set: bool,
}
impl ExecuteAfterUpdateAnalytics {
pub fn new(distinct: Option<&String>) -> Self {
Self { set: distinct.is_some() }
}
pub fn into_settings(self) -> SettingsAnalytics {
SettingsAnalytics { execute_after_update: self, ..Default::default() }
}
}
#[derive(Serialize, Default)] #[derive(Serialize, Default)]
pub struct PrefixSearchAnalytics { pub struct PrefixSearchAnalytics {
pub set: bool, pub set: bool,

View File

@@ -183,11 +183,7 @@ pub async fn get_metrics(
crate::metrics::MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS.reset(); crate::metrics::MEILISEARCH_LAST_FINISHED_BATCHES_PROGRESS_TRACE_MS.reset();
let (batches, _total) = index_scheduler.get_batches_from_authorized_indexes( let (batches, _total) = index_scheduler.get_batches_from_authorized_indexes(
// Fetch the finished batches... // Fetch the finished batches...
&Query { &Query { statuses: Some(vec![Status::Succeeded, Status::Failed]), ..Query::default() },
statuses: Some(vec![Status::Succeeded, Status::Failed]),
limit: 1,
..Query::default()
},
auth_filters, auth_filters,
)?; )?;
// ...and get the last batch only. // ...and get the last batch only.
@@ -214,7 +210,7 @@ pub async fn get_metrics(
let task_queue_latency_seconds = index_scheduler let task_queue_latency_seconds = index_scheduler
.get_tasks_from_authorized_indexes( .get_tasks_from_authorized_indexes(
&Query { &Query {
limit: 1, limit: Some(1),
reverse: Some(true), reverse: Some(true),
statuses: Some(vec![Status::Enqueued, Status::Processing]), statuses: Some(vec![Status::Enqueued, Status::Processing]),
..Query::default() ..Query::default()

View File

@@ -7,6 +7,7 @@ use deserr::Deserr;
use index_scheduler::IndexScheduler; use index_scheduler::IndexScheduler;
use itertools::{EitherOrBoth, Itertools}; use itertools::{EitherOrBoth, Itertools};
use meilisearch_types::deserr::DeserrJsonError; use meilisearch_types::deserr::DeserrJsonError;
use meilisearch_types::enterprise_edition::network::{Network as DbNetwork, Remote as DbRemote};
use meilisearch_types::error::deserr_codes::{ use meilisearch_types::error::deserr_codes::{
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding, InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
InvalidNetworkUrl, InvalidNetworkWriteApiKey, InvalidNetworkUrl, InvalidNetworkWriteApiKey,
@@ -14,7 +15,6 @@ use meilisearch_types::error::deserr_codes::{
use meilisearch_types::error::ResponseError; use meilisearch_types::error::ResponseError;
use meilisearch_types::keys::actions; use meilisearch_types::keys::actions;
use meilisearch_types::milli::update::Setting; use meilisearch_types::milli::update::Setting;
use meilisearch_types::network::{Network as DbNetwork, Remote as DbRemote};
use serde::Serialize; use serde::Serialize;
use tracing::debug; use tracing::debug;
use utoipa::{OpenApi, ToSchema}; use utoipa::{OpenApi, ToSchema};
@@ -211,16 +211,6 @@ async fn patch_network(
let old_network = index_scheduler.network(); let old_network = index_scheduler.network();
debug!(parameters = ?new_network, "Patch network"); debug!(parameters = ?new_network, "Patch network");
#[cfg(not(feature = "enterprise"))]
if new_network.sharding.set().is_some() {
use meilisearch_types::error::Code;
return Err(ResponseError::from_msg(
"Meilisearch Enterprise Edition is required to set `network.sharding`".into(),
Code::RequiresEnterpriseEdition,
));
}
let merged_self = match new_network.local { let merged_self = match new_network.local {
Setting::Set(new_self) => Some(new_self), Setting::Set(new_self) => Some(new_self),
Setting::Reset => None, Setting::Reset => None,
@@ -322,7 +312,6 @@ async fn patch_network(
let merged_network = let merged_network =
DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding }; DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
index_scheduler.put_network(merged_network.clone())?; index_scheduler.put_network(merged_network.clone())?;
debug!(returns = ?merged_network, "Patch network"); debug!(returns = ?merged_network, "Patch network");
Ok(HttpResponse::Ok().json(merged_network)) Ok(HttpResponse::Ok().json(merged_network))

View File

@@ -126,7 +126,7 @@ pub struct TasksFilterQuery {
impl TasksFilterQuery { impl TasksFilterQuery {
pub(crate) fn into_query(self) -> Query { pub(crate) fn into_query(self) -> Query {
Query { Query {
limit: self.limit.0 as usize, limit: Some(self.limit.0),
from: self.from.as_deref().copied(), from: self.from.as_deref().copied(),
reverse: self.reverse.as_deref().copied(), reverse: self.reverse.as_deref().copied(),
batch_uids: self.batch_uids.merge_star_and_none(), batch_uids: self.batch_uids.merge_star_and_none(),
@@ -225,8 +225,7 @@ pub struct TaskDeletionOrCancelationQuery {
impl TaskDeletionOrCancelationQuery { impl TaskDeletionOrCancelationQuery {
fn into_query(self) -> Query { fn into_query(self) -> Query {
Query { Query {
// We want to delete all tasks that match the given filters limit: None,
limit: usize::MAX,
from: None, from: None,
reverse: None, reverse: None,
batch_uids: self.batch_uids.merge_star_and_none(), batch_uids: self.batch_uids.merge_star_and_none(),

View File

@@ -9,12 +9,12 @@ use std::vec::{IntoIter, Vec};
use actix_http::StatusCode; use actix_http::StatusCode;
use index_scheduler::{IndexScheduler, RoFeatures}; use index_scheduler::{IndexScheduler, RoFeatures};
use itertools::Itertools; use itertools::Itertools;
use meilisearch_types::enterprise_edition::network::{Network, Remote};
use meilisearch_types::error::ResponseError; use meilisearch_types::error::ResponseError;
use meilisearch_types::milli::order_by_map::OrderByMap; use meilisearch_types::milli::order_by_map::OrderByMap;
use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue}; use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue};
use meilisearch_types::milli::vector::Embedding; use meilisearch_types::milli::vector::Embedding;
use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET}; use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET};
use meilisearch_types::network::{Network, Remote};
use roaring::RoaringBitmap; use roaring::RoaringBitmap;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use uuid::Uuid; use uuid::Uuid;

View File

@@ -1,6 +1,6 @@
pub use error::ProxySearchError; pub use error::ProxySearchError;
use error::ReqwestErrorWithoutUrl; use error::ReqwestErrorWithoutUrl;
use meilisearch_types::network::Remote; use meilisearch_types::enterprise_edition::network::Remote;
use rand::Rng as _; use rand::Rng as _;
use reqwest::{Client, Response, StatusCode}; use reqwest::{Client, Response, StatusCode};
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;

View File

@@ -789,12 +789,11 @@ impl TryFrom<Value> for ExternalDocumentId {
} }
} }
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Deserr, ToSchema, Serialize)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Deserr, ToSchema, Serialize)]
#[deserr(rename_all = camelCase)] #[deserr(rename_all = camelCase)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub enum MatchingStrategy { pub enum MatchingStrategy {
/// Remove query words from last to first /// Remove query words from last to first
#[default]
Last, Last,
/// All query words are mandatory /// All query words are mandatory
All, All,
@@ -802,6 +801,12 @@ pub enum MatchingStrategy {
Frequency, Frequency,
} }
impl Default for MatchingStrategy {
fn default() -> Self {
Self::Last
}
}
impl From<MatchingStrategy> for TermsMatchingStrategy { impl From<MatchingStrategy> for TermsMatchingStrategy {
fn from(other: MatchingStrategy) -> Self { fn from(other: MatchingStrategy) -> Self {
match other { match other {
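
Side note on the pattern involved above: since Rust 1.62, #[derive(Default)] works on enums when one unit variant carries a #[default] attribute, which is what the derive-based variant of MatchingStrategy relies on instead of a hand-written impl. A minimal, self-contained illustration:

// Illustrative only; mirrors the shape of MatchingStrategy without its other derives.
#[derive(Debug, Default, PartialEq)]
enum Strategy {
    #[default]
    Last,
    All,
    Frequency,
}

fn main() {
    assert_eq!(Strategy::default(), Strategy::Last);
}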

View File

@@ -187,7 +187,7 @@ macro_rules! compute_forbidden_search {
#[actix_rt::test] #[actix_rt::test]
async fn search_authorized_simple_token() { async fn search_authorized_simple_token() {
let tenant_tokens = [ let tenant_tokens = vec![
hashmap! { hashmap! {
"searchRules" => json!({"*": {}}), "searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp()) "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -239,7 +239,7 @@ async fn search_authorized_simple_token() {
#[actix_rt::test] #[actix_rt::test]
async fn search_authorized_filter_token() { async fn search_authorized_filter_token() {
let tenant_tokens = [ let tenant_tokens = vec![
hashmap! { hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}), "searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp()) "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -292,7 +292,7 @@ async fn search_authorized_filter_token() {
#[actix_rt::test] #[actix_rt::test]
async fn filter_search_authorized_filter_token() { async fn filter_search_authorized_filter_token() {
let tenant_tokens = [ let tenant_tokens = vec![
hashmap! { hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}), "searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp()) "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -353,7 +353,7 @@ async fn filter_search_authorized_filter_token() {
/// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above. /// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above.
#[actix_rt::test] #[actix_rt::test]
async fn error_search_token_forbidden_parent_key() { async fn error_search_token_forbidden_parent_key() {
let tenant_tokens = [ let tenant_tokens = vec![
hashmap! { hashmap! {
"searchRules" => json!({"*": {}}), "searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp()) "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -389,7 +389,7 @@ async fn error_search_token_forbidden_parent_key() {
#[actix_rt::test] #[actix_rt::test]
async fn error_search_forbidden_token() { async fn error_search_forbidden_token() {
let tenant_tokens = [ let tenant_tokens = vec![
// bad index // bad index
hashmap! { hashmap! {
"searchRules" => json!({"products": {}}), "searchRules" => json!({"products": {}}),

View File

@@ -680,7 +680,7 @@ async fn multi_search_authorized_simple_token() {
#[actix_rt::test] #[actix_rt::test]
async fn single_search_authorized_filter_token() { async fn single_search_authorized_filter_token() {
let tenant_tokens = [ let tenant_tokens = vec![
hashmap! { hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}), "searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp()) "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -733,7 +733,7 @@ async fn single_search_authorized_filter_token() {
#[actix_rt::test]
async fn multi_search_authorized_filter_token() {
let both_tenant_tokens = [
let both_tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"sales": {"filter": "color = blue"}, "products": {"filter": "doggos.age <= 5"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -842,7 +842,7 @@ async fn filter_single_search_authorized_filter_token() {
#[actix_rt::test]
async fn filter_multi_search_authorized_filter_token() {
let tenant_tokens = [
let tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"sales": {"filter": "color = blue"}, "products": {"filter": "doggos.age <= 5"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -900,7 +900,7 @@ async fn filter_multi_search_authorized_filter_token() {
/// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above.
#[actix_rt::test]
async fn error_single_search_token_forbidden_parent_key() {
let tenant_tokens = [
let tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -941,7 +941,7 @@ async fn error_single_search_token_forbidden_parent_key() {
/// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above.
#[actix_rt::test]
async fn error_multi_search_token_forbidden_parent_key() {
let tenant_tokens = [
let tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())

View File

@@ -1339,266 +1339,3 @@ async fn get_document_with_vectors() {
}
"###);
}
#[actix_rt::test]
async fn test_fetch_documents_pagination_with_sorting() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
// Set name as sortable attribute
let (task, code) = index.update_settings_sortable_attributes(json!(["name"])).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
let documents = json!((0..50)
.map(|i| json!({"id": i, "name": format!("doc_{:05}", std::cmp::min(i, 5))}))
.collect::<Vec<_>>());
// Add documents as described in the bug report
let (task, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Request 1 (first page): offset 0, limit 2
let (response, code) = index
.fetch_documents(json!({
"offset": 0,
"limit": 2,
"sort": ["name:asc"]
}))
.await;
assert_eq!(code, 200);
let results = response["results"].as_array().unwrap();
snapshot!(json_string!(results), @r###"
[
{
"id": 0,
"name": "doc_00000"
},
{
"id": 1,
"name": "doc_00001"
}
]
"###);
// Request 2 (second page): offset 2, limit 2
let (response, code) = index
.fetch_documents(json!({
"offset": 2,
"limit": 2,
"sort": ["name:asc"]
}))
.await;
assert_eq!(code, 200);
let results = response["results"].as_array().unwrap();
snapshot!(json_string!(results), @r###"
[
{
"id": 2,
"name": "doc_00002"
},
{
"id": 3,
"name": "doc_00003"
}
]
"###);
// Request 3 (third page): offset 4, limit 2
let (response, code) = index
.fetch_documents(json!({
"offset": 4,
"limit": 2,
"sort": ["name:asc"]
}))
.await;
assert_eq!(code, 200);
let results = response["results"].as_array().unwrap();
snapshot!(json_string!(results), @r###"
[
{
"id": 4,
"name": "doc_00004"
},
{
"id": 5,
"name": "doc_00005"
}
]
"###);
// Request 4 (fourth page): offset 6, limit 2
let (response, code) = index
.fetch_documents(json!({
"offset": 6,
"limit": 2,
"sort": ["name:asc"]
}))
.await;
assert_eq!(code, 200);
let results = response["results"].as_array().unwrap();
snapshot!(json_string!(results), @r###"
[
{
"id": 6,
"name": "doc_00005"
},
{
"id": 7,
"name": "doc_00005"
}
]
"###);
}
// <https://github.com/meilisearch/meilisearch/issues/5998>
#[actix_rt::test]
async fn get_document_sort_field_not_in_any_document() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index.update_settings_sortable_attributes(json!(["created_at"])).await;
server.wait_task(task.uid()).await.succeeded();
let documents = json!([
{ "id": 1, "name": "Document 1" },
{ "id": 2, "name": "Document 2" }
]);
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = index
.fetch_documents(json!({
"sort": ["created_at:asc"]
}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"results": [
{
"id": 1,
"name": "Document 1"
},
{
"id": 2,
"name": "Document 2"
}
],
"offset": 0,
"limit": 20,
"total": 2
}
"###);
}
#[actix_rt::test]
async fn get_document_sort_includes_docs_without_field() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index.update_settings_sortable_attributes(json!(["created_at"])).await;
server.wait_task(task.uid()).await.succeeded();
let documents = json!([
{ "id": 1, "name": "Doc without created_at" },
{ "id": 2, "name": "Doc with created_at", "created_at": "2025-01-15" },
{ "id": 3, "name": "Another doc without created_at" },
{ "id": 4, "name": "Another doc with created_at", "created_at": "2025-01-10" }
]);
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = index
.fetch_documents(json!({
"sort": ["created_at:asc"]
}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"results": [
{
"id": 4,
"name": "Another doc with created_at",
"created_at": "2025-01-10"
},
{
"id": 2,
"name": "Doc with created_at",
"created_at": "2025-01-15"
},
{
"id": 1,
"name": "Doc without created_at"
},
{
"id": 3,
"name": "Another doc without created_at"
}
],
"offset": 0,
"limit": 20,
"total": 4
}
"###);
}
#[actix_rt::test]
async fn get_document_sort_desc_includes_docs_without_field() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index.update_settings_sortable_attributes(json!(["priority"])).await;
server.wait_task(task.uid()).await.succeeded();
let documents = json!([
{ "id": 1, "name": "Low priority", "priority": 1 },
{ "id": 2, "name": "No priority" },
{ "id": 3, "name": "High priority", "priority": 10 }
]);
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = index
.fetch_documents(json!({
"sort": ["priority:desc"]
}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"results": [
{
"id": 3,
"name": "High priority",
"priority": 10
},
{
"id": 1,
"name": "Low priority",
"priority": 1
},
{
"id": 2,
"name": "No priority"
}
],
"offset": 0,
"limit": 20,
"total": 3
}
"###);
}

View File

@@ -3142,7 +3142,6 @@ fn fail(override_response_body: Option<&str>) -> ResponseTemplate {
}
}
#[cfg(feature = "enterprise")]
#[actix_rt::test]
async fn remote_auto_sharding() {
let ms0 = Server::new().await;
@@ -3162,6 +3161,7 @@ async fn remote_auto_sharding() {
snapshot!(json_string!(response["network"]), @"true");
// set self & sharding
let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
@@ -3462,30 +3462,6 @@ async fn remote_auto_sharding() {
"###); "###);
} }
#[cfg(not(feature = "enterprise"))]
#[actix_rt::test]
async fn sharding_not_enterprise() {
let ms0 = Server::new().await;
// enable feature
let (response, code) = ms0.set_features(json!({"network": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["network"]), @"true");
let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
snapshot!(code, @"451 Unavailable For Legal Reasons");
snapshot!(json_string!(response), @r###"
{
"message": "Meilisearch Enterprise Edition is required to set `network.sharding`",
"code": "requires_enterprise_edition",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#requires_enterprise_edition"
}
"###);
}
#[cfg(feature = "enterprise")]
#[actix_rt::test]
async fn remote_auto_sharding_with_custom_metadata() {
let ms0 = Server::new().await;

View File

@@ -197,7 +197,7 @@ test_setting_routes!(
{
setting: vector_store,
update_verb: patch,
default_value: "experimental"
default_value: null
},
);

View File

@@ -2,7 +2,6 @@ mod chat;
mod distinct;
mod errors;
mod get_settings;
mod parent_seachable_fields;
mod prefix_search_settings;
mod proximity_settings;
mod tokenizer_customization;

View File

@@ -1,114 +0,0 @@
use meili_snap::{json_string, snapshot};
use once_cell::sync::Lazy;
use crate::common::Server;
use crate::json;
static DOCUMENTS: Lazy<crate::common::Value> = Lazy::new(|| {
json!([
{
"id": 1,
"meta": {
"title": "Soup of the day",
"description": "many the fish",
}
},
{
"id": 2,
"meta": {
"title": "Soup of day",
"description": "many the lazy fish",
}
},
{
"id": 3,
"meta": {
"title": "the Soup of day",
"description": "many the fish",
}
},
])
});
#[actix_rt::test]
async fn nested_field_becomes_searchable() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
"searchableAttributes": ["meta.title"]
}))
.await;
assert_eq!("202", code.as_str(), "{response:?}");
server.wait_task(response.uid()).await.succeeded();
// We expect no documents when searching for
// a nested non-searchable field
index
.search(json!({"q": "many fish"}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"[]"###);
})
.await;
let (response, code) = index
.update_settings(json!({
"searchableAttributes": ["meta.title", "meta.description"]
}))
.await;
assert_eq!("202", code.as_str(), "{response:?}");
server.wait_task(response.uid()).await.succeeded();
// We expect all the documents when the nested field becomes searchable
index
.search(json!({"q": "many fish"}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 1,
"meta": {
"title": "Soup of the day",
"description": "many the fish"
}
},
{
"id": 3,
"meta": {
"title": "the Soup of day",
"description": "many the fish"
}
},
{
"id": 2,
"meta": {
"title": "Soup of day",
"description": "many the lazy fish"
}
}
]
"###);
})
.await;
let (response, code) = index
.update_settings(json!({
"searchableAttributes": ["meta.title"]
}))
.await;
assert_eq!("202", code.as_str(), "{response:?}");
server.wait_task(response.uid()).await.succeeded();
// We expect no documents when searching for
// a nested non-searchable field
index
.search(json!({"q": "many fish"}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"[]"###);
})
.await;
}

View File

@@ -42,16 +42,8 @@ async fn version_too_old() {
std::fs::create_dir_all(&db_path).unwrap();
std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err().to_string();
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.26.0");
let major = meilisearch_types::versioning::VERSION_MAJOR;
let minor = meilisearch_types::versioning::VERSION_MINOR;
let patch = meilisearch_types::versioning::VERSION_PATCH;
let current_version = format!("{major}.{minor}.{patch}");
let err = err.replace(&current_version, "[current version]");
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v[current version]");
}
#[actix_rt::test]
@@ -62,21 +54,11 @@ async fn version_requires_downgrade() {
std::fs::create_dir_all(&db_path).unwrap();
let major = meilisearch_types::versioning::VERSION_MAJOR;
let minor = meilisearch_types::versioning::VERSION_MINOR;
let mut patch = meilisearch_types::versioning::VERSION_PATCH;
let patch = meilisearch_types::versioning::VERSION_PATCH + 1;
std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
let current_version = format!("{major}.{minor}.{patch}");
patch += 1;
let future_version = format!("{major}.{minor}.{patch}");
std::fs::write(db_path.join("VERSION"), &future_version).unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.26.1 is higher than the Meilisearch version 1.26.0. Downgrade is not supported");
let err = err.to_string();
let err = err.replace(&current_version, "[current version]");
let err = err.replace(&future_version, "[future version]");
snapshot!(err, @"Database version [future version] is higher than the Meilisearch version [current version]. Downgrade is not supported");
}
#[actix_rt::test]

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null, "progress": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "[current version]" "upgradeTo": "v1.26.0"
}, },
"stats": { "stats": {
"totalNbTasks": 1, "totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null, "progress": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "[current version]" "upgradeTo": "v1.26.0"
}, },
"stats": { "stats": {
"totalNbTasks": 1, "totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null, "progress": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "[current version]" "upgradeTo": "v1.26.0"
}, },
"stats": { "stats": {
"totalNbTasks": 1, "totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"upgradeFrom": "v1.12.0", "upgradeFrom": "v1.12.0",
"upgradeTo": "[current version]" "upgradeTo": "v1.26.0"
}, },
"error": null, "error": null,
"duration": "[duration]", "duration": "[duration]",

Some files were not shown because too many files have changed in this diff