Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-11 07:05:43 +00:00)

Compare commits: proper-def...support-aw — 1 commit (a719c1f972)
.github/ISSUE_TEMPLATE/new_feature_issue.md (5 changes)

@@ -24,11 +24,6 @@ TBD
  - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
  - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining what to do.

- ### Reminders when adding features
- - [ ] Write unit tests using insta
- - [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.

  ### Reminders when modifying the API

  - [ ] Update the openAPI file with utoipa:
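The checklist item about declarative integration tests references the `cargo xtask test` runner. As a rough local sketch (the workload file name below is only a placeholder, not a file that exists in the repository), recording and then replaying such a test might look like:

```sh
# Record the expected responses for a new declarative test
# (workloads/tests/my_feature.json is a hypothetical example name).
cargo xtask test workloads/tests/my_feature.json --update-responses

# Replay all declarative tests, as the CI job in this compare does.
cargo xtask test workloads/tests/*.json
```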
.github/dependabot.yml (1 change)

@@ -7,5 +7,6 @@ updates:
  schedule:
  interval: "monthly"
  labels:
+ - 'skip changelog'
  - 'dependencies'
  rebase-strategy: disabled
.github/release-draft-template.yml (new file, 33 lines)

@@ -0,0 +1,33 @@
+ name-template: 'v$RESOLVED_VERSION'
+ tag-template: 'v$RESOLVED_VERSION'
+ exclude-labels:
+ - 'skip changelog'
+ version-resolver:
+ minor:
+ labels:
+ - 'enhancement'
+ default: patch
+ categories:
+ - title: '⚠️ Breaking changes'
+ label: 'breaking-change'
+ - title: '🚀 Enhancements'
+ label: 'enhancement'
+ - title: '🐛 Bug Fixes'
+ label: 'bug'
+ - title: '🔒 Security'
+ label: 'security'
+ - title: '⚙️ Maintenance/misc'
+ label:
+ - 'maintenance'
+ - 'documentation'
+ template: |
+ $CHANGES
+
+ ❤️ Huge thanks to our contributors: $CONTRIBUTORS.
+ no-changes-template: 'Changes are coming soon 😎'
+ sort-direction: 'ascending'
+ replacers:
+ - search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
+ replace: ''
+ - search: '/(?:and )?@dependabot(?:\[bot\])?,?/g'
+ replace: ''
.github/workflows/bench-manual.yml (2 changes)

@@ -18,7 +18,7 @@ jobs:
  timeout-minutes: 180 # 3h
  steps:
  - uses: actions/checkout@v5
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
  with:
  profile: minimal
.github/workflows/bench-pr.yml (4 changes)

@@ -66,7 +66,9 @@ jobs:
  fetch-depth: 0 # fetch full history to be able to get main commit sha
  ref: ${{ steps.comment-branch.outputs.head_ref }}

- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
+ with:
+ profile: minimal

  - name: Run benchmarks on PR ${{ github.event.issue.id }}
  run: |
.github/workflows/bench-push-indexing.yml (4 changes)

@@ -12,7 +12,9 @@ jobs:
  timeout-minutes: 180 # 3h
  steps:
  - uses: actions/checkout@v5
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
+ with:
+ profile: minimal

  # Run benchmarks
  - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
.github/workflows/benchmarks-manual.yml (2 changes)

@@ -18,7 +18,7 @@ jobs:
  timeout-minutes: 4320 # 72h
  steps:
  - uses: actions/checkout@v5
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
  with:
  profile: minimal
.github/workflows/benchmarks-pr.yml (2 changes)

@@ -44,7 +44,7 @@ jobs:
  exit 1
  fi

- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
  with:
  profile: minimal
Further hunks in this compare (their file headers were not captured); each applies the same toolchain pin change:

@@ -16,7 +16,7 @@ jobs:
  timeout-minutes: 4320 # 72h
  steps:
  - uses: actions/checkout@v5
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
  with:
  profile: minimal

@@ -15,7 +15,7 @@ jobs:
  runs-on: benchmarks
  steps:
  - uses: actions/checkout@v5
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
  with:
  profile: minimal

The hunk above at -15,7 +15,7 appears twice more with identical content.
.github/workflows/db-change-comments.yml (6 changes)

@@ -6,7 +6,7 @@ on:

  env:
  MESSAGE: |
  ### Hello, I'm a bot 🤖

  You are receiving this message because you declared that this PR makes changes to the Meilisearch database.
  Depending on the nature of the change, additional actions might be required on your part. The following sections detail the additional actions depending on the nature of the change, please copy the relevant section in the description of your PR, and make sure to perform the required actions.

@@ -19,7 +19,6 @@ env:

  - [ ] Detail the change to the DB format and why they are forward compatible
  - [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
- - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)

  ## This PR makes breaking changes

@@ -36,7 +35,8 @@ env:
  - [ ] Write the code to go from the old database to the new one
  - If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
  - If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
- - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
+ - [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected

  jobs:
  add-comment:
.github/workflows/flaky-tests.yml (10 changes)

@@ -3,7 +3,7 @@ name: Look for flaky tests
  on:
  workflow_dispatch:
  schedule:
- - cron: "0 4 * * *" # Every day at 4:00AM
+ - cron: '0 4 * * *' # Every day at 4:00AM

  jobs:
  flaky:

@@ -13,17 +13,11 @@ jobs:
  image: ubuntu:22.04
  steps:
  - uses: actions/checkout@v5
- - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
- run: |
- sudo rm -rf "/opt/ghc" || true
- sudo rm -rf "/usr/share/dotnet" || true
- sudo rm -rf "/usr/local/lib/android" || true
- sudo rm -rf "/usr/local/share/boost" || true
  - name: Install needed dependencies
  run: |
  apt-get update && apt-get install -y curl
  apt-get install build-essential -y
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
  - name: Install cargo-flaky
  run: cargo install cargo-flaky
  - name: Run cargo flaky in the dumps
.github/workflows/fuzzer-indexing.yml (4 changes)

@@ -12,7 +12,9 @@ jobs:
  timeout-minutes: 4320 # 72h
  steps:
  - uses: actions/checkout@v5
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
+ with:
+ profile: minimal

  # Run benchmarks
  - name: Run the fuzzer
.github/workflows/publish-apt-brew-pkg.yml (8 changes)

@@ -25,13 +25,7 @@ jobs:
  run: |
  apt-get update && apt-get install -y curl
  apt-get install build-essential -y
- - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
- run: |
- sudo rm -rf "/opt/ghc" || true
- sudo rm -rf "/usr/share/dotnet" || true
- sudo rm -rf "/usr/local/lib/android" || true
- sudo rm -rf "/usr/local/share/boost" || true
- - uses: dtolnay/rust-toolchain@1.91.1
+ - uses: dtolnay/rust-toolchain@1.89
  - name: Install cargo-deb
  run: cargo install cargo-deb
  - uses: actions/checkout@v5
.github/workflows/publish-docker-images.yml (177 changes)

@@ -14,105 +14,10 @@ on:
  workflow_dispatch:

  jobs:
- build:
- runs-on: ${{ matrix.runner }}
+ docker:
+ runs-on: docker

- strategy:
- matrix:
- platform: [amd64, arm64]
- edition: [community, enterprise]
- include:
- - platform: amd64
- runner: ubuntu-24.04
- - platform: arm64
- runner: ubuntu-24.04-arm
- - edition: community
- registry: getmeili/meilisearch
- feature-flag: ""
- - edition: enterprise
- registry: getmeili/meilisearch-enterprise
- feature-flag: "--features enterprise"
-
- permissions: {}
- steps:
- - uses: actions/checkout@v5
-
- - name: Prepare
- run: |
- platform=linux/${{ matrix.platform }}
- echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
- with:
- platforms: linux/${{ matrix.platform }}
- install: true
-
- - name: Login to Docker Hub
- uses: docker/login-action@v3
- with:
- username: ${{ secrets.DOCKERHUB_USERNAME }}
- password: ${{ secrets.DOCKERHUB_TOKEN }}
-
- - name: Docker meta
- id: meta
- uses: docker/metadata-action@v5
- with:
- images: ${{ matrix.registry }}
- # Prevent `latest` to be updated for each new tag pushed.
- # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
- flavor: latest=false
- tags: |
- type=ref,event=tag
- type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
- type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
- type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
- type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
-
- - name: Build and push by digest
- uses: docker/build-push-action@v6
- id: build-and-push
- with:
- platforms: linux/${{ matrix.platform }}
- labels: ${{ steps.meta.outputs.labels }}
- tags: ${{ matrix.registry }}
- outputs: type=image,push-by-digest=true,name-canonical=true,push=true
- build-args: |
- COMMIT_SHA=${{ github.sha }}
- COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
- GIT_TAG=${{ github.ref_name }}
- EXTRA_ARGS=${{ matrix.feature-flag }}
-
- - name: Export digest
- run: |
- mkdir -p ${{ runner.temp }}/digests
- digest="${{ steps.build-and-push.outputs.digest }}"
- touch "${{ runner.temp }}/digests/${digest#sha256:}"
-
- - name: Upload digest
- uses: actions/upload-artifact@v4
- with:
- name: digests-${{ matrix.edition }}-${{ env.PLATFORM_PAIR }}
- path: ${{ runner.temp }}/digests/*
- if-no-files-found: error
- retention-days: 1
-
- merge:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- edition: [community, enterprise]
- include:
- - edition: community
- registry: getmeili/meilisearch
- - edition: enterprise
- registry: getmeili/meilisearch-enterprise
- needs:
- - build
-
  permissions:
  id-token: write # This is needed to use Cosign in keyless mode

  steps:
  - uses: actions/checkout@v5

@@ -153,15 +58,14 @@ jobs:

  echo "date=$commit_date" >> $GITHUB_OUTPUT

- - name: Install cosign
- uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3

- - name: Download digests
- uses: actions/download-artifact@v4
- with:
- path: ${{ runner.temp }}/digests
- pattern: digests-${{ matrix.edition }}-*
- merge-multiple: true
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ - name: Install cosign
+ uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # tag=v3.9.2

  - name: Login to Docker Hub
  uses: docker/login-action@v3

@@ -169,14 +73,11 @@ jobs:
  username: ${{ secrets.DOCKERHUB_USERNAME }}
  password: ${{ secrets.DOCKERHUB_TOKEN }}

- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3

  - name: Docker meta
  id: meta
  uses: docker/metadata-action@v5
  with:
- images: ${{ matrix.registry }}
+ images: getmeili/meilisearch
  # Prevent `latest` to be updated for each new tag pushed.
  # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
  flavor: latest=false

@@ -187,31 +88,33 @@ jobs:
  type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
  type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}

- - name: Create manifest list and push
- working-directory: ${{ runner.temp }}/digests
- run: |
- docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
- $(printf '${{ matrix.registry }}@sha256:%s ' *)
-
- - name: Inspect image to fetch digest to sign
- run: |
- digest=$(docker buildx imagetools inspect --format='{{ json .Manifest }}' ${{ matrix.registry }}:${{ steps.meta.outputs.version }} | jq -r '.digest')
- echo "DIGEST=${digest}" >> $GITHUB_ENV
+ - name: Build and push
+ uses: docker/build-push-action@v6
+ id: build-and-push
+ with:
+ push: true
+ platforms: linux/amd64,linux/arm64
+ tags: ${{ steps.meta.outputs.tags }}
+ build-args: |
+ COMMIT_SHA=${{ github.sha }}
+ COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
+ GIT_TAG=${{ github.ref_name }}

  - name: Sign the images with GitHub OIDC Token
  env:
+ DIGEST: ${{ steps.build-and-push.outputs.digest }}
  TAGS: ${{ steps.meta.outputs.tags }}
  run: |
  images=""
  for tag in ${TAGS}; do
- images+="${tag}@${{ env.DIGEST }} "
+ images+="${tag}@${DIGEST} "
  done
  cosign sign --yes ${images}

- # /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel
- - name: Notify meilisearch-cloud
+ # /!\ Don't touch this without checking with Cloud team
+ - name: Send CI information to Cloud team
  # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
- if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }}
+ if: github.event_name == 'push'
  uses: peter-evans/repository-dispatch@v3
  with:
  token: ${{ secrets.MEILI_BOT_GH_PAT }}

@@ -219,13 +122,21 @@ jobs:
  event-type: cloud-docker-build
  client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'

- # /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel
- - name: Notify meilisearch-kubernetes
- # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable
- if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }}
- uses: peter-evans/repository-dispatch@v3
- with:
- token: ${{ secrets.MEILI_BOT_GH_PAT }}
- repository: meilisearch/meilisearch-kubernetes
- event-type: meilisearch-release
- client-payload: '{ "version": "${{ github.ref_name }}" }'
+ # Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
+ # - name: 'Setup jq'
+ # uses: dcarbone/install-jq-action
+ # - name: Send deployment to Swarmia
+ # if: github.event_name == 'push' && success()
+ # run: |
+ # JSON_STRING=$( jq --null-input --compact-output \
+ # --arg version "${{ github.ref_name }}" \
+ # --arg appName "meilisearch" \
+ # --arg environment "production" \
+ # --arg commitSha "${{ github.sha }}" \
+ # --arg repositoryFullName "${{ github.repository }}" \
+ # '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
+
+ # curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
+ # -H "Content-Type: application/json" \
+ # -d "$JSON_STRING" \
+ # https://hook.swarmia.com/deployments
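On the left-hand (removed) side of the diff above, the multi-arch image is assembled from per-platform digests and then signed with cosign. A condensed, hedged sketch of those steps, using the same commands that appear in the workflow but with placeholder registry, tag and digest values, could look like:

```sh
# Assemble a multi-arch manifest list from per-platform image digests
# (REGISTRY, the tag and the <…-digest> values are placeholders, not real values).
REGISTRY=getmeili/meilisearch
docker buildx imagetools create -t "$REGISTRY:v1.2.3" \
  "$REGISTRY@sha256:<amd64-digest>" "$REGISTRY@sha256:<arm64-digest>"

# Fetch the digest of the manifest that was just pushed
digest=$(docker buildx imagetools inspect --format='{{ json .Manifest }}' "$REGISTRY:v1.2.3" | jq -r '.digest')

# Sign the tag at that digest with cosign (keyless signing relies on the CI OIDC token)
cosign sign --yes "$REGISTRY:v1.2.3@${digest}"
```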
.github/workflows/publish-release-assets.yml (193 changes)

@@ -11,7 +11,7 @@ jobs:
  check-version:
  name: Check the version validity
  runs-on: ubuntu-latest
- # No need to check the version for dry run (cron or workflow_dispatch)
+ # No need to check the version for dry run (cron)
  steps:
  - uses: actions/checkout@v5
  # Check if the tag has the v<number>.<number>.<number> format.

@@ -32,66 +32,161 @@ jobs:
  if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
  run: bash .github/scripts/check-release.sh

- publish-binaries:
- name: Publish binary for ${{ matrix.release }} ${{ matrix.edition }} edition
- runs-on: ${{ matrix.os }}
- strategy:
- fail-fast: false
- matrix:
- edition: [community, enterprise]
- release:
- [macos-amd64, macos-aarch64, windows, linux-amd64, linux-aarch64]
- include:
- - edition: "community"
- feature-flag: ""
- edition-suffix: ""
- - edition: "enterprise"
- feature-flag: "--features enterprise"
- edition-suffix: "enterprise-"
- - release: macos-amd64
- os: macos-15-intel
- binary_path: release/meilisearch
- asset_name: macos-amd64
- extra-args: ""
- - release: macos-aarch64
- os: macos-14
- binary_path: aarch64-apple-darwin/release/meilisearch
- asset_name: macos-apple-silicon
- extra-args: "--target aarch64-apple-darwin"
- - release: windows
- os: windows-2022
- binary_path: release/meilisearch.exe
- asset_name: windows-amd64.exe
- extra-args: ""
- - release: linux-amd64
- os: ubuntu-22.04
- binary_path: x86_64-unknown-linux-gnu/release/meilisearch
- asset_name: linux-amd64
- extra-args: "--target x86_64-unknown-linux-gnu"
- - release: linux-aarch64
- os: ubuntu-22.04-arm
- binary_path: aarch64-unknown-linux-gnu/release/meilisearch
- asset_name: linux-aarch64
- extra-args: "--target aarch64-unknown-linux-gnu"
+ publish-linux:
+ name: Publish binary for Linux
+ runs-on: ubuntu-latest
  needs: check-version
+ container:
+ # Use ubuntu-22.04 to compile with glibc 2.35
+ image: ubuntu:22.04
  steps:
  - uses: actions/checkout@v5
- - uses: dtolnay/rust-toolchain@1.91.1
+ - name: Install needed dependencies
+ run: |
+ apt-get update && apt-get install -y curl
+ apt-get install build-essential -y
+ - uses: dtolnay/rust-toolchain@1.89
  - name: Build
- run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }}
- # No need to upload binaries for dry run (cron or workflow_dispatch)
+ run: cargo build --release --locked
+ # No need to upload binaries for dry run (cron)
  - name: Upload binaries to release
  if: github.event_name == 'release'
  uses: svenstaro/upload-release-action@2.11.2
  with:
  repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
- file: target/${{ matrix.binary_path }}
- asset_name: meilisearch-${{ matrix.edition-suffix }}${{ matrix.asset_name }}
+ file: target/release/meilisearch
+ asset_name: meilisearch-linux-amd64
+ tag: ${{ github.ref }}
+
+ publish-macos-windows:
+ name: Publish binary for ${{ matrix.os }}
+ runs-on: ${{ matrix.os }}
+ needs: check-version
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [macos-13, windows-2022]
+ include:
+ - os: macos-13
+ artifact_name: meilisearch
+ asset_name: meilisearch-macos-amd64
+ - os: windows-2022
+ artifact_name: meilisearch.exe
+ asset_name: meilisearch-windows-amd64.exe
+ steps:
+ - uses: actions/checkout@v5
+ - uses: dtolnay/rust-toolchain@1.89
+ - name: Build
+ run: cargo build --release --locked
+ # No need to upload binaries for dry run (cron)
+ - name: Upload binaries to release
+ if: github.event_name == 'release'
+ uses: svenstaro/upload-release-action@2.11.2
+ with:
+ repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+ file: target/release/${{ matrix.artifact_name }}
+ asset_name: ${{ matrix.asset_name }}
+ tag: ${{ github.ref }}
+
+ publish-macos-apple-silicon:
+ name: Publish binary for macOS silicon
+ runs-on: macos-13
+ needs: check-version
+ strategy:
+ matrix:
+ include:
+ - target: aarch64-apple-darwin
+ asset_name: meilisearch-macos-apple-silicon
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v5
+ - name: Installing Rust toolchain
+ uses: dtolnay/rust-toolchain@1.89
+ with:
+ profile: minimal
+ target: ${{ matrix.target }}
+ - name: Cargo build
+ uses: actions-rs/cargo@v1
+ with:
+ command: build
+ args: --release --target ${{ matrix.target }}
+ - name: Upload the binary to release
+ # No need to upload binaries for dry run (cron)
+ if: github.event_name == 'release'
+ uses: svenstaro/upload-release-action@2.11.2
+ with:
+ repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+ file: target/${{ matrix.target }}/release/meilisearch
+ asset_name: ${{ matrix.asset_name }}
+ tag: ${{ github.ref }}
+
+ publish-aarch64:
+ name: Publish binary for aarch64
+ runs-on: ubuntu-latest
+ needs: check-version
+ env:
+ DEBIAN_FRONTEND: noninteractive
+ container:
+ # Use ubuntu-22.04 to compile with glibc 2.35
+ image: ubuntu:22.04
+ strategy:
+ matrix:
+ include:
+ - target: aarch64-unknown-linux-gnu
+ asset_name: meilisearch-linux-aarch64
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v5
+ - name: Install needed dependencies
+ run: |
+ apt-get update -y && apt upgrade -y
+ apt-get install -y curl build-essential gcc-aarch64-linux-gnu
+ - name: Set up Docker for cross compilation
+ run: |
+ apt-get install -y curl apt-transport-https ca-certificates software-properties-common
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+ add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ apt-get update -y && apt-get install -y docker-ce
+ - name: Installing Rust toolchain
+ uses: dtolnay/rust-toolchain@1.89
+ with:
+ profile: minimal
+ target: ${{ matrix.target }}
+ - name: Configure target aarch64 GNU
+ ## Environment variable is not passed using env:
+ ## LD gold won't work with MUSL
+ # env:
+ # JEMALLOC_SYS_WITH_LG_PAGE: 16
+ # RUSTFLAGS: '-Clink-arg=-fuse-ld=gold'
+ run: |
+ echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
+ echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
+ echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
+ - name: Install a default toolchain that will be used to build cargo cross
+ run: |
+ rustup default stable
+ - name: Cargo build
+ uses: actions-rs/cargo@v1
+ with:
+ command: build
+ use-cross: true
+ args: --release --target ${{ matrix.target }}
+ env:
+ CROSS_DOCKER_IN_DOCKER: true
+ - name: List target output files
+ run: ls -lR ./target
+ - name: Upload the binary to release
+ # No need to upload binaries for dry run (cron)
+ if: github.event_name == 'release'
+ uses: svenstaro/upload-release-action@2.11.2
+ with:
+ repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+ file: target/${{ matrix.target }}/release/meilisearch
+ asset_name: ${{ matrix.asset_name }}
  tag: ${{ github.ref }}

  publish-openapi-file:
  name: Publish OpenAPI file
- needs: check-version
  runs-on: ubuntu-latest
  steps:
  - name: Checkout code

@@ -106,7 +201,7 @@ jobs:
  cd crates/openapi-generator
  cargo run --release -- --pretty --output ../../meilisearch.json
  - name: Upload OpenAPI to Release
- # No need to upload for dry run (cron or workflow_dispatch)
+ # No need to upload for dry run (cron)
  if: github.event_name == 'release'
  uses: svenstaro/upload-release-action@2.11.2
  with:
.github/workflows/release-drafter.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
+ name: Release Drafter
+
+ permissions:
+ contents: read
+ pull-requests: write
+
+ on:
+ push:
+ branches:
+ - main
+
+ jobs:
+ update_release_draft:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: release-drafter/release-drafter@v6
+ with:
+ config-name: release-draft-template.yml
+ env:
+ GITHUB_TOKEN: ${{ secrets.RELEASE_DRAFTER_TOKEN }}
.github/workflows/sdks-tests.yml (40 changes)

@@ -50,7 +50,7 @@ jobs:
  with:
  repository: meilisearch/meilisearch-dotnet
  - name: Setup .NET Core
- uses: actions/setup-dotnet@v5
+ uses: actions/setup-dotnet@v4
  with:
  dotnet-version: "8.0.x"
  - name: Install dependencies

@@ -68,7 +68,7 @@ jobs:
  runs-on: ubuntu-latest
  services:
  meilisearch:
- image: getmeili/meilisearch-enterprise:${{ needs.define-docker-image.outputs.docker-image }}
+ image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
  env:
  MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
  MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}

The same service-image change (getmeili/meilisearch-enterprise → getmeili/meilisearch), with identical surrounding context, also appears in the hunks at -92,7, -122,7, -149,7, -184,7, -213,7, -238,7, -263,7, -284,7, -307,7, -338,7 and -370,7.

@@ -100,7 +100,7 @@ jobs:
  - '7700:7700'
  steps:
  - name: Set up Go
- uses: actions/setup-go@v6
+ uses: actions/setup-go@v5
  with:
  go-version: stable
  - uses: actions/checkout@v5

@@ -135,13 +135,13 @@ jobs:
  - name: Set up Java
  uses: actions/setup-java@v5
  with:
- java-version: 17
- distribution: 'temurin'
+ java-version: 8
+ distribution: 'zulu'
  cache: gradle
  - name: Grant execute permission for gradlew
  run: chmod +x gradlew
  - name: Build and run unit and integration tests
- run: ./gradlew build integrationTest --info
+ run: ./gradlew build integrationTest

  meilisearch-js-tests:
  needs: define-docker-image

@@ -160,7 +160,7 @@ jobs:
  with:
  repository: meilisearch/meilisearch-js
  - name: Setup node
- uses: actions/setup-node@v5
+ uses: actions/setup-node@v4
  with:
  cache: 'yarn'
  - name: Install dependencies

@@ -224,7 +224,7 @@ jobs:
  with:
  repository: meilisearch/meilisearch-python
  - name: Set up Python
- uses: actions/setup-python@v6
+ uses: actions/setup-python@v5
  - name: Install pipenv
  uses: dschep/install-pipenv-action@v1
  - name: Install dependencies

@@ -318,7 +318,7 @@ jobs:
  with:
  repository: meilisearch/meilisearch-js-plugins
  - name: Setup node
- uses: actions/setup-node@v5
+ uses: actions/setup-node@v4
  with:
  cache: yarn
  - name: Install dependencies
164
.github/workflows/test-suite.yml
vendored
164
.github/workflows/test-suite.yml
vendored
@@ -15,40 +15,31 @@ env:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test-linux:
|
test-linux:
|
||||||
name: Tests on Ubuntu
|
name: Tests on ubuntu-22.04
|
||||||
runs-on: ${{ matrix.runner }}
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
container:
|
||||||
matrix:
|
# Use ubuntu-22.04 to compile with glibc 2.35
|
||||||
runner: [ubuntu-22.04, ubuntu-22.04-arm]
|
image: ubuntu:22.04
|
||||||
features: ["", "--features enterprise"]
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: check free space before
|
- name: Install needed dependencies
|
||||||
run: df -h
|
|
||||||
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
|
|
||||||
run: |
|
run: |
|
||||||
sudo rm -rf "/opt/ghc" || true
|
apt-get update && apt-get install -y curl
|
||||||
sudo rm -rf "/usr/share/dotnet" || true
|
apt-get install build-essential -y
|
||||||
sudo rm -rf "/usr/local/lib/android" || true
|
|
||||||
sudo rm -rf "/usr/local/share/boost" || true
|
|
||||||
- name: check free space after
|
|
||||||
run: df -h
|
|
||||||
- name: Setup test with Rust stable
|
- name: Setup test with Rust stable
|
||||||
uses: dtolnay/rust-toolchain@1.91.1
|
uses: dtolnay/rust-toolchain@1.89
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.8.0
|
uses: Swatinem/rust-cache@v2.8.0
|
||||||
with:
|
- name: Run cargo check without any default features
|
||||||
key: ${{ matrix.features }}
|
|
||||||
- name: Run cargo build without any default features
|
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
command: build
|
command: build
|
||||||
args: --locked --no-default-features --all
|
args: --locked --release --no-default-features --all
|
||||||
- name: Run cargo test
|
- name: Run cargo test
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
command: test
|
command: test
|
||||||
args: --locked --all ${{ matrix.features }}
|
args: --locked --release --all
|
||||||
|
|
||||||
test-others:
|
test-others:
|
||||||
name: Tests on ${{ matrix.os }}
|
name: Tests on ${{ matrix.os }}
|
||||||
@@ -56,58 +47,51 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [macos-14, windows-2022]
|
os: [macos-13, windows-2022]
|
||||||
features: ["", "--features enterprise"]
|
|
||||||
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.8.0
|
uses: Swatinem/rust-cache@v2.8.0
|
||||||
- uses: dtolnay/rust-toolchain@1.91.1
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
- name: Run cargo build without any default features
|
- name: Run cargo check without any default features
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
command: build
|
command: build
|
||||||
args: --locked --no-default-features --all
|
args: --locked --release --no-default-features --all
|
||||||
- name: Run cargo test
|
- name: Run cargo test
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
command: test
|
command: test
|
||||||
args: --locked --all ${{ matrix.features }}
|
args: --locked --release --all
|
||||||
|
|
||||||
test-all-features:
|
test-all-features:
|
||||||
name: Tests almost all features
|
name: Tests almost all features
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
# Use ubuntu-22.04 to compile with glibc 2.35
|
||||||
|
image: ubuntu:22.04
|
||||||
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
|
- name: Install needed dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo rm -rf "/opt/ghc" || true
|
apt-get update
|
||||||
sudo rm -rf "/usr/share/dotnet" || true
|
apt-get install --assume-yes build-essential curl
|
||||||
sudo rm -rf "/usr/local/lib/android" || true
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
sudo rm -rf "/usr/local/share/boost" || true
|
|
||||||
- uses: dtolnay/rust-toolchain@1.91.1
|
|
||||||
- name: Run cargo build with almost all features
|
- name: Run cargo build with almost all features
|
||||||
run: |
|
run: |
|
||||||
cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
||||||
- name: Run cargo test with almost all features
|
- name: Run cargo test with almost all features
|
||||||
run: |
|
run: |
|
||||||
cargo test --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
||||||
|
|
||||||
ollama-ubuntu:
|
ollama-ubuntu:
|
||||||
name: Test with Ollama
|
name: Test with Ollama
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-latest
|
||||||
env:
|
env:
|
||||||
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
|
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
|
|
||||||
run: |
|
|
||||||
sudo rm -rf "/opt/ghc" || true
|
|
||||||
sudo rm -rf "/usr/share/dotnet" || true
|
|
||||||
sudo rm -rf "/usr/local/lib/android" || true
|
|
||||||
sudo rm -rf "/usr/local/share/boost" || true
|
|
||||||
- name: Install Ollama
|
- name: Install Ollama
|
||||||
run: |
|
run: |
|
||||||
curl -fsSL https://ollama.com/install.sh | sudo -E sh
|
curl -fsSL https://ollama.com/install.sh | sudo -E sh
|
||||||
@@ -131,21 +115,21 @@ jobs:
|
|||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
command: test
|
command: test
|
||||||
args: --locked -p meilisearch --features test-ollama ollama
|
args: --locked --release --all --features test-ollama ollama
|
||||||
|
|
||||||
test-disabled-tokenization:
|
test-disabled-tokenization:
|
||||||
name: Test disabled tokenization
|
name: Test disabled tokenization
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
image: ubuntu:22.04
|
||||||
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
|
- name: Install needed dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo rm -rf "/opt/ghc" || true
|
apt-get update
|
||||||
sudo rm -rf "/usr/share/dotnet" || true
|
apt-get install --assume-yes build-essential curl
|
||||||
sudo rm -rf "/usr/local/lib/android" || true
|
- uses: dtolnay/rust-toolchain@1.89
|
||||||
sudo rm -rf "/usr/local/share/boost" || true
|
|
||||||
- uses: dtolnay/rust-toolchain@1.91.1
|
|
||||||
- name: Run cargo tree without default features and check lindera is not present
|
- name: Run cargo tree without default features and check lindera is not present
|
||||||
run: |
|
run: |
|
||||||
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
|
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
|
||||||
@@ -156,39 +140,36 @@ jobs:
|
|||||||
run: |
|
         run: |
           cargo tree -f '{p} {f}' -e normal | grep lindera -qz

-  build:
-    name: Build in release
-    runs-on: ubuntu-22.04
+  # We run tests in debug also, to make sure that the debug_assertions are hit
+  test-debug:
+    name: Run tests in debug
+    runs-on: ubuntu-latest
+    container:
+      # Use ubuntu-22.04 to compile with glibc 2.35
+      image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+      - name: Install needed dependencies
         run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.8.0
-      - name: Build
-        run: cargo build --release --locked --target x86_64-unknown-linux-gnu
+      - name: Run tests in debug
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --locked --all

   clippy:
     name: Run Clippy
-    runs-on: ubuntu-22.04
-    strategy:
-      matrix:
-        features: ["", "--features enterprise"]
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
+          profile: minimal
           components: clippy
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -196,21 +177,18 @@ jobs:
         uses: actions-rs/cargo@v1
         with:
           command: clippy
-          args: --all-targets ${{ matrix.features }} -- --deny warnings
+          args: --all-targets -- --deny warnings

   fmt:
     name: Run Rustfmt
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
+          profile: minimal
+          toolchain: nightly-2024-07-09
+          override: true
           components: rustfmt
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -221,23 +199,3 @@ jobs:
         run: |
           echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
           cargo fmt --all -- --check
-
-  declarative-tests:
-    name: Run declarative tests
-    runs-on: ubuntu-22.04-arm
-    permissions:
-      contents: read
-    steps:
-      - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
-      - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.8.0
-      - name: Run declarative tests
-        run: |
-          cargo xtask test workloads/tests/*.json
10	.github/workflows/update-cargo-toml-version.yml vendored
@@ -18,13 +18,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
-      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
-        run: |
-          sudo rm -rf "/opt/ghc" || true
-          sudo rm -rf "/usr/share/dotnet" || true
-          sudo rm -rf "/usr/local/lib/android" || true
-          sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
+        with:
+          profile: minimal
       - name: Install sd
         run: cargo install sd
       - name: Update Cargo.toml file
@@ -124,7 +124,6 @@ They are JSON files with the following structure (comments are not actually supp
 {
     // Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
     "name": "hackernews.ndjson_1M,no-threads",
-    "type": "bench",
     // Number of consecutive runs of the commands that should be performed.
     // Each run uses a fresh instance of Meilisearch and a fresh database.
    // Each run produces its own report file.
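Read together, those comments describe the workload file format consumed by the bench runner. Purely as an illustrative sketch (any field or value beyond the lines quoted above is an assumption, not taken from the repository), the opening of a minimal bench workload might look like this:

```jsonc
{
    // unique name, used to group results on the dashboard
    "name": "hackernews.ndjson_1M,no-threads",
    "type": "bench"
    // run count, assets and commands would follow here
}
```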
1560	Cargo.lock generated
File diff suppressed because it is too large.
@@ -23,7 +23,7 @@ members = [
 ]

 [workspace.package]
-version = "1.29.0"
+version = "1.22.0"
 authors = [
     "Quentin de Quelen <quentin@dequelen.me>",
     "Clément Renault <clement@meilisearch.com>",
@@ -50,5 +50,3 @@ opt-level = 3
 opt-level = 3
 [profile.dev.package.roaring]
 opt-level = 3
-[profile.dev.package.gemm-f16]
-opt-level = 3
7	Cross.toml Normal file
@@ -0,0 +1,7 @@
+[build.env]
+passthrough = [
+    "RUST_BACKTRACE",
+    "CARGO_TERM_COLOR",
+    "RUSTFLAGS",
+    "JEMALLOC_SYS_WITH_LG_PAGE"
+]
10	Dockerfile
@@ -1,5 +1,5 @@
 # Compile
-FROM rust:1.89-alpine3.22 AS compiler
+FROM rust:1.89-alpine3.20 AS compiler

 RUN apk add -q --no-cache build-base openssl-dev

@@ -8,17 +8,19 @@ WORKDIR /
 ARG COMMIT_SHA
 ARG COMMIT_DATE
 ARG GIT_TAG
-ARG EXTRA_ARGS
 ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG}
 ENV RUSTFLAGS="-C target-feature=-crt-static"

 COPY . .
 RUN set -eux; \
     apkArch="$(apk --print-arch)"; \
-    cargo build --release -p meilisearch -p meilitool ${EXTRA_ARGS}
+    if [ "$apkArch" = "aarch64" ]; then \
+        export JEMALLOC_SYS_WITH_LG_PAGE=16; \
+    fi && \
+    cargo build --release -p meilisearch -p meilitool

 # Run
-FROM alpine:3.22
+FROM alpine:3.20
 LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch"

 ENV MEILI_HTTP_ADDR 0.0.0.0:7700
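The `-` side of the hunk above forwards an `EXTRA_ARGS` build argument straight into `cargo build`. As a hypothetical invocation only (the image tag and the chosen cargo flag are assumptions, not documented in this diff), extra cargo arguments could then be supplied at image build time:

```bash
# sketch only: forwards additional cargo arguments into the release build
docker build --build-arg EXTRA_ARGS="--features enterprise" -t meilisearch:local .
```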
28	LICENSE
@@ -1,9 +1,29 @@
-# License
+MIT License

 Copyright (c) 2019-2025 Meili SAS

-Part of this work fall under the Meilisearch Enterprise Edition (EE) and are licensed under the Business Source License 1.1, please refer to [LICENSE-EE](./LICENSE-EE) for details.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:

-The other parts of this work are licensed under the [MIT license](./LICENSE-MIT).
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.

-`SPDX-License-Identifier: MIT AND BUSL-1.1`
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---
+
+🔒 Meilisearch Enterprise Edition (EE)
+
+Certain parts of this codebase are not licensed under the MIT license and governed by the Business Source License 1.1.
+
+See the LICENSE-EE file for details.
21	LICENSE-MIT
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019-2025 Meili SAS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
@@ -39,7 +39,6 @@
 ## 🖥 Examples

 - [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
-- [**Flickr**](https://flickr.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — Search and explore one hundred million Flickr images with semantic search.
 - [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
 - [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
 - [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.
@@ -122,7 +121,7 @@ If you want to know more about the kind of data we collect and what we use it fo

 Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)

-🗞 [Subscribe to our newsletter](https://share-eu1.hsforms.com/1LN5N0x_GQgq7ss7tXmSykwfg3aq) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
+🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.

 💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:
326	TESTING.md
@@ -1,326 +0,0 @@
# Declarative tests

Declarative tests ensure that Meilisearch features remain stable across versions.

While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.

Declarative tests instead **simulate the lifetime of a database**: they chain together commands and requests to change the binary, verifying that database state and API responses remain consistent.

## Basic example

```jsonc
{
    "type": "test",
    "name": "api-keys",
    "binary": { // the first command will run on the binary following this specification.
        "source": "release", // get the binary as a release from GitHub
        "version": "1.19.0", // version to fetch
        "edition": "community" // edition to fetch
    },
    "commands": []
}
```

This example defines a no-op test (it does nothing).

If the file is saved at `workloads/tests/example.json`, you can run it with:

```bash
cargo xtask test workloads/tests/example.json
```

## Commands

Commands represent API requests sent to Meilisearch endpoints during a test.

They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.

```jsonc
{
    "route": "keys",
    "method": "POST",
    "body": {
        "inline": {
            "actions": [
                "search",
                "documents.add"
            ],
            "description": "Test API Key",
            "expiresAt": null,
            "indexes": [ "movies" ]
        }
    }
}
```

This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.

### Using assets in commands

To keep tests concise and reusable, you can define **assets** at the root of the workload file.

Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.

```jsonc
{
    "type": "test",
    "name": "movies",
    "binary": {
        "source": "release",
        "version": "1.19.0",
        "edition": "community"
    },
    "assets": {
        "movies.json": {
            "local_location": null,
            "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
            "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
        }
    },
    "commands": [
        {
            "route": "indexes/movies/documents",
            "method": "POST",
            "body": {
                "asset": "movies.json"
            }
        }
    ]
}
```

In this example:
- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.

This makes the test much cleaner than inlining a large dataset directly into the command.

For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).

### Asserting responses

Commands can specify both the **expected status code** and the **expected response body**.

```jsonc
{
    "route": "indexes/movies/documents",
    "method": "POST",
    "body": {
        "asset": "movies.json"
    },
    "expectedStatus": 202,
    "expectedResponse": {
        "enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 1,
        "type": "documentAdditionOrUpdate"
    },
    "synchronous": "WaitForTask"
}
```

Manually writing `expectedResponse` fields can be tedious.

Instead, you can let the test runner populate them automatically:

```bash
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
cargo xtask test workloads/tests/example.json --add-missing-responses

# OR

# Run the workload to populate expected fields. Updates all fields including existing ones
cargo xtask test workloads/tests/example.json --update-responses
```

This workflow is recommended:

1. Write the test without expected fields.
2. Run it with `--add-missing-responses` to capture the actual responses.
3. Review and commit the generated expectations.

## Changing binary

It is possible to insert an instruction to change the current Meilisearch instance from one binary specification to another during a test.

When executed, such an instruction will:
1. Stop the current Meilisearch instance.
2. Fetch the binary specified by the instruction.
3. Restart the server with the specified binary on the same database.

```jsonc
{
    "type": "test",
    "name": "movies",
    "binary": {
        "source": "release",
        "version": "1.19.0", // start with version v1.19.0
        "edition": "community"
    },
    "assets": {
        "movies.json": {
            "local_location": null,
            "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
            "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
        }
    },
    "commands": [
        // setup some data
        {
            "route": "indexes/movies/documents",
            "method": "POST",
            "body": {
                "asset": "movies.json"
            }
        },
        // switch binary to v1.24.0
        {
            "binary": {
                "source": "release",
                "version": "1.24.0",
                "edition": "community"
            }
        }
    ]
}
```

### Typical Usage

In most cases, the change binary instruction will be used to update a database.

- **Set up** some data using commands on an older version.
- **Upgrade** to the latest version.
- **Assert** that the data and API behavior remain correct after the upgrade.

To properly test the dumpless upgrade, one should typically:

1. Open the database without processing the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments.
2. Check that the search, stats and task queue still work.
3. Open the database and process the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish.
4. Check that the indexing, search, stats, and task queue still work.

```jsonc
{
    "type": "test",
    "name": "movies",
    "binary": {
        "source": "release",
        "version": "1.12.0",
        "edition": "community"
    },
    "commands": [
        // 0. Run commands to populate the database
        {
            // ..
        },
        // 1. Open the database with new MS without processing the update task
        {
            "binary": {
                "source": "build", // build the binary from the sources in the current git repository
                "edition": "community",
                "extraCliArgs": [
                    "--experimental-dumpless-upgrade", // allows to open with a newer MS
                    "--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
                ]
            }
        },
        // 2. Check the search etc.
        {
            // ..
        },
        // 3. Open the database with new MS and processing the update task
        {
            "binary": {
                "source": "build", // build the binary from the sources in the current git repository
                "edition": "community",
                "extraCliArgs": [
                    "--experimental-dumpless-upgrade" // allows to open with a newer MS
                    // no `--experimental-max-number-of-batched-tasks=0`
                ]
            }
        },
        // 4. Check the indexing, search, etc.
        {
            // ..
        }
    ]
}
```

This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.

## Variables

Sometimes a command needs to use a value returned by a **previous response**.
These values can be captured and reused using the register field.

```jsonc
{
    "route": "keys",
    "method": "POST",
    "body": {
        "inline": {
            "actions": [
                "search",
                "documents.add"
            ],
            "description": "Test API Key",
            "expiresAt": null,
            "indexes": [ "movies" ]
        }
    },
    "expectedResponse": {
        "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
        // ...
    },
    "register": {
        "key": "/key"
    },
    "synchronous": "WaitForResponse"
}
```

The `register` field captures the value at the JSON path `/key` from the response.
Paths follow the **JavaScript Object Notation Pointer (RFC 6901)** format.
Registered variables are available for all subsequent commands.
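For readers unfamiliar with RFC 6901, here is a small illustration of how such a pointer resolves (hypothetical response body, not part of the test suite above):

```jsonc
// given this response body...
{ "key": "abc123", "indexes": ["movies"] }
// ...the pointer "/key" captures "abc123",
// and "/indexes/0" would capture "movies".
```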
Registered variables can be referenced by wrapping their name in double curly braces:

In the route/path:

```jsonc
{
    "route": "tasks/{{ task_id }}",
    "method": "GET"
}
```

In the request body:

```jsonc
{
    "route": "indexes/movies/documents",
    "method": "PATCH",
    "body": {
        "inline": {
            "id": "{{ document_id }}",
            "overview": "Shazam turns evil and the world is in danger.",
        }
    }
}
```

Or they can be referenced by their name (**without curly braces**) as an API key:

```jsonc
{
    "route": "indexes/movies/documents",
    "method": "POST",
    "body": { /* ... */ },
    "apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
}
```
@@ -11,27 +11,27 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-anyhow = "1.0.100"
-bumpalo = "3.19.0"
-csv = "1.4.0"
-memmap2 = "0.9.9"
+anyhow = "1.0.98"
+bumpalo = "3.18.1"
+csv = "1.3.1"
+memmap2 = "0.9.7"
 milli = { path = "../milli" }
-mimalloc = { version = "0.1.48", default-features = false }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
-tempfile = "3.23.0"
+mimalloc = { version = "0.1.47", default-features = false }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
+tempfile = "3.20.0"

 [dev-dependencies]
-criterion = { version = "0.7.0", features = ["html_reports"] }
+criterion = { version = "0.6.0", features = ["html_reports"] }
 rand = "0.8.5"
 rand_chacha = "0.3.1"
 roaring = "0.10.12"

 [build-dependencies]
-anyhow = "1.0.100"
-bytes = "1.11.0"
-convert_case = "0.9.0"
-flate2 = "1.1.5"
-reqwest = { version = "0.12.24", features = ["blocking", "rustls-tls"], default-features = false }
+anyhow = "1.0.98"
+bytes = "1.10.1"
+convert_case = "0.8.0"
+flate2 = "1.1.2"
+reqwest = { version = "0.12.20", features = ["blocking", "rustls-tls"], default-features = false }

 [features]
 default = ["milli/all-tokenizations"]
@@ -21,10 +21,6 @@ use roaring::RoaringBitmap;
 #[global_allocator]
 static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

-fn no_cancel() -> bool {
-    false
-}
-
 const BENCHMARK_ITERATION: usize = 10;

 fn setup_dir(path: impl AsRef<Path>) {
@@ -69,7 +65,7 @@ fn setup_settings<'t>(
     let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
     builder.set_sortable_fields(sortable_fields);

-    builder.execute(&no_cancel, &Progress::default(), Default::default()).unwrap();
+    builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
 }

 fn setup_index_with_settings(
@@ -156,7 +152,7 @@ fn indexing_songs_default(c: &mut Criterion) {
                     &rtxn,
                     None,
                     &mut new_fields_ids_map,
-                    &no_cancel,
+                    &|| false,
                     Progress::default(),
                     None,
                 )
@@ -172,7 +168,7 @@ fn indexing_songs_default(c: &mut Criterion) {
                     primary_key,
                     &document_changes,
                     RuntimeEmbedders::default(),
-                    &no_cancel,
+                    &|| false,
                     &Progress::default(),
                     &Default::default(),
                 )

The remaining hunks of this file (from `@@ -224,7 +220,7 @@` through `@@ -2047,7 +2043,7 @@`) apply the same one-line substitution, `- &no_cancel,` becoming `+ &|| false,`, at the corresponding call sites in: reindexing_songs_default, deleting_songs_in_batches_default, indexing_songs_in_three_batches_default, indexing_songs_without_faceted_numbers, indexing_songs_without_faceted_fields, indexing_wiki, reindexing_wiki, deleting_wiki_in_batches_default, indexing_wiki_in_three_batches, indexing_movies_default, reindexing_movies_default, deleting_movies_in_batches_default, delete_documents_from_ids, indexing_movies_in_three_batches, indexing_nested_movies_default, deleting_nested_movies_in_batches_default, indexing_nested_movies_without_faceted_fields, indexing_geo, reindexing_geo, and deleting_geo_in_batches_default.
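Those hunks swap a named helper for inline `|| false` closures at every cancellation callback. As a minimal standalone sketch (not repository code) of why the two forms are interchangeable at such call sites, a zero-argument `fn` item satisfies the same `Fn() -> bool` bound as the closure:

```rust
fn no_cancel() -> bool {
    false
}

// Stand-in for the indexer entry points: anything implementing `Fn() -> bool`
// can be passed by reference as the "should I abort?" callback.
fn run_until_cancelled(should_cancel: &impl Fn() -> bool) -> u32 {
    let mut steps = 0;
    while !should_cancel() && steps < 3 {
        steps += 1;
    }
    steps
}

fn main() {
    // Both a named fn item and an inline closure are accepted.
    assert_eq!(run_until_cancelled(&no_cancel), 3);
    assert_eq!(run_until_cancelled(&|| false), 3);
}
```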
@@ -11,8 +11,8 @@ license.workspace = true
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-time = { version = "0.3.44", features = ["parsing"] }
+time = { version = "0.3.41", features = ["parsing"] }

 [build-dependencies]
-anyhow = "1.0.100"
-vergen-gitcl = "1.0.8"
+anyhow = "1.0.98"
+vergen-git2 = "1.0.7"
@@ -15,7 +15,7 @@ fn emit_git_variables() -> anyhow::Result<()> {
     // Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
     // in the corresponding GitHub workflow (publish_docker.yml).
     // This is due to the Dockerfile building the binary outside of the git directory.
-    let mut builder = vergen_gitcl::GitclBuilder::default();
+    let mut builder = vergen_git2::Git2Builder::default();

     builder.branch(true);
     builder.commit_timestamp(true);
@@ -25,5 +25,5 @@ fn emit_git_variables() -> anyhow::Result<()> {

     let git2 = builder.build()?;

-    vergen_gitcl::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
+    vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
 }
@@ -1,6 +0,0 @@
-use build_info::BuildInfo;
-
-fn main() {
-    let info = BuildInfo::from_build();
-    dbg!(info);
-}
@@ -11,27 +11,24 @@ readme.workspace = true
 license.workspace = true

 [dependencies]
-anyhow = "1.0.100"
-flate2 = "1.1.5"
+anyhow = "1.0.98"
+flate2 = "1.1.2"
 http = "1.3.1"
 meilisearch-types = { path = "../meilisearch-types" }
 once_cell = "1.21.3"
-regex = "1.12.2"
+regex = "1.11.1"
 roaring = { version = "0.10.12", features = ["serde"] }
-serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
+serde = { version = "1.0.219", features = ["derive"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
 tar = "0.4.44"
-tempfile = "3.23.0"
-thiserror = "2.0.17"
-time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
+tempfile = "3.20.0"
+thiserror = "2.0.12"
+time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
 tracing = "0.1.41"
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }

 [dev-dependencies]
 big_s = "1.0.2"
 maplit = "1.0.2"
 meili-snap = { path = "../meili-snap" }
 meilisearch-types = { path = "../meilisearch-types" }
-
-[features]
-enterprise = ["meilisearch-types/enterprise"]
@@ -96,8 +96,6 @@ pub struct TaskDump {
     pub finished_at: Option<OffsetDateTime>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub network: Option<TaskNetwork>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub custom_metadata: Option<String>,
 }

 // A `Kind` specific version made for the dump. If modified you may break the dump.
@@ -160,9 +158,6 @@ pub enum KindDump {
     UpgradeDatabase {
         from: (u32, u32, u32),
     },
-    IndexCompaction {
-        index_uid: String,
-    },
 }

 impl From<Task> for TaskDump {
@@ -180,7 +175,6 @@ impl From<Task> for TaskDump {
             started_at: task.started_at,
             finished_at: task.finished_at,
             network: task.network,
-            custom_metadata: task.custom_metadata,
         }
     }
 }
@@ -246,9 +240,6 @@ impl From<KindWithContent> for KindDump {
             KindWithContent::UpgradeDatabase { from: version } => {
                 KindDump::UpgradeDatabase { from: version }
             }
-            KindWithContent::IndexCompaction { index_uid } => {
-                KindDump::IndexCompaction { index_uid }
-            }
         }
     }
 }
@@ -262,13 +253,13 @@ pub(crate) mod test {
     use big_s::S;
     use maplit::{btreemap, btreeset};
     use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
+    use meilisearch_types::enterprise_edition::network::{Network, Remote};
     use meilisearch_types::facet_values_sort::FacetValuesSort;
     use meilisearch_types::features::RuntimeTogglableFeatures;
     use meilisearch_types::index_uid_pattern::IndexUidPattern;
     use meilisearch_types::keys::{Action, Key};
     use meilisearch_types::milli::update::Setting;
     use meilisearch_types::milli::{self, FilterableAttributesRule};
-    use meilisearch_types::network::{Network, Remote};
     use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
     use meilisearch_types::task_view::DetailsView;
     use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
@@ -399,7 +390,6 @@ pub(crate) mod test {
                 started_at: Some(datetime!(2022-11-20 0:00 UTC)),
                 finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
                 network: None,
-                custom_metadata: None,
             },
             None,
         ),
@@ -425,7 +415,6 @@ pub(crate) mod test {
                 started_at: None,
                 finished_at: None,
                 network: None,
-                custom_metadata: None,
             },
             Some(vec![
                 json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
@@ -446,7 +435,6 @@ pub(crate) mod test {
                 started_at: None,
                 finished_at: None,
                 network: None,
-                custom_metadata: None,
             },
             None,
         ),
@@ -164,7 +164,6 @@ impl CompatV5ToV6 {
                     started_at: task_view.started_at,
                     finished_at: task_view.finished_at,
                     network: None,
-                    custom_metadata: None,
                 };

                 (task, content_file)
@@ -107,14 +107,19 @@ impl Settings<Unchecked> {
     }
 }

-#[derive(Default, Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }

+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub const fn is_not_set(&self) -> bool {
         matches!(self, Self::NotSet)
@@ -161,14 +161,19 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }

-#[derive(Default, Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }

+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn map<U, F>(self, f: F) -> Setting<U>
     where
@@ -1,7 +1,9 @@
 use std::fmt::{self, Display, Formatter};
+use std::marker::PhantomData;
 use std::str::FromStr;

-use serde::Deserialize;
+use serde::de::Visitor;
+use serde::{Deserialize, Deserializer};
 use uuid::Uuid;

 use super::settings::{Settings, Unchecked};
@@ -80,3 +82,59 @@ impl Display for IndexUidFormatError {
 }

 impl std::error::Error for IndexUidFormatError {}
+
+/// A type that tries to match either a star (*) or
+/// any other thing that implements `FromStr`.
+#[derive(Debug)]
+#[cfg_attr(test, derive(serde::Serialize))]
+pub enum StarOr<T> {
+    Star,
+    Other(T),
+}
+
+impl<'de, T, E> Deserialize<'de> for StarOr<T>
+where
+    T: FromStr<Err = E>,
+    E: Display,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        /// Serde can't differentiate between `StarOr::Star` and `StarOr::Other` without a tag.
+        /// Simply using `#[serde(untagged)]` + `#[serde(rename="*")]` will lead to attempting to
+        /// deserialize everything as a `StarOr::Other`, including "*".
+        /// [`#[serde(other)]`](https://serde.rs/variant-attrs.html#other) might have helped but is
+        /// not supported on untagged enums.
+        struct StarOrVisitor<T>(PhantomData<T>);
+
+        impl<T, FE> Visitor<'_> for StarOrVisitor<T>
+        where
+            T: FromStr<Err = FE>,
+            FE: Display,
+        {
+            type Value = StarOr<T>;
+
+            fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result {
+                formatter.write_str("a string")
+            }
+
+            fn visit_str<SE>(self, v: &str) -> Result<Self::Value, SE>
+            where
+                SE: serde::de::Error,
+            {
+                match v {
+                    "*" => Ok(StarOr::Star),
+                    v => {
+                        let other = FromStr::from_str(v).map_err(|e: T::Err| {
+                            SE::custom(format!("Invalid `other` value: {}", e))
+                        })?;
+                        Ok(StarOr::Other(other))
+                    }
+                }
+            }
+        }
+
+        deserializer.deserialize_str(StarOrVisitor(PhantomData))
+    }
+}
@@ -192,14 +192,19 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }

-#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }

+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn set(self) -> Option<T> {
         match self {
@@ -47,15 +47,20 @@ pub struct Settings<T> {
     pub _kind: PhantomData<T>,
 }

-#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }

+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn set(self) -> Option<T> {
         match self {
@@ -322,7 +322,7 @@ impl From<Task> for TaskView {
             _ => None,
         });

-        let duration = finished_at.zip(started_at).map(|(tf, ts)| tf - ts);
+        let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));

         Self {
             uid: id,
@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
 pub type Key = meilisearch_types::keys::Key;
 pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
 pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
-pub type Network = meilisearch_types::network::Network;
+pub type Network = meilisearch_types::enterprise_edition::network::Network;
 pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;

 // ===== Other types to clarify the code of the compat module
@@ -5,9 +5,9 @@ use std::path::PathBuf;
 use flate2::write::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
+use meilisearch_types::enterprise_edition::network::Network;
 use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
 use meilisearch_types::keys::Key;
-use meilisearch_types::network::Network;
 use meilisearch_types::settings::{Checked, Settings};
 use meilisearch_types::webhooks::WebhooksDumpView;
 use serde_json::{Map, Value};
@@ -11,7 +11,7 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-tempfile = "3.23.0"
-thiserror = "2.0.17"
+tempfile = "3.20.0"
+thiserror = "2.0.12"
 tracing = "0.1.41"
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }
@@ -60,7 +60,7 @@ impl FileStore {

     /// Returns the file corresponding to the requested uuid.
     pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> {
-        let path = self.update_path(uuid);
+        let path = self.get_update_path(uuid);
         let file = match StdFile::open(path) {
             Ok(file) => file,
             Err(e) => {
@@ -72,7 +72,7 @@ impl FileStore {
     }

     /// Returns the path that correspond to this uuid, the path could not exists.
-    pub fn update_path(&self, uuid: Uuid) -> PathBuf {
+    pub fn get_update_path(&self, uuid: Uuid) -> PathBuf {
         self.path.join(uuid.to_string())
     }

@@ -16,7 +16,7 @@ license.workspace = true
 serde_json = "1.0"

 [dev-dependencies]
-criterion = { version = "0.7.0", features = ["html_reports"] }
+criterion = { version = "0.6.0", features = ["html_reports"] }

 [[bench]]
 name = "benchmarks"
@@ -11,12 +11,12 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-arbitrary = { version = "1.4.2", features = ["derive"] }
-bumpalo = "3.19.0"
-clap = { version = "4.5.52", features = ["derive"] }
+arbitrary = { version = "1.4.1", features = ["derive"] }
+bumpalo = "3.18.1"
+clap = { version = "4.5.40", features = ["derive"] }
 either = "1.15.0"
 fastrand = "2.3.0"
 milli = { path = "../milli" }
-serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
-tempfile = "3.23.0"
+serde = { version = "1.0.219", features = ["derive"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
+tempfile = "3.20.0"
@@ -11,33 +11,31 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-anyhow = "1.0.100"
+anyhow = "1.0.98"
 bincode = "1.3.3"
 byte-unit = "5.1.6"
-bytes = "1.11.0"
-bumpalo = "3.19.0"
+bumpalo = "3.18.1"
 bumparaw-collections = "0.1.4"
-convert_case = "0.9.0"
-csv = "1.4.0"
+convert_case = "0.8.0"
+csv = "1.3.1"
 derive_builder = "0.20.2"
 dump = { path = "../dump" }
-enum-iterator = "2.3.0"
+enum-iterator = "2.1.0"
 file-store = { path = "../file-store" }
-flate2 = "1.1.5"
-indexmap = "2.12.0"
+flate2 = "1.1.2"
+indexmap = "2.9.0"
 meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
-memmap2 = "0.9.9"
+memmap2 = "0.9.7"
 page_size = "0.6.0"
-rayon = "1.11.0"
+rayon = "1.10.0"
 roaring = { version = "0.10.12", features = ["serde"] }
-serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
-tar = "0.4.44"
+serde = { version = "1.0.219", features = ["derive"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
 synchronoise = "1.0.1"
-tempfile = "3.23.0"
-thiserror = "2.0.17"
-time = { version = "0.3.44", features = [
+tempfile = "3.20.0"
+thiserror = "2.0.12"
+time = { version = "0.3.41", features = [
     "serde-well-known",
     "formatting",
     "parsing",
@@ -45,11 +43,8 @@ time = { version = "0.3.44", features = [
 ] }
 tracing = "0.1.41"
 ureq = "2.12.1"
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }
 backoff = "0.4.0"
-reqwest = { version = "0.12.24", features = ["rustls-tls", "http2"], default-features = false }
-rusty-s3 = "0.8.1"
-tokio = { version = "1.48.0", features = ["full"] }

 [dev-dependencies]
 big_s = "1.0.2"
@@ -150,7 +150,6 @@ impl<'a> Dump<'a> {
             details: task.details,
             status: task.status,
             network: task.network,
-            custom_metadata: task.custom_metadata,
             kind: match task.kind {
                 KindDump::DocumentImport {
                     primary_key,
@@ -235,9 +234,6 @@ impl<'a> Dump<'a> {
                     }
                 }
                 KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
-                KindDump::IndexCompaction { index_uid } => {
-                    KindWithContent::IndexCompaction { index_uid }
-                }
             },
         };

@@ -5,7 +5,6 @@ use meilisearch_types::error::{Code, ErrorCode};
 use meilisearch_types::milli::index::RollbackOutcome;
 use meilisearch_types::tasks::{Kind, Status};
 use meilisearch_types::{heed, milli};
-use reqwest::StatusCode;
 use thiserror::Error;

 use crate::TaskId;
@@ -128,14 +127,6 @@ pub enum Error {
     #[error("Aborted task")]
     AbortedTask,

-    #[error("S3 error: status: {status}, body: {body}")]
-    S3Error { status: StatusCode, body: String },
-    #[error("S3 HTTP error: {0}")]
-    S3HttpError(reqwest::Error),
-    #[error("S3 XML error: {0}")]
-    S3XmlError(Box<dyn std::error::Error + Send + Sync>),
-    #[error("S3 bucket error: {0}")]
-    S3BucketError(rusty_s3::BucketError),
     #[error(transparent)]
     Dump(#[from] dump::Error),
     #[error(transparent)]
@@ -235,10 +226,6 @@ impl Error {
             | Error::TaskCancelationWithEmptyQuery
             | Error::FromRemoteWhenExporting { .. }
             | Error::AbortedTask
-            | Error::S3Error { .. }
-            | Error::S3HttpError(_)
-            | Error::S3XmlError(_)
-            | Error::S3BucketError(_)
             | Error::Dump(_)
             | Error::Heed(_)
             | Error::Milli { .. }
@@ -306,14 +293,8 @@ impl ErrorCode for Error {
             Error::BatchNotFound(_) => Code::BatchNotFound,
             Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
             Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
+            // TODO: not sure of the Code to use
             Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
-            Error::S3Error { status, .. } if status.is_client_error() => {
-                Code::InvalidS3SnapshotRequest
-            }
-            Error::S3Error { .. } => Code::S3SnapshotServerError,
-            Error::S3HttpError(_) => Code::S3SnapshotServerError,
-            Error::S3XmlError(_) => Code::S3SnapshotServerError,
-            Error::S3BucketError(_) => Code::InvalidS3SnapshotParameters,
             Error::Dump(e) => e.error_code(),
             Error::Milli { error, .. } => error.error_code(),
             Error::ProcessBatchPanicked(_) => Code::Internal,
@@ -1,9 +1,9 @@
 use std::sync::{Arc, RwLock};

+use meilisearch_types::enterprise_edition::network::Network;
 use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
 use meilisearch_types::heed::types::{SerdeJson, Str};
 use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
-use meilisearch_types::network::Network;

 use crate::error::FeatureNotEnabledError;
 use crate::Result;
@@ -199,7 +199,7 @@ impl IndexMapper {
         let uuid = Uuid::new_v4();
         self.index_mapping.put(&mut wtxn, name, &uuid)?;

-        let index_path = self.index_path(uuid);
+        let index_path = self.base_path.join(uuid.to_string());
         fs::create_dir_all(&index_path)?;

         // Error if the UUIDv4 somehow already exists in the map, since it should be fresh.
@@ -286,7 +286,7 @@ impl IndexMapper {
         };

         let index_map = self.index_map.clone();
-        let index_path = self.index_path(uuid);
+        let index_path = self.base_path.join(uuid.to_string());
         let index_name = name.to_string();
         thread::Builder::new()
             .name(String::from("index_deleter"))
@@ -341,26 +341,6 @@ impl IndexMapper {
         Ok(())
     }

-    /// Closes the specified index.
-    ///
-    /// This operation involves closing the underlying environment and so can take a long time to complete.
-    ///
-    /// # Panics
-    ///
-    /// - If the Index corresponding to the passed name is concurrently being deleted/resized or cannot be found in the
-    /// in memory hash map.
-    pub fn close_index(&self, rtxn: &RoTxn, name: &str) -> Result<()> {
-        let uuid = self
-            .index_mapping
-            .get(rtxn, name)?
-            .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
-
-        // We remove the index from the in-memory index map.
-        self.index_map.write().unwrap().close_for_resize(&uuid, self.enable_mdb_writemap, 0);
-
-        Ok(())
-    }
-
     /// Return an index, may open it if it wasn't already opened.
     pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
         if let Some((current_name, current_index)) =
@@ -408,7 +388,7 @@ impl IndexMapper {
                 } else {
                     continue;
                 };
-                let index_path = self.index_path(uuid);
+                let index_path = self.base_path.join(uuid.to_string());
                 // take the lock to reopen the environment.
                 reopen
                     .reopen(&mut self.index_map.write().unwrap(), &index_path)
@@ -425,7 +405,7 @@ impl IndexMapper {
                 // if it's not already there.
                 match index_map.get(&uuid) {
                     Missing => {
-                        let index_path = self.index_path(uuid);
+                        let index_path = self.base_path.join(uuid.to_string());

                         break index_map
                             .create(
@@ -452,14 +432,6 @@ impl IndexMapper {
         Ok(index)
     }

-    /// Returns the path of the index.
-    ///
-    /// The folder located at this path is containing the data.mdb,
-    /// the lock.mdb and an optional data.mdb.cpy file.
-    pub fn index_path(&self, uuid: Uuid) -> PathBuf {
-        self.base_path.join(uuid.to_string())
-    }
-
     pub fn rollback_index(
         &self,
         rtxn: &RoTxn,
@@ -500,7 +472,7 @@ impl IndexMapper {
             };
         }

-        let index_path = self.index_path(uuid);
+        let index_path = self.base_path.join(uuid.to_string());
         Index::rollback(milli::heed::EnvOpenOptions::new().read_txn_without_tls(), index_path, to)
             .map_err(|err| crate::Error::from_milli(err, Some(name.to_string())))
     }
@@ -6,7 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
 use meilisearch_types::heed::{Database, RoTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
 use meilisearch_types::tasks::{Details, Kind, Status, Task};
-use meilisearch_types::versioning::{self, VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
+use meilisearch_types::versioning;
 use roaring::RoaringBitmap;

 use crate::index_mapper::IndexMapper;
@@ -36,7 +36,6 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
         run_loop_iteration: _,
         embedders: _,
         chat_settings: _,
-        runtime: _,
     } = scheduler;

     let rtxn = env.read_txn().unwrap();
@@ -232,7 +231,6 @@ pub fn snapshot_task(task: &Task) -> String {
         status,
         kind,
         network,
-        custom_metadata,
     } = task;
     snap.push('{');
     snap.push_str(&format!("uid: {uid}, "));
@@ -253,9 +251,6 @@ pub fn snapshot_task(task: &Task) -> String {
     if let Some(network) = network {
         snap.push_str(&format!("network: {network:?}, "))
     }
-    if let Some(custom_metadata) = custom_metadata {
-        snap.push_str(&format!("custom_metadata: {custom_metadata:?}"))
-    }

     snap.push('}');
     snap
@@ -320,14 +315,7 @@ fn snapshot_details(d: &Details) -> String {
             format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
         }
         Details::UpgradeDatabase { from, to } => {
-            if to == &(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) {
-                format!("{{ from: {from:?}, to: [current version] }}")
-            } else {
-                format!("{{ from: {from:?}, to: {to:?} }}")
-            }
-        }
-        Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
-            format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
+            format!("{{ from: {from:?}, to: {to:?} }}")
         }
     }
 }
@@ -404,21 +392,7 @@ pub fn snapshot_batch(batch: &Batch) -> String {

     snap.push('{');
     snap.push_str(&format!("uid: {uid}, "));
-    let details = if let Some(upgrade_to) = &details.upgrade_to {
-        if upgrade_to.as_str()
-            == format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}").as_str()
-        {
-            let mut details = details.clone();
-
-            details.upgrade_to = Some("[current version]".into());
-            serde_json::to_string(&details).unwrap()
-        } else {
-            serde_json::to_string(details).unwrap()
-        }
-    } else {
-        serde_json::to_string(details).unwrap()
-    };
-    snap.push_str(&format!("details: {details}, "));
+    snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
     snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
     if !embedder_stats.skip_serializing() {
         snap.push_str(&format!(
@@ -54,6 +54,7 @@ pub use features::RoFeatures;
 use flate2::bufread::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
+use meilisearch_types::enterprise_edition::network::Network;
 use meilisearch_types::features::{
     ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
 };
@@ -66,7 +67,6 @@ use meilisearch_types::milli::vector::{
     Embedder, EmbedderOptions, RuntimeEmbedder, RuntimeEmbedders, RuntimeFragment,
 };
 use meilisearch_types::milli::{self, Index};
-use meilisearch_types::network::Network;
 use meilisearch_types::task_view::TaskView;
 use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
 use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
@@ -216,9 +216,6 @@ pub struct IndexScheduler {
     /// A counter that is incremented before every call to [`tick`](IndexScheduler::tick)
     #[cfg(test)]
     run_loop_iteration: Arc<RwLock<usize>>,
-
-    /// The tokio runtime used for asynchronous tasks.
-    runtime: Option<tokio::runtime::Handle>,
 }

 impl IndexScheduler {
@@ -245,7 +242,6 @@ impl IndexScheduler {
             run_loop_iteration: self.run_loop_iteration.clone(),
             features: self.features.clone(),
             chat_settings: self.chat_settings,
-            runtime: self.runtime.clone(),
         }
     }

@@ -259,23 +255,13 @@ impl IndexScheduler {
     }

     /// Create an index scheduler and start its run loop.
+    #[allow(private_interfaces)] // because test_utils is private
     pub fn new(
         options: IndexSchedulerOptions,
         auth_env: Env<WithoutTls>,
         from_db_version: (u32, u32, u32),
-        runtime: Option<tokio::runtime::Handle>,
-    ) -> Result<Self> {
-        let this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
-
-        this.run();
-        Ok(this)
-    }
-
-    fn new_without_run(
-        options: IndexSchedulerOptions,
-        auth_env: Env<WithoutTls>,
-        from_db_version: (u32, u32, u32),
-        runtime: Option<tokio::runtime::Handle>,
+        #[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
+        #[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
     ) -> Result<Self> {
         std::fs::create_dir_all(&options.tasks_path)?;
         std::fs::create_dir_all(&options.update_file_path)?;
@@ -330,7 +316,8 @@ impl IndexScheduler {

         wtxn.commit()?;

-        Ok(Self {
+        // allow unreachable_code to get rids of the warning in the case of a test build.
+        let this = Self {
             processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
             version,
             queue,
@@ -346,32 +333,15 @@ impl IndexScheduler {
             webhooks: Arc::new(webhooks),
             embedders: Default::default(),

-            #[cfg(test)] // Will be replaced in `new_tests` in test environments
-            test_breakpoint_sdr: crossbeam_channel::bounded(0).0,
-            #[cfg(test)] // Will be replaced in `new_tests` in test environments
-            planned_failures: Default::default(),
+            #[cfg(test)]
+            test_breakpoint_sdr,
+            #[cfg(test)]
+            planned_failures,
             #[cfg(test)]
             run_loop_iteration: Arc::new(RwLock::new(0)),
             features,
             chat_settings,
-            runtime,
-        })
-    }
-
-    /// Create an index scheduler and start its run loop.
-    #[cfg(test)]
-    fn new_test(
-        options: IndexSchedulerOptions,
-        auth_env: Env<WithoutTls>,
-        from_db_version: (u32, u32, u32),
-        runtime: Option<tokio::runtime::Handle>,
-        test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
-        planned_failures: Vec<(usize, test_utils::FailureLocation)>,
-    ) -> Result<Self> {
-        let mut this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
-
-        this.test_breakpoint_sdr = test_breakpoint_sdr;
-        this.planned_failures = planned_failures;
+        };

         this.run();
         Ok(this)
@@ -756,19 +726,6 @@ impl IndexScheduler {
         kind: KindWithContent,
         task_id: Option<TaskId>,
         dry_run: bool,
-    ) -> Result<Task> {
-        self.register_with_custom_metadata(kind, task_id, None, dry_run)
-    }
-
-    /// Register a new task in the scheduler, with metadata.
-    ///
-    /// If it fails and data was associated with the task, it tries to delete the associated data.
-    pub fn register_with_custom_metadata(
-        &self,
-        kind: KindWithContent,
-        task_id: Option<TaskId>,
-        custom_metadata: Option<String>,
-        dry_run: bool,
     ) -> Result<Task> {
         // if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
         if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
@@ -779,7 +736,7 @@ impl IndexScheduler {
         }

         let mut wtxn = self.env.write_txn()?;
-        let task = self.queue.register(&mut wtxn, &kind, task_id, custom_metadata, dry_run)?;
+        let task = self.queue.register(&mut wtxn, &kind, task_id, dry_run)?;

         // If the registered task is a task cancelation
         // we inform the processing tasks to stop (if necessary).
@@ -75,7 +75,6 @@ make_enum_progress! {
     pub enum TaskCancelationProgress {
         RetrievingTasks,
         CancelingUpgrade,
-        CleaningCompactionLeftover,
         UpdatingTasks,
     }
 }
@@ -139,17 +138,6 @@ make_enum_progress! {
     }
 }

-make_enum_progress! {
-    pub enum IndexCompaction {
-        RetrieveTheIndex,
-        CreateTemporaryFile,
-        CopyAndCompactTheIndex,
-        PersistTheCompactedIndex,
-        CloseTheIndex,
-        ReopenTheIndex,
-    }
-}
-
 make_enum_progress! {
     pub enum InnerSwappingTwoIndexes {
         RetrieveTheTasks,
@@ -502,11 +502,13 @@ impl Queue {
                 *before_finished_at,
             )?;

-            batches = if query.reverse.unwrap_or_default() {
-                batches.into_iter().take(*limit).collect()
-            } else {
-                batches.into_iter().rev().take(*limit).collect()
-            };
+            if let Some(limit) = limit {
+                batches = if query.reverse.unwrap_or_default() {
+                    batches.into_iter().take(*limit as usize).collect()
+                } else {
+                    batches.into_iter().rev().take(*limit as usize).collect()
+                };
+            }

         Ok(batches)
     }
@@ -600,8 +602,11 @@ impl Queue {
             Box::new(batches.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
         };

-        let batches =
-            self.batches.get_existing_batches(rtxn, batches.take(query.limit), processing)?;
+        let batches = self.batches.get_existing_batches(
+            rtxn,
+            batches.take(query.limit.unwrap_or(u32::MAX) as usize),
+            processing,
+        )?;

         Ok((batches, total))
     }
@@ -28,21 +28,21 @@ fn query_batches_from_and_limit() {

     let proc = index_scheduler.processing_tasks.read().unwrap().clone();
     let rtxn = index_scheduler.env.read_txn().unwrap();
-    let query = Query { limit: 0, ..Default::default() };
+    let query = Query { limit: Some(0), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[]");

-    let query = Query { limit: 1, ..Default::default() };
+    let query = Query { limit: Some(1), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[2,]");

-    let query = Query { limit: 2, ..Default::default() };
+    let query = Query { limit: Some(2), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
@@ -63,14 +63,14 @@ fn query_batches_from_and_limit() {
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[0,1,2,]");

-    let query = Query { from: Some(1), limit: 1, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[1,]");

-    let query = Query { from: Some(1), limit: 2, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
@@ -31,9 +31,6 @@ use crate::{Error, IndexSchedulerOptions, Result, TaskId};

 /// The number of database used by queue itself
 const NUMBER_OF_DATABASES: u32 = 1;
-/// The default limit for pagination
-const DEFAULT_LIMIT: usize = 20;
-
 /// Database const names for the `IndexScheduler`.
 mod db_name {
     pub const BATCH_TO_TASKS_MAPPING: &str = "batch-to-tasks-mapping";
@@ -43,11 +40,11 @@ mod db_name {
 ///
 /// An empty/default query (where each field is set to `None`) matches all tasks.
 /// Each non-null field restricts the set of tasks further.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Default, Debug, Clone, PartialEq, Eq)]
 pub struct Query {
-    /// The maximum number of tasks to be matched. Defaults to 20.
-    pub limit: usize,
-    /// The minimum [task id](`meilisearch_types::tasks::Task::uid`) to be matched. Defaults to 0.
+    /// The maximum number of tasks to be matched
+    pub limit: Option<u32>,
+    /// The minimum [task id](`meilisearch_types::tasks::Task::uid`) to be matched
     pub from: Option<u32>,
     /// The order used to return the tasks. By default the newest tasks are returned first and the boolean is `false`.
     pub reverse: Option<bool>,
@@ -86,29 +83,32 @@ pub struct Query {
     pub after_finished_at: Option<OffsetDateTime>,
 }

-impl Default for Query {
-    fn default() -> Self {
-        Self {
-            limit: DEFAULT_LIMIT,
-            from: Default::default(),
-            reverse: Default::default(),
-            uids: Default::default(),
-            batch_uids: Default::default(),
-            statuses: Default::default(),
-            types: Default::default(),
-            index_uids: Default::default(),
-            canceled_by: Default::default(),
-            before_enqueued_at: Default::default(),
-            after_enqueued_at: Default::default(),
-            before_started_at: Default::default(),
-            after_started_at: Default::default(),
-            before_finished_at: Default::default(),
-            after_finished_at: Default::default(),
-        }
-    }
-}
-
 impl Query {
+    /// Return `true` if every field of the query is set to `None`, such that the query
+    /// matches all tasks.
+    pub fn is_empty(&self) -> bool {
+        matches!(
+            self,
+            Query {
+                limit: None,
+                from: None,
+                reverse: None,
+                uids: None,
+                batch_uids: None,
+                statuses: None,
+                types: None,
+                index_uids: None,
+                canceled_by: None,
+                before_enqueued_at: None,
+                after_enqueued_at: None,
+                before_started_at: None,
+                after_started_at: None,
+                before_finished_at: None,
+                after_finished_at: None,
+            }
+        )
+    }
+
     /// Add an [index id](meilisearch_types::tasks::Task::index_uid) to the list of permitted indexes.
     pub fn with_index(self, index_uid: String) -> Self {
         let mut index_vec = self.index_uids.unwrap_or_default();
@@ -119,7 +119,7 @@ impl Query {
     // Removes the `from` and `limit` restrictions from the query.
     // Useful to get the total number of tasks matching a filter.
     pub fn without_limits(self) -> Self {
-        Query { limit: usize::MAX, from: None, ..self }
+        Query { limit: None, from: None, ..self }
     }
 }

@@ -257,7 +257,6 @@ impl Queue {
         wtxn: &mut RwTxn,
         kind: &KindWithContent,
         task_id: Option<TaskId>,
-        custom_metadata: Option<String>,
         dry_run: bool,
     ) -> Result<Task> {
         let next_task_id = self.tasks.next_task_id(wtxn)?;
@@ -281,7 +280,6 @@ impl Queue {
             status: Status::Enqueued,
             kind: kind.clone(),
             network: None,
-            custom_metadata,
         };
         // For deletion and cancelation tasks, we want to make extra sure that they
         // don't attempt to delete/cancel tasks that are newer than themselves.
@@ -312,8 +310,7 @@ impl Queue {
                 | self.tasks.status.get(wtxn, &Status::Failed)?.unwrap_or_default()
                 | self.tasks.status.get(wtxn, &Status::Canceled)?.unwrap_or_default();

-            let to_delete =
-                RoaringBitmap::from_sorted_iter(finished.into_iter().take(100_000)).unwrap();
+            let to_delete = RoaringBitmap::from_iter(finished.into_iter().rev().take(100_000));

             // /!\ the len must be at least 2 or else we might enter an infinite loop where we only delete
             // the deletion tasks we enqueued ourselves.
@@ -346,7 +343,6 @@ impl Queue {
                 tasks: to_delete,
             },
             None,
-            None,
             false,
         )?;

@@ -465,11 +465,13 @@ impl Queue {
                 *before_finished_at,
             )?;

-            tasks = if query.reverse.unwrap_or_default() {
-                tasks.into_iter().take(*limit).collect()
-            } else {
-                tasks.into_iter().rev().take(*limit).collect()
-            };
+            if let Some(limit) = limit {
+                tasks = if query.reverse.unwrap_or_default() {
+                    tasks.into_iter().take(*limit as usize).collect()
+                } else {
+                    tasks.into_iter().rev().take(*limit as usize).collect()
+                };
+            }

         Ok(tasks)
     }
@@ -527,7 +529,9 @@ impl Queue {
         } else {
             Box::new(tasks.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
         };
-        let tasks = self.tasks.get_existing_tasks(rtxn, tasks.take(query.limit))?;
+        let tasks = self
+            .tasks
+            .get_existing_tasks(rtxn, tasks.take(query.limit.unwrap_or(u32::MAX) as usize))?;

         let ProcessingTasks { batch, processing, progress: _ } = processing_tasks;

@@ -28,21 +28,21 @@ fn query_tasks_from_and_limit() {

     let rtxn = index_scheduler.env.read_txn().unwrap();
     let processing = index_scheduler.processing_tasks.read().unwrap();
-    let query = Query { limit: 0, ..Default::default() };
+    let query = Query { limit: Some(0), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[]");

-    let query = Query { limit: 1, ..Default::default() };
+    let query = Query { limit: Some(1), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[2,]");

-    let query = Query { limit: 2, ..Default::default() };
+    let query = Query { limit: Some(2), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
@@ -63,14 +63,14 @@ fn query_tasks_from_and_limit() {
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]");

-    let query = Query { from: Some(1), limit: 1, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[1,]");

-    let query = Query { from: Some(1), limit: 2, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
@@ -68,14 +68,13 @@ impl From<KindWithContent> for AutobatchKind {
             KindWithContent::IndexCreation { .. } => AutobatchKind::IndexCreation,
             KindWithContent::IndexUpdate { .. } => AutobatchKind::IndexUpdate,
             KindWithContent::IndexSwap { .. } => AutobatchKind::IndexSwap,
-            KindWithContent::IndexCompaction { .. }
-            | KindWithContent::TaskCancelation { .. }
+            KindWithContent::TaskCancelation { .. }
             | KindWithContent::TaskDeletion { .. }
             | KindWithContent::DumpCreation { .. }
             | KindWithContent::Export { .. }
             | KindWithContent::UpgradeDatabase { .. }
             | KindWithContent::SnapshotCreation => {
-                panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
+                panic!("The autobatcher should never be called with tasks that don't apply to an index.")
             }
         }
     }
@@ -288,10 +287,8 @@ impl BatchKind {
         };

         match (self, autobatch_kind) {
             // We don't batch any of these operations
-            (this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => {
-                Break((this, BatchStopReason::TaskCannotBeBatched { kind, id }))
-            },
+            (this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break((this, BatchStopReason::TaskCannotBeBatched { kind, id })),
             // We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
             (this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
                 Break((this, BatchStopReason::IndexCreationMismatch { id }))
@@ -55,10 +55,6 @@ pub(crate) enum Batch {
     UpgradeDatabase {
         tasks: Vec<Task>,
     },
-    IndexCompaction {
-        index_uid: String,
-        task: Task,
-    },
 }
 
 #[derive(Debug)]
@@ -114,8 +110,7 @@ impl Batch {
             | Batch::Dump(task)
             | Batch::IndexCreation { task, .. }
             | Batch::Export { task }
-            | Batch::IndexUpdate { task, .. }
-            | Batch::IndexCompaction { task, .. } => {
+            | Batch::IndexUpdate { task, .. } => {
                 RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
             }
             Batch::SnapshotCreation(tasks)
@@ -160,8 +155,7 @@ impl Batch {
             IndexOperation { op, .. } => Some(op.index_uid()),
             IndexCreation { index_uid, .. }
             | IndexUpdate { index_uid, .. }
-            | IndexDeletion { index_uid, .. }
-            | IndexCompaction { index_uid, .. } => Some(index_uid),
+            | IndexDeletion { index_uid, .. } => Some(index_uid),
         }
     }
 }
@@ -181,7 +175,6 @@ impl fmt::Display for Batch {
             Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
             Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
             Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
-            Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
             Batch::Export { .. } => f.write_str("Export")?,
             Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
         };
@@ -519,33 +512,17 @@ impl IndexScheduler {
             return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
         }
 
-        // 3. we get the next task to compact
-        let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
-        if let Some(task_id) = to_compact.min() {
-            let mut task =
-                self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
-            current_batch.processing(Some(&mut task));
-            current_batch.reason(BatchStopReason::TaskCannotBeBatched {
-                kind: Kind::IndexCompaction,
-                id: task_id,
-            });
-            let index_uid =
-                task.index_uid().expect("Compaction task must have an index uid").to_owned();
-            return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
-        }
-
-        // 4. we batch the export.
+        // 3. we batch the export.
         let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
         if !to_export.is_empty() {
             let task_id = to_export.iter().next().expect("There must be at least one export task");
             let mut task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
             current_batch.processing([&mut task]);
-            current_batch
-                .reason(BatchStopReason::TaskCannotBeBatched { kind: Kind::Export, id: task_id });
+            current_batch.reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::Export });
             return Ok(Some((Batch::Export { task }, current_batch)));
         }
 
-        // 5. we batch the snapshot.
+        // 4. we batch the snapshot.
         let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
         if !to_snapshot.is_empty() {
             let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
@@ -555,7 +532,7 @@ impl IndexScheduler {
             return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
         }
 
-        // 6. we batch the dumps.
+        // 5. we batch the dumps.
         let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
         if let Some(to_dump) = to_dump.min() {
             let mut task =
@@ -568,7 +545,7 @@ impl IndexScheduler {
             return Ok(Some((Batch::Dump(task), current_batch)));
         }
 
-        // 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
+        // 6. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
         let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
         let mut task =
             self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
@@ -25,7 +25,6 @@ use convert_case::{Case, Casing as _};
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::heed::{Env, WithoutTls};
 use meilisearch_types::milli;
-use meilisearch_types::milli::update::S3SnapshotOptions;
 use meilisearch_types::tasks::Status;
 use process_batch::ProcessBatchInfo;
 use rayon::current_num_threads;
@@ -88,14 +87,11 @@ pub struct Scheduler {
 
     /// Snapshot compaction status.
     pub(crate) experimental_no_snapshot_compaction: bool,
-
-    /// S3 Snapshot options.
-    pub(crate) s3_snapshot_options: Option<S3SnapshotOptions>,
 }
 
 impl Scheduler {
-    pub(crate) fn private_clone(&self) -> Self {
-        Self {
+    pub(crate) fn private_clone(&self) -> Scheduler {
+        Scheduler {
             must_stop_processing: self.must_stop_processing.clone(),
             wake_up: self.wake_up.clone(),
             autobatching_enabled: self.autobatching_enabled,
@@ -107,52 +103,23 @@ impl Scheduler {
             version_file_path: self.version_file_path.clone(),
             embedding_cache_cap: self.embedding_cache_cap,
             experimental_no_snapshot_compaction: self.experimental_no_snapshot_compaction,
-            s3_snapshot_options: self.s3_snapshot_options.clone(),
         }
     }
 
     pub fn new(options: &IndexSchedulerOptions, auth_env: Env<WithoutTls>) -> Scheduler {
-        let IndexSchedulerOptions {
-            version_file_path,
-            auth_path: _,
-            tasks_path: _,
-            update_file_path: _,
-            indexes_path: _,
-            snapshots_path,
-            dumps_path,
-            cli_webhook_url: _,
-            cli_webhook_authorization: _,
-            task_db_size: _,
-            index_base_map_size: _,
-            enable_mdb_writemap: _,
-            index_growth_amount: _,
-            index_count: _,
-            indexer_config,
-            autobatching_enabled,
-            cleanup_enabled: _,
-            max_number_of_tasks: _,
-            max_number_of_batched_tasks,
-            batched_tasks_size_limit,
-            instance_features: _,
-            auto_upgrade: _,
-            embedding_cache_cap,
-            experimental_no_snapshot_compaction,
-        } = options;
-
         Scheduler {
             must_stop_processing: MustStopProcessing::default(),
             // we want to start the loop right away in case meilisearch was ctrl+Ced while processing things
             wake_up: Arc::new(SignalEvent::auto(true)),
-            autobatching_enabled: *autobatching_enabled,
-            max_number_of_batched_tasks: *max_number_of_batched_tasks,
-            batched_tasks_size_limit: *batched_tasks_size_limit,
-            dumps_path: dumps_path.clone(),
-            snapshots_path: snapshots_path.clone(),
+            autobatching_enabled: options.autobatching_enabled,
+            max_number_of_batched_tasks: options.max_number_of_batched_tasks,
+            batched_tasks_size_limit: options.batched_tasks_size_limit,
+            dumps_path: options.dumps_path.clone(),
+            snapshots_path: options.snapshots_path.clone(),
             auth_env,
-            version_file_path: version_file_path.clone(),
-            embedding_cache_cap: *embedding_cache_cap,
-            experimental_no_snapshot_compaction: *experimental_no_snapshot_compaction,
-            s3_snapshot_options: indexer_config.s3_snapshot_options.clone(),
+            version_file_path: options.version_file_path.clone(),
+            embedding_cache_cap: options.embedding_cache_cap,
+            experimental_no_snapshot_compaction: options.experimental_no_snapshot_compaction,
         }
     }
 }
@@ -1,27 +1,22 @@
 use std::collections::{BTreeSet, HashMap, HashSet};
-use std::fs::{remove_file, File};
-use std::io::{ErrorKind, Seek, SeekFrom};
 use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::sync::atomic::Ordering;
 
-use byte_unit::Byte;
 use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
 use meilisearch_types::heed::{RoTxn, RwTxn};
-use meilisearch_types::milli::heed::CompactionOption;
 use meilisearch_types::milli::progress::{Progress, VariableNameStep};
 use meilisearch_types::milli::{self, ChannelCongestion};
 use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
 use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
 use milli::update::Settings as MilliSettings;
 use roaring::RoaringBitmap;
-use tempfile::{PersistError, TempPath};
 use time::OffsetDateTime;
 
 use super::create_batch::Batch;
 use crate::processing::{
     AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
-    IndexCompaction, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress,
-    TaskDeletionProgress, UpdateIndexProgress,
+    InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
+    UpdateIndexProgress,
 };
 use crate::utils::{
     self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
@@ -29,9 +24,6 @@ use crate::utils::{
 };
 use crate::{Error, IndexScheduler, Result, TaskId};
 
-/// The name of the copy of the data.mdb file used during compaction.
-const DATA_MDB_COPY_NAME: &str = "data.mdb.cpy";
-
 #[derive(Debug, Default)]
 pub struct ProcessBatchInfo {
     /// The write channel congestion. None when unavailable: settings update.
@@ -426,47 +418,6 @@ impl IndexScheduler {
                 task.status = Status::Succeeded;
                 Ok((vec![task], ProcessBatchInfo::default()))
             }
-            Batch::IndexCompaction { index_uid: _, mut task } => {
-                let KindWithContent::IndexCompaction { index_uid } = &task.kind else {
-                    unreachable!()
-                };
-
-                let rtxn = self.env.read_txn()?;
-                let ret = catch_unwind(AssertUnwindSafe(|| {
-                    self.apply_compaction(&rtxn, &progress, index_uid)
-                }));
-
-                let (pre_size, post_size) = match ret {
-                    Ok(Ok(stats)) => stats,
-                    Ok(Err(Error::AbortedTask)) => return Err(Error::AbortedTask),
-                    Ok(Err(e)) => return Err(e),
-                    Err(e) => {
-                        let msg = match e.downcast_ref::<&'static str>() {
-                            Some(s) => *s,
-                            None => match e.downcast_ref::<String>() {
-                                Some(s) => &s[..],
-                                None => "Box<dyn Any>",
-                            },
-                        };
-                        return Err(Error::Export(Box::new(Error::ProcessBatchPanicked(
-                            msg.to_string(),
-                        ))));
-                    }
-                };
-
-                task.status = Status::Succeeded;
-                if let Some(Details::IndexCompaction {
-                    index_uid: _,
-                    pre_compaction_size,
-                    post_compaction_size,
-                }) = task.details.as_mut()
-                {
-                    *pre_compaction_size = Some(Byte::from_u64(pre_size));
-                    *post_compaction_size = Some(Byte::from_u64(post_size));
-                }
-
-                Ok((vec![task], ProcessBatchInfo::default()))
-            }
             Batch::Export { mut task } => {
                 let KindWithContent::Export { url, api_key, payload_size, indexes } = &task.kind
                 else {
@@ -542,92 +493,6 @@ impl IndexScheduler {
         }
     }
 
-    fn apply_compaction(
-        &self,
-        rtxn: &RoTxn,
-        progress: &Progress,
-        index_uid: &str,
-    ) -> Result<(u64, u64)> {
-        // 1. Verify that the index exists
-        if !self.index_mapper.index_exists(rtxn, index_uid)? {
-            return Err(Error::IndexNotFound(index_uid.to_owned()));
-        }
-
-        // 2. We retrieve the index and create a temporary file in the index directory
-        progress.update_progress(IndexCompaction::RetrieveTheIndex);
-        let index = self.index_mapper.index(rtxn, index_uid)?;
-
-        // the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
-        self.index_mapper
-            .set_currently_updating_index(Some((index_uid.to_string(), index.clone())));
-
-        progress.update_progress(IndexCompaction::CreateTemporaryFile);
-        let src_path = index.path().join("data.mdb");
-        let pre_size = std::fs::metadata(&src_path)?.len();
-
-        let dst_path = TempPath::from_path(index.path().join(DATA_MDB_COPY_NAME));
-        let file = File::create(&dst_path)?;
-        let mut file = tempfile::NamedTempFile::from_parts(file, dst_path);
-
-        // 3. We copy the index data to the temporary file
-        progress.update_progress(IndexCompaction::CopyAndCompactTheIndex);
-        index
-            .copy_to_file(file.as_file_mut(), CompactionOption::Enabled)
-            .map_err(|error| Error::Milli { error, index_uid: Some(index_uid.to_string()) })?;
-        // ...and reset the file position as specified in the documentation
-        file.seek(SeekFrom::Start(0))?;
-
-        // 4. We replace the index data file with the temporary file
-        progress.update_progress(IndexCompaction::PersistTheCompactedIndex);
-        match file.persist(src_path) {
-            Ok(file) => file.sync_all()?,
-            // TODO see if we have a _resource busy_ error and probably handle this by:
-            // 1. closing the index, 2. replacing and 3. reopening it
-            Err(PersistError { error, file: _ }) => return Err(Error::IoError(error)),
-        };
-
-        // 5. Prepare to close the index
-        progress.update_progress(IndexCompaction::CloseTheIndex);
-
-        // unmark that the index is the processing one so we don't keep a handle to it, preventing its closing
-        self.index_mapper.set_currently_updating_index(None);
-
-        self.index_mapper.close_index(rtxn, index_uid)?;
-        drop(index);
-
-        progress.update_progress(IndexCompaction::ReopenTheIndex);
-        // 6. Reopen the index
-        // The index will use the compacted data file when being reopened
-        let index = self.index_mapper.index(rtxn, index_uid)?;
-
-        // if the update processed successfully, we're going to store the new
-        // stats of the index. Since the tasks have already been processed and
-        // this is a non-critical operation. If it fails, we should not fail
-        // the entire batch.
-        let res = || -> Result<_> {
-            let mut wtxn = self.env.write_txn()?;
-            let index_rtxn = index.read_txn()?;
-            let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
-                .map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
-            self.index_mapper.store_stats_of(&mut wtxn, index_uid, &stats)?;
-            wtxn.commit()?;
-            Ok(stats.database_size)
-        }();
-
-        let post_size = match res {
-            Ok(post_size) => post_size,
-            Err(e) => {
-                tracing::error!(
-                    error = &e as &dyn std::error::Error,
-                    "Could not write the stats of the index"
-                );
-                0
-            }
-        };
-
-        Ok((pre_size, post_size))
-    }
-
     /// Swap the index `lhs` with the index `rhs`.
     fn apply_index_swap(
         &self,
@@ -915,10 +780,9 @@ impl IndexScheduler {
 
         let enqueued_tasks = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
 
-        // 0. Check if any upgrade or compaction tasks were matched.
+        // 0. Check if any upgrade task was matched.
         // If so, we cancel all the failed or enqueued upgrade tasks.
         let upgrade_tasks = &self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)?;
-        let compaction_tasks = &self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)?;
         let is_canceling_upgrade = !matched_tasks.is_disjoint(upgrade_tasks);
         if is_canceling_upgrade {
             let failed_tasks = self.queue.tasks.get_status(rtxn, Status::Failed)?;
@@ -983,33 +847,7 @@ impl IndexScheduler {
             }
         }
 
-        // 3. If we are cancelling a compaction task, remove the tempfiles after incomplete compactions
-        for compaction_task in &tasks_to_cancel & compaction_tasks {
-            progress.update_progress(TaskCancelationProgress::CleaningCompactionLeftover);
-            let task = self.queue.tasks.get_task(rtxn, compaction_task)?.unwrap();
-            let Some(Details::IndexCompaction {
-                index_uid,
-                pre_compaction_size: _,
-                post_compaction_size: _,
-            }) = task.details
-            else {
-                unreachable!("wrong details for compaction task {compaction_task}")
-            };
-
-            let index_path = match self.index_mapper.index_mapping.get(rtxn, &index_uid)? {
-                Some(index_uuid) => self.index_mapper.index_path(index_uuid),
-                None => continue,
-            };
-
-            if let Err(e) = remove_file(index_path.join(DATA_MDB_COPY_NAME)) {
-                match e.kind() {
-                    ErrorKind::NotFound => (),
-                    _ => return Err(Error::IoError(e)),
-                }
-            }
-        }
-
-        // 4. We now have a list of tasks to cancel, cancel them
+        // 3. We now have a list of tasks to cancel, cancel them
         let (task_progress, progress_obj) = AtomicTaskStep::new(tasks_to_cancel.len() as u32);
         progress.update_progress(progress_obj);
 
@@ -12,8 +12,6 @@ use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress};
 use crate::queue::TaskQueue;
 use crate::{Error, IndexScheduler, Result};
 
-const UPDATE_FILES_DIR_NAME: &str = "update_files";
-
 /// # Safety
 ///
 /// See [`EnvOpenOptions::open`].
@@ -80,32 +78,10 @@ impl IndexScheduler {
     pub(super) fn process_snapshot(
         &self,
         progress: Progress,
-        tasks: Vec<Task>,
+        mut tasks: Vec<Task>,
     ) -> Result<Vec<Task>> {
         progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation);
 
-        match self.scheduler.s3_snapshot_options.clone() {
-            Some(options) => {
-                #[cfg(not(unix))]
-                {
-                    let _ = options;
-                    panic!("Non-unix platform does not support S3 snapshotting");
-                }
-                #[cfg(unix)]
-                self.runtime
-                    .as_ref()
-                    .expect("Runtime not initialized")
-                    .block_on(self.process_snapshot_to_s3(progress, options, tasks))
-            }
-            None => self.process_snapshots_to_disk(progress, tasks),
-        }
-    }
-
-    fn process_snapshots_to_disk(
-        &self,
-        progress: Progress,
-        mut tasks: Vec<Task>,
-    ) -> Result<Vec<Task>, Error> {
         fs::create_dir_all(&self.scheduler.snapshots_path)?;
         let temp_snapshot_dir = tempfile::tempdir()?;
 
@@ -152,7 +128,7 @@ impl IndexScheduler {
         let rtxn = self.env.read_txn()?;
 
         // 2.4 Create the update files directory
-        let update_files_dir = temp_snapshot_dir.path().join(UPDATE_FILES_DIR_NAME);
+        let update_files_dir = temp_snapshot_dir.path().join("update_files");
         fs::create_dir_all(&update_files_dir)?;
 
         // 2.5 Only copy the update files of the enqueued tasks
@@ -164,7 +140,7 @@ impl IndexScheduler {
             let task =
                 self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
             if let Some(content_uuid) = task.content_uuid() {
-                let src = self.queue.file_store.update_path(content_uuid);
+                let src = self.queue.file_store.get_update_path(content_uuid);
                 let dst = update_files_dir.join(content_uuid.to_string());
                 fs::copy(src, dst)?;
             }
@@ -230,407 +206,4 @@ impl IndexScheduler {
 
         Ok(tasks)
     }
-
-    #[cfg(unix)]
-    pub(super) async fn process_snapshot_to_s3(
-        &self,
-        progress: Progress,
-        opts: meilisearch_types::milli::update::S3SnapshotOptions,
-        mut tasks: Vec<Task>,
-    ) -> Result<Vec<Task>> {
-        use meilisearch_types::milli::update::S3SnapshotOptions;
-
-        let S3SnapshotOptions {
-            s3_bucket_url,
-            s3_bucket_region,
-            s3_bucket_name,
-            s3_snapshot_prefix,
-            s3_access_key,
-            s3_secret_key,
-            s3_max_in_flight_parts,
-            s3_compression_level: level,
-            s3_signature_duration,
-            s3_multipart_part_size,
-        } = opts;
-
-        let must_stop_processing = self.scheduler.must_stop_processing.clone();
-        let retry_backoff = backoff::ExponentialBackoff::default();
-        let db_name = {
-            let mut base_path = self.env.path().to_owned();
-            base_path.pop();
-            base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms").to_string()
-        };
-
-        let (reader, writer) = std::io::pipe()?;
-        let uploader_task = tokio::spawn(multipart_stream_to_s3(
-            s3_bucket_url,
-            s3_bucket_region,
-            s3_bucket_name,
-            s3_snapshot_prefix,
-            s3_access_key,
-            s3_secret_key,
-            s3_max_in_flight_parts,
-            s3_signature_duration,
-            s3_multipart_part_size,
-            must_stop_processing,
-            retry_backoff,
-            db_name,
-            reader,
-        ));
-
-        let index_scheduler = IndexScheduler::private_clone(self);
-        let builder_task = tokio::task::spawn_blocking(move || {
-            stream_tarball_into_pipe(progress, level, writer, index_scheduler)
-        });
-
-        let (uploader_result, builder_result) = tokio::join!(uploader_task, builder_task);
-
-        // Check uploader result first to early return on task abortion.
-        // safety: JoinHandle can return an error if the task was aborted, cancelled, or panicked.
-        uploader_result.unwrap()?;
-        builder_result.unwrap()?;
-
-        for task in &mut tasks {
-            task.status = Status::Succeeded;
-        }
-
-        Ok(tasks)
-    }
-}
-
-/// Streams a tarball of the database content into a pipe.
-#[cfg(unix)]
-fn stream_tarball_into_pipe(
-    progress: Progress,
-    level: u32,
-    writer: std::io::PipeWriter,
-    index_scheduler: IndexScheduler,
-) -> std::result::Result<(), Error> {
-    use std::io::Write as _;
-    use std::path::Path;
-
-    let writer = flate2::write::GzEncoder::new(writer, flate2::Compression::new(level));
-    let mut tarball = tar::Builder::new(writer);
-
-    // 1. Snapshot the version file
-    tarball
-        .append_path_with_name(&index_scheduler.scheduler.version_file_path, VERSION_FILE_NAME)?;
-
-    // 2. Snapshot the index scheduler LMDB env
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
-    let tasks_env_file = index_scheduler.env.try_clone_inner_file()?;
-    let path = Path::new("tasks").join("data.mdb");
-    append_file_to_tarball(&mut tarball, path, tasks_env_file)?;
-
-    // 2.3 Create a read transaction on the index-scheduler
-    let rtxn = index_scheduler.env.read_txn()?;
-
-    // 2.4 Create the update files directory
-    // And only copy the update files of the enqueued tasks
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
-    let enqueued = index_scheduler.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
-    let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);
-    progress.update_progress(update_file_progress);
-
-    // We create the update_files directory so that it
-    // always exists even if there are no update files
-    let update_files_dir = Path::new(UPDATE_FILES_DIR_NAME);
-    let src_update_files_dir = {
-        let mut path = index_scheduler.env.path().to_path_buf();
-        path.pop();
-        path.join(UPDATE_FILES_DIR_NAME)
-    };
-    tarball.append_dir(update_files_dir, src_update_files_dir)?;
-
-    for task_id in enqueued {
-        let task = index_scheduler
-            .queue
-            .tasks
-            .get_task(&rtxn, task_id)?
-            .ok_or(Error::CorruptedTaskQueue)?;
-        if let Some(content_uuid) = task.content_uuid() {
-            use std::fs::File;
-
-            let src = index_scheduler.queue.file_store.update_path(content_uuid);
-            let mut update_file = File::open(src)?;
-            let path = update_files_dir.join(content_uuid.to_string());
-            tarball.append_file(path, &mut update_file)?;
-        }
-        atomic.fetch_add(1, Ordering::Relaxed);
-    }
-
-    // 3. Snapshot every indexes
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexes);
-    let index_mapping = index_scheduler.index_mapper.index_mapping;
-    let nb_indexes = index_mapping.len(&rtxn)? as u32;
-    let indexes_dir = Path::new("indexes");
-    let indexes_references: Vec<_> = index_scheduler
-        .index_mapper
-        .index_mapping
-        .iter(&rtxn)?
-        .map(|res| res.map_err(Error::from).map(|(name, uuid)| (name.to_string(), uuid)))
-        .collect::<Result<_, Error>>()?;
-
-    // It's prettier to use a for loop instead of the IndexMapper::try_for_each_index
-    // method, especially when we need to access the UUID, local path and index number.
-    for (i, (name, uuid)) in indexes_references.into_iter().enumerate() {
-        progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
-            &name, i as u32, nb_indexes,
-        ));
-        let path = indexes_dir.join(uuid.to_string()).join("data.mdb");
-        let index = index_scheduler.index_mapper.index(&rtxn, &name)?;
-        let index_file = index.try_clone_inner_file()?;
-        tracing::trace!("Appending index file for {name} in {}", path.display());
-        append_file_to_tarball(&mut tarball, path, index_file)?;
-    }
-
-    drop(rtxn);
-
-    // 4. Snapshot the auth LMDB env
-    progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
-    let auth_env_file = index_scheduler.scheduler.auth_env.try_clone_inner_file()?;
-    let path = Path::new("auth").join("data.mdb");
-    append_file_to_tarball(&mut tarball, path, auth_env_file)?;
-
-    let mut gzencoder = tarball.into_inner()?;
-    gzencoder.flush()?;
-    gzencoder.try_finish()?;
-    let mut writer = gzencoder.finish()?;
-    writer.flush()?;
-
-    Result::<_, Error>::Ok(())
-}
-
-#[cfg(unix)]
-fn append_file_to_tarball<W, P>(
-    tarball: &mut tar::Builder<W>,
-    path: P,
-    mut auth_env_file: fs::File,
-) -> Result<(), Error>
-where
-    W: std::io::Write,
-    P: AsRef<std::path::Path>,
-{
-    use std::io::{Seek as _, SeekFrom};
-
-    // Note: A previous snapshot operation may have left the cursor
-    // at the end of the file so we need to seek to the start.
-    auth_env_file.seek(SeekFrom::Start(0))?;
-    tarball.append_file(path, &mut auth_env_file)?;
-    Ok(())
-}
-
-/// Streams the content read from the given reader to S3.
-#[cfg(unix)]
-#[allow(clippy::too_many_arguments)]
-async fn multipart_stream_to_s3(
-    s3_bucket_url: String,
-    s3_bucket_region: String,
-    s3_bucket_name: String,
-    s3_snapshot_prefix: String,
-    s3_access_key: String,
-    s3_secret_key: String,
-    s3_max_in_flight_parts: std::num::NonZero<usize>,
-    s3_signature_duration: std::time::Duration,
-    s3_multipart_part_size: u64,
-    must_stop_processing: super::MustStopProcessing,
-    retry_backoff: backoff::exponential::ExponentialBackoff<backoff::SystemClock>,
-    db_name: String,
-    reader: std::io::PipeReader,
-) -> Result<(), Error> {
-    use std::collections::VecDeque;
-    use std::io;
-    use std::os::fd::OwnedFd;
-    use std::path::PathBuf;
-
-    use bytes::{Bytes, BytesMut};
-    use reqwest::{Client, Response};
-    use rusty_s3::actions::CreateMultipartUpload;
-    use rusty_s3::{Bucket, BucketError, Credentials, S3Action as _, UrlStyle};
-    use tokio::task::JoinHandle;
-
-    let reader = OwnedFd::from(reader);
-    let reader = tokio::net::unix::pipe::Receiver::from_owned_fd(reader)?;
-    let s3_snapshot_prefix = PathBuf::from(s3_snapshot_prefix);
-    let url =
-        s3_bucket_url.parse().map_err(BucketError::ParseError).map_err(Error::S3BucketError)?;
-    let bucket = Bucket::new(url, UrlStyle::Path, s3_bucket_name, s3_bucket_region)
-        .map_err(Error::S3BucketError)?;
-    let credential = Credentials::new(s3_access_key, s3_secret_key);
-
-    // Note for the future (rust 1.91+): use with_added_extension, it's prettier
-    let object_path = s3_snapshot_prefix.join(format!("{db_name}.snapshot"));
-    // Note: It doesn't work on Windows and if a port to this platform is needed,
-    // use the slash-path crate or similar to get the correct path separator.
-    let object = object_path.display().to_string();
-
-    let action = bucket.create_multipart_upload(Some(&credential), &object);
-    let url = action.sign(s3_signature_duration);
-
-    let client = Client::new();
-    let resp = client.post(url).send().await.map_err(Error::S3HttpError)?;
-    let status = resp.status();
-
-    let body = match resp.error_for_status_ref() {
-        Ok(_) => resp.text().await.map_err(Error::S3HttpError)?,
-        Err(_) => {
-            return Err(Error::S3Error { status, body: resp.text().await.unwrap_or_default() })
-        }
-    };
-
-    let multipart =
-        CreateMultipartUpload::parse_response(&body).map_err(|e| Error::S3XmlError(Box::new(e)))?;
-    tracing::debug!("Starting the upload of the snapshot to {object}");
-
-    // We use this bumpalo for etags strings.
-    let bump = bumpalo::Bump::new();
-    let mut etags = Vec::<&str>::new();
-    let mut in_flight = VecDeque::<(JoinHandle<reqwest::Result<Response>>, Bytes)>::with_capacity(
-        s3_max_in_flight_parts.get(),
-    );
-
-    // Part numbers start at 1 and cannot be larger than 10k
-    for part_number in 1u16.. {
-        if must_stop_processing.get() {
-            return Err(Error::AbortedTask);
-        }
-
-        let part_upload =
-            bucket.upload_part(Some(&credential), &object, part_number, multipart.upload_id());
-        let url = part_upload.sign(s3_signature_duration);
-
-        // Wait for a buffer to be ready if there are in-flight parts that landed
-        let mut buffer = if in_flight.len() >= s3_max_in_flight_parts.get() {
-            let (handle, buffer) = in_flight.pop_front().expect("At least one in flight request");
-            let resp = join_and_map_error(handle).await?;
-            extract_and_append_etag(&bump, &mut etags, resp.headers())?;
-
-            let mut buffer = match buffer.try_into_mut() {
-                Ok(buffer) => buffer,
-                Err(_) => unreachable!("All bytes references were consumed in the task"),
-            };
-            buffer.clear();
-            buffer
-        } else {
-            BytesMut::with_capacity(s3_multipart_part_size as usize)
-        };
-
-        // If we successfully read enough bytes,
-        // we can continue and send the buffer/part
-        while buffer.len() < (s3_multipart_part_size as usize / 2) {
-            // Wait for the pipe to be readable
-
-            reader.readable().await?;
-
-            match reader.try_read_buf(&mut buffer) {
-                Ok(0) => break,
-                // We read some bytes but maybe not enough
-                Ok(_) => continue,
-                // The readiness event is a false positive.
-                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
-                Err(e) => return Err(e.into()),
-            }
-        }
-
-        if buffer.is_empty() {
-            // Break the loop if the buffer is
-            // empty after we tried to read bytes
-            break;
-        }
-
-        let body = buffer.freeze();
-        tracing::trace!("Sending part {part_number}");
-        let task = tokio::spawn({
-            let client = client.clone();
-            let body = body.clone();
-            backoff::future::retry(retry_backoff.clone(), move || {
-                let client = client.clone();
-                let url = url.clone();
-                let body = body.clone();
-                async move {
-                    match client.put(url).body(body).send().await {
-                        Ok(resp) if resp.status().is_client_error() => {
-                            resp.error_for_status().map_err(backoff::Error::Permanent)
-                        }
-                        Ok(resp) => Ok(resp),
-                        Err(e) => Err(backoff::Error::transient(e)),
-                    }
-                }
-            })
-        });
-        in_flight.push_back((task, body));
-    }
-
-    for (handle, _buffer) in in_flight {
-        let resp = join_and_map_error(handle).await?;
-        extract_and_append_etag(&bump, &mut etags, resp.headers())?;
-    }
-
-    tracing::debug!("Finalizing the multipart upload");
-
-    let action = bucket.complete_multipart_upload(
-        Some(&credential),
-        &object,
-        multipart.upload_id(),
-        etags.iter().map(AsRef::as_ref),
-    );
-    let url = action.sign(s3_signature_duration);
-    let body = action.body();
-    let resp = backoff::future::retry(retry_backoff, move || {
-        let client = client.clone();
-        let url = url.clone();
-        let body = body.clone();
-        async move {
-            match client.post(url).body(body).send().await {
-                Ok(resp) if resp.status().is_client_error() => {
-                    Err(backoff::Error::Permanent(Error::S3Error {
-                        status: resp.status(),
-                        body: resp.text().await.unwrap_or_default(),
-                    }))
-                }
-                Ok(resp) => Ok(resp),
-                Err(e) => Err(backoff::Error::transient(Error::S3HttpError(e))),
-            }
-        }
-    })
-    .await?;
-
-    let status = resp.status();
-    let body = resp.text().await.map_err(|e| Error::S3Error { status, body: e.to_string() })?;
-    if status.is_success() {
-        Ok(())
-    } else {
-        Err(Error::S3Error { status, body })
-    }
-}
-
-#[cfg(unix)]
-async fn join_and_map_error(
-    join_handle: tokio::task::JoinHandle<Result<reqwest::Response, reqwest::Error>>,
-) -> Result<reqwest::Response> {
-    // safety: Panic happens if the task (JoinHandle) was aborted, cancelled, or panicked
-    let request = join_handle.await.unwrap();
-    let resp = request.map_err(Error::S3HttpError)?;
-    match resp.error_for_status_ref() {
-        Ok(_) => Ok(resp),
-        Err(_) => Err(Error::S3Error {
-            status: resp.status(),
-            body: resp.text().await.unwrap_or_default(),
-        }),
-    }
-}
-
-#[cfg(unix)]
-fn extract_and_append_etag<'b>(
-    bump: &'b bumpalo::Bump,
-    etags: &mut Vec<&'b str>,
-    headers: &reqwest::header::HeaderMap,
-) -> Result<()> {
-    use reqwest::header::ETAG;
-
-    let etag = headers.get(ETAG).ok_or_else(|| Error::S3XmlError("Missing ETag header".into()))?;
-    let etag = etag.to_str().map_err(|e| Error::S3XmlError(Box::new(e)))?;
-    etags.push(bump.alloc_str(etag));
-
-    Ok(())
 }
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 22, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [4,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
 2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
 3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 22, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 22, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 ----------------------------------------------------------------------
 ### Status:
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 22, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 ----------------------------------------------------------------------
 ### Status:
@@ -37,7 +37,7 @@ catto [1,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 22, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 ----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 22, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -722,7 +722,7 @@ fn basic_get_stats() {
|
|||||||
let kind = index_creation_task("whalo", "fish");
|
let kind = index_creation_task("whalo", "fish");
|
||||||
let _task = index_scheduler.register(kind, None, false).unwrap();
|
let _task = index_scheduler.register(kind, None, false).unwrap();
|
||||||
|
|
||||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||||
{
|
{
|
||||||
"indexes": {
|
"indexes": {
|
||||||
"catto": 1,
|
"catto": 1,
|
||||||
@@ -742,7 +742,6 @@ fn basic_get_stats() {
|
|||||||
"documentEdition": 0,
|
"documentEdition": 0,
|
||||||
"dumpCreation": 0,
|
"dumpCreation": 0,
|
||||||
"export": 0,
|
"export": 0,
|
||||||
"indexCompaction": 0,
|
|
||||||
"indexCreation": 3,
|
"indexCreation": 3,
|
||||||
"indexDeletion": 0,
|
"indexDeletion": 0,
|
||||||
"indexSwap": 0,
|
"indexSwap": 0,
|
||||||
@@ -754,10 +753,10 @@ fn basic_get_stats() {
|
|||||||
"upgradeDatabase": 0
|
"upgradeDatabase": 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"###);
|
"#);
|
||||||
|
|
||||||
handle.advance_till([Start, BatchCreated]);
|
handle.advance_till([Start, BatchCreated]);
|
||||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||||
{
|
{
|
||||||
"indexes": {
|
"indexes": {
|
||||||
"catto": 1,
|
"catto": 1,
|
||||||
@@ -777,7 +776,6 @@ fn basic_get_stats() {
|
|||||||
"documentEdition": 0,
|
"documentEdition": 0,
|
||||||
"dumpCreation": 0,
|
"dumpCreation": 0,
|
||||||
"export": 0,
|
"export": 0,
|
||||||
"indexCompaction": 0,
|
|
||||||
"indexCreation": 3,
|
"indexCreation": 3,
|
||||||
"indexDeletion": 0,
|
"indexDeletion": 0,
|
||||||
"indexSwap": 0,
|
"indexSwap": 0,
|
||||||
@@ -789,7 +787,7 @@ fn basic_get_stats() {
|
|||||||
"upgradeDatabase": 0
|
"upgradeDatabase": 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"###);
|
"#);
|
||||||
|
|
||||||
handle.advance_till([
|
handle.advance_till([
|
||||||
InsideProcessBatch,
|
InsideProcessBatch,
|
||||||
@@ -799,7 +797,7 @@ fn basic_get_stats() {
|
|||||||
Start,
|
Start,
|
||||||
BatchCreated,
|
BatchCreated,
|
||||||
]);
|
]);
|
||||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||||
{
|
{
|
||||||
"indexes": {
|
"indexes": {
|
||||||
"catto": 1,
|
"catto": 1,
|
||||||
@@ -819,7 +817,6 @@ fn basic_get_stats() {
|
|||||||
"documentEdition": 0,
|
"documentEdition": 0,
|
||||||
"dumpCreation": 0,
|
"dumpCreation": 0,
|
||||||
"export": 0,
|
"export": 0,
|
||||||
"indexCompaction": 0,
|
|
||||||
"indexCreation": 3,
|
"indexCreation": 3,
|
||||||
"indexDeletion": 0,
|
"indexDeletion": 0,
|
||||||
"indexSwap": 0,
|
"indexSwap": 0,
|
||||||
@@ -831,7 +828,7 @@ fn basic_get_stats() {
|
|||||||
"upgradeDatabase": 0
|
"upgradeDatabase": 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"###);
|
"#);
|
||||||
|
|
||||||
// now we make one more batch, the started_at field of the new tasks will be past `second_start_time`
|
// now we make one more batch, the started_at field of the new tasks will be past `second_start_time`
|
||||||
handle.advance_till([
|
handle.advance_till([
|
||||||
@@ -842,7 +839,7 @@ fn basic_get_stats() {
|
|||||||
Start,
|
Start,
|
||||||
BatchCreated,
|
BatchCreated,
|
||||||
]);
|
]);
|
||||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||||
{
|
{
|
||||||
"indexes": {
|
"indexes": {
|
||||||
"catto": 1,
|
"catto": 1,
|
||||||
@@ -862,7 +859,6 @@ fn basic_get_stats() {
|
|||||||
"documentEdition": 0,
|
"documentEdition": 0,
|
||||||
"dumpCreation": 0,
|
"dumpCreation": 0,
|
||||||
"export": 0,
|
"export": 0,
|
||||||
"indexCompaction": 0,
|
|
||||||
"indexCreation": 3,
|
"indexCreation": 3,
|
||||||
"indexDeletion": 0,
|
"indexDeletion": 0,
|
||||||
"indexSwap": 0,
|
"indexSwap": 0,
|
||||||
@@ -874,7 +870,7 @@ fn basic_get_stats() {
|
|||||||
"upgradeDatabase": 0
|
"upgradeDatabase": 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"###);
|
"#);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
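The hunks above only shrink the raw-string delimiters around the inline snapshots, from `@r###"…"###` to `@r#"…"#`. As a hedged, standalone sketch (not Meilisearch code; `stats_json` is a hypothetical stand-in for `index_scheduler.get_stats()`, and plain `insta::assert_snapshot!` replaces the repo's `snapshot!`/`json_string!` helpers), the number of `#` characters only has to exceed any `"#` sequence that occurs inside the snapshot body:

```rust
// Minimal sketch of insta inline-snapshot delimiters; assumes insta and
// serde_json as dev-dependencies.
fn stats_json() -> String {
    // Hypothetical payload standing in for the scheduler stats.
    serde_json::json!({ "indexes": { "catto": 1 } }).to_string()
}

#[test]
fn inline_snapshot_delimiters() {
    // One `#` is enough here because the JSON body never contains `"#`.
    insta::assert_snapshot!(stats_json(), @r#"{"indexes":{"catto":1}}"#);
}
```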
@@ -126,7 +126,7 @@ impl IndexScheduler {
 std::fs::create_dir_all(&options.auth_path).unwrap();
 let auth_env = open_auth_store_env(&options.auth_path).unwrap();
 let index_scheduler =
-Self::new_test(options, auth_env, version, None, sender, planned_failures).unwrap();
+Self::new(options, auth_env, version, sender, planned_failures).unwrap();

 // To be 100% consistent between all test we're going to start the scheduler right now
 // and ensure it's in the expected starting state.

@@ -1,7 +1,7 @@
 use anyhow::bail;
 use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
 use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
-use meilisearch_types::versioning;
+use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
 use time::OffsetDateTime;
 use tracing::info;

@@ -9,82 +9,76 @@ use crate::queue::TaskQueue;
 use crate::versioning::Versioning;

 trait UpgradeIndexScheduler {
-fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()>;
-/// Whether the migration should be applied, depending on the initial version of the index scheduler before
-/// any migration was applied
-fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;
-/// A progress-centric description of the migration
-fn description(&self) -> &'static str;
+fn upgrade(
+&self,
+env: &Env<WithoutTls>,
+wtxn: &mut RwTxn,
+original: (u32, u32, u32),
+) -> anyhow::Result<()>;
+fn target_version(&self) -> (u32, u32, u32);
 }

-/// Upgrade the index scheduler to the binary version.
-///
-/// # Warning
-///
-/// The current implementation uses a single wtxn to the index scheduler for the whole duration of the upgrade.
-/// If migrations start taking take a long time, it might prevent tasks from being registered.
-/// If this issue manifests, then it can be mitigated by adding a `fn target_version` to `UpgradeIndexScheduler`,
-/// to be able to write intermediate versions and drop the wtxn between applying migrations.
 pub fn upgrade_index_scheduler(
 env: &Env<WithoutTls>,
 versioning: &Versioning,
-initial_version: (u32, u32, u32),
+from: (u32, u32, u32),
+to: (u32, u32, u32),
 ) -> anyhow::Result<()> {
-let target_major: u32 = versioning::VERSION_MAJOR;
-let target_minor: u32 = versioning::VERSION_MINOR;
-let target_patch: u32 = versioning::VERSION_PATCH;
-let target_version = (target_major, target_minor, target_patch);
+let current_major = to.0;
+let current_minor = to.1;
+let current_patch = to.2;

-if initial_version == target_version {
-return Ok(());
-}

 let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
-// List all upgrade functions to apply in order here.
+// This is the last upgrade function, it will be called when the index is up to date.
+// any other upgrade function should be added before this one.
+&ToCurrentNoOp {},
 ];

-let (initial_major, initial_minor, initial_patch) = initial_version;
-if initial_version > target_version {
-bail!(
-"Database version {initial_major}.{initial_minor}.{initial_patch} is higher than the Meilisearch version {target_major}.{target_minor}.{target_patch}. Downgrade is not supported",
+let start = match from {
+(1, 12, _) => 0,
+(1, 13, _) => 0,
+(1, 14, _) => 0,
+(1, 15, _) => 0,
+(1, 16, _) => 0,
+(1, 17, _) => 0,
+(1, 18, _) => 0,
+(1, 19, _) => 0,
+(1, 20, _) => 0,
+(1, 21, _) => 0,
+(major, minor, patch) => {
+if major > current_major
+|| (major == current_major && minor > current_minor)
+|| (major == current_major && minor == current_minor && patch > current_patch)
+{
+bail!(
+"Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
 );
-}
-if initial_version < (1, 12, 0) {
-bail!(
-"Database version {initial_major}.{initial_minor}.{initial_patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{initial_major}.{initial_minor}.{initial_patch} and import it in the v{target_major}.{target_minor}.{target_patch}",
-);
-}
+} else if major < 1 || (major == current_major && minor < 12) {
+bail!(
+"Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
+);
+} else {
+bail!("Unknown database version: v{major}.{minor}.{patch}");
+}
+}
+};

 info!("Upgrading the task queue");
-let mut wtxn = env.write_txn()?;
-let migration_count = upgrade_functions.len();
-for (migration_index, upgrade) in upgrade_functions.iter().enumerate() {
-if upgrade.must_upgrade(initial_version) {
-info!(
-"[{migration_index}/{migration_count}]Applying migration: {}",
-upgrade.description()
-);
-upgrade.upgrade(env, &mut wtxn)?;
-info!(
-"[{}/{migration_count}]Migration applied: {}",
-migration_index + 1,
-upgrade.description()
-)
-} else {
-info!(
-"[{migration_index}/{migration_count}]Skipping unnecessary migration: {}",
-upgrade.description()
-)
-}
+let mut local_from = from;
+for upgrade in upgrade_functions[start..].iter() {
+let target = upgrade.target_version();
+info!(
+"Upgrading from v{}.{}.{} to v{}.{}.{}",
+local_from.0, local_from.1, local_from.2, target.0, target.1, target.2
+);
+let mut wtxn = env.write_txn()?;
+upgrade.upgrade(env, &mut wtxn, local_from)?;
+versioning.set_version(&mut wtxn, target)?;
+wtxn.commit()?;
+local_from = target;
 }

-versioning.set_version(&mut wtxn, target_version)?;
-info!("Task queue upgraded, spawning the upgrade database task");
+let mut wtxn = env.write_txn()?;

 let queue = TaskQueue::new(env, &mut wtxn)?;
 let uid = queue.next_task_id(&wtxn)?;
 queue.register(
@@ -97,14 +91,31 @@ pub fn upgrade_index_scheduler(
 finished_at: None,
 error: None,
 canceled_by: None,
-details: Some(Details::UpgradeDatabase { from: initial_version, to: target_version }),
+details: Some(Details::UpgradeDatabase { from, to }),
 status: Status::Enqueued,
-kind: KindWithContent::UpgradeDatabase { from: initial_version },
+kind: KindWithContent::UpgradeDatabase { from },
 network: None,
-custom_metadata: None,
 },
 )?;
 wtxn.commit()?;

 Ok(())
 }

+#[allow(non_camel_case_types)]
+struct ToCurrentNoOp {}

+impl UpgradeIndexScheduler for ToCurrentNoOp {
+fn upgrade(
+&self,
+_env: &Env<WithoutTls>,
+_wtxn: &mut RwTxn,
+_original: (u32, u32, u32),
+) -> anyhow::Result<()> {
+Ok(())
+}

+fn target_version(&self) -> (u32, u32, u32) {
+(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
+}
+}
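The removed (left-hand) version of `upgrade_index_scheduler` drops the per-version `match from { (1, 12, _) => 0, … }` jump table: each migration instead decides for itself via `must_upgrade(initial_version)` whether it applies, and everything runs in a single write transaction whose final step persists the target version. A hedged, self-contained sketch of that shape follows (not the Meilisearch implementation; `Migration`, `apply`, and the logging are simplified stand-ins, and `anyhow` is an assumed dependency):

```rust
// Simplified sketch of a version-gated migration registry, as described above.
use anyhow::bail;

type Version = (u32, u32, u32);

trait Migration {
    /// Whether this step applies, given the version the database started from.
    fn must_upgrade(&self, initial_version: Version) -> bool;
    fn description(&self) -> &'static str;
    fn apply(&self) -> anyhow::Result<()>;
}

fn upgrade(
    initial_version: Version,
    target_version: Version,
    migrations: &[&dyn Migration],
) -> anyhow::Result<()> {
    if initial_version == target_version {
        return Ok(()); // already up to date
    }
    if initial_version > target_version {
        bail!("downgrade is not supported");
    }
    let count = migrations.len();
    for (i, migration) in migrations.iter().enumerate() {
        if migration.must_upgrade(initial_version) {
            println!("[{}/{count}] applying: {}", i + 1, migration.description());
            migration.apply()?;
        } else {
            println!("[{}/{count}] skipping: {}", i + 1, migration.description());
        }
    }
    // A real implementation would persist `target_version` here, inside the
    // same transaction that ran the migrations.
    Ok(())
}
```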
@@ -256,15 +256,14 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
 use KindWithContent as K;
 let mut index_uids = vec![];
 match &mut task.kind {
-K::DocumentAdditionOrUpdate { index_uid, .. }
-| K::DocumentEdition { index_uid, .. }
-| K::DocumentDeletion { index_uid, .. }
-| K::DocumentDeletionByFilter { index_uid, .. }
-| K::DocumentClear { index_uid }
-| K::SettingsUpdate { index_uid, .. }
-| K::IndexDeletion { index_uid }
-| K::IndexCreation { index_uid, .. }
-| K::IndexCompaction { index_uid, .. } => index_uids.push(index_uid),
+K::DocumentAdditionOrUpdate { index_uid, .. } => index_uids.push(index_uid),
+K::DocumentEdition { index_uid, .. } => index_uids.push(index_uid),
+K::DocumentDeletion { index_uid, .. } => index_uids.push(index_uid),
+K::DocumentDeletionByFilter { index_uid, .. } => index_uids.push(index_uid),
+K::DocumentClear { index_uid } => index_uids.push(index_uid),
+K::SettingsUpdate { index_uid, .. } => index_uids.push(index_uid),
+K::IndexDeletion { index_uid } => index_uids.push(index_uid),
+K::IndexCreation { index_uid, .. } => index_uids.push(index_uid),
 K::IndexUpdate { index_uid, new_index_uid, .. } => {
 index_uids.push(index_uid);
 if let Some(new_uid) = new_index_uid {
@@ -379,7 +378,6 @@ impl crate::IndexScheduler {
 status,
 kind,
 network: _,
-custom_metadata: _,
 } = task;
 assert_eq!(uid, task.uid);
 if task.status != Status::Enqueued {
@@ -620,13 +618,6 @@ impl crate::IndexScheduler {
 Details::UpgradeDatabase { from: _, to: _ } => {
 assert_eq!(kind.as_kind(), Kind::UpgradeDatabase);
 }
-Details::IndexCompaction {
-index_uid: _,
-pre_compaction_size: _,
-post_compaction_size: _,
-} => {
-assert_eq!(kind.as_kind(), Kind::IndexCompaction);
-}
 }
 }

@@ -64,7 +64,14 @@ impl Versioning {
 };
 wtxn.commit()?;

-upgrade_index_scheduler(env, &this, from)?;
+let bin_major: u32 = versioning::VERSION_MAJOR;
+let bin_minor: u32 = versioning::VERSION_MINOR;
+let bin_patch: u32 = versioning::VERSION_PATCH;
+let to = (bin_major, bin_minor, bin_patch);

+if from != to {
+upgrade_index_scheduler(env, &this, from, to)?;
+}

 // Once we reach this point it means the upgrade process, if there was one is entirely finished
 // we can safely say we reached the latest version of the index scheduler

@@ -15,7 +15,7 @@ license.workspace = true
 serde_json = "1.0"

 [dev-dependencies]
-criterion = "0.7.0"
+criterion = "0.6.0"

 [[bench]]
 name = "depth"

@@ -13,7 +13,7 @@ license.workspace = true
 [dependencies]
 # fixed version due to format breakages in v1.40
 insta = { version = "=1.39.0", features = ["json", "redactions"] }
-md5 = "0.8.0"
+md5 = "0.7.0"
 once_cell = "1.21"
-regex-lite = "0.1.8"
+regex-lite = "0.1.6"
-uuid = { version = "1.18.1", features = ["v4"] }
+uuid = { version = "1.17.0", features = ["v4"] }

@@ -12,15 +12,15 @@ license.workspace = true

 [dependencies]
 base64 = "0.22.1"
-enum-iterator = "2.3.0"
+enum-iterator = "2.1.0"
 hmac = "0.12.1"
 maplit = "1.0.2"
 meilisearch-types = { path = "../meilisearch-types" }
 rand = "0.8.5"
 roaring = { version = "0.10.12", features = ["serde"] }
-serde = { version = "1.0.228", features = ["derive"] }
+serde = { version = "1.0.219", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
 sha2 = "0.10.9"
-thiserror = "2.0.17"
+thiserror = "2.0.12"
-time = { version = "0.3.44", features = ["serde-well-known", "formatting", "parsing", "macros"] }
+time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }

@@ -109,7 +109,6 @@ impl HeedAuthStore {
 Action::IndexesGet,
 Action::IndexesUpdate,
 Action::IndexesSwap,
-Action::IndexesCompact,
 ]
 .iter(),
 );

@@ -11,38 +11,38 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-actix-web = { version = "4.12.0", default-features = false }
+actix-web = { version = "4.11.0", default-features = false }
-anyhow = "1.0.100"
+anyhow = "1.0.98"
-bumpalo = "3.19.0"
+bumpalo = "3.18.1"
 bumparaw-collections = "0.1.4"
 byte-unit = { version = "5.1.6", features = ["serde"] }
-convert_case = "0.9.0"
+convert_case = "0.8.0"
-csv = "1.4.0"
+csv = "1.3.1"
-deserr = { version = "0.6.4", features = ["actix-web"] }
+deserr = { version = "0.6.3", features = ["actix-web"] }
 either = { version = "1.15.0", features = ["serde"] }
-enum-iterator = "2.3.0"
+enum-iterator = "2.1.0"
 file-store = { path = "../file-store" }
-flate2 = "1.1.5"
+flate2 = "1.1.2"
 fst = "0.4.7"
-memmap2 = "0.9.9"
+memmap2 = "0.9.7"
 milli = { path = "../milli" }
 roaring = { version = "0.10.12", features = ["serde"] }
 rustc-hash = "2.1.1"
-serde = { version = "1.0.228", features = ["derive"] }
+serde = { version = "1.0.219", features = ["derive"] }
 serde-cs = "0.2.4"
-serde_json = { version = "1.0.145", features = ["preserve_order"] }
+serde_json = { version = "1.0.140", features = ["preserve_order"] }
 tar = "0.4.44"
-tempfile = "3.23.0"
+tempfile = "3.20.0"
-thiserror = "2.0.17"
+thiserror = "2.0.12"
-time = { version = "0.3.44", features = [
+time = { version = "0.3.41", features = [
 "serde-well-known",
 "formatting",
 "parsing",
 "macros",
 ] }
-tokio = "1.48"
+tokio = "1.45"
 utoipa = { version = "5.4.0", features = ["macros"] }
-uuid = { version = "1.18.1", features = ["serde", "v4"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }

 [dev-dependencies]
 # fixed version due to format breakages in v1.40
@@ -56,9 +56,6 @@ all-tokenizations = ["milli/all-tokenizations"]
 # chinese specialized tokenization
 chinese = ["milli/chinese"]
 chinese-pinyin = ["milli/chinese-pinyin"]

-enterprise = ["milli/enterprise"]

 # hebrew specialized tokenization
 hebrew = ["milli/hebrew"]
 # japanese specialized tokenization

@@ -1,16 +0,0 @@
-pub mod network {
-use milli::update::new::indexer::current_edition::sharding::Shards;
-
-use crate::network::Network;
-
-impl Network {
-pub fn shards(&self) -> Option<Shards> {
-None
-}
-
-pub fn sharding(&self) -> bool {
-// always false in CE
-false
-}
-}
-}
@@ -3,9 +3,21 @@
 // Use of this source code is governed by the Business Source License 1.1,
 // as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

-use milli::update::new::indexer::enterprise_edition::sharding::Shards;
+use std::collections::BTreeMap;

-use crate::network::Network;
+use milli::update::new::indexer::enterprise_edition::sharding::Shards;
+use serde::{Deserialize, Serialize};

+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct Network {
+#[serde(default, rename = "self")]
+pub local: Option<String>,
+#[serde(default)]
+pub remotes: BTreeMap<String, Remote>,
+#[serde(default)]
+pub sharding: bool,
+}

 impl Network {
 pub fn shards(&self) -> Option<Shards> {
@@ -22,8 +34,14 @@ impl Network {
 None
 }
 }
-pub fn sharding(&self) -> bool {
-self.sharding
-}
+}
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+pub struct Remote {
+pub url: String,
+#[serde(default)]
+pub search_api_key: Option<String>,
+#[serde(default)]
+pub write_api_key: Option<String>,
 }
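The hunk above moves the `Network` and `Remote` serde types into the enterprise-edition module. As a hedged, standalone sketch (a trimmed copy of those structs, not the Meilisearch types themselves; `serde` and `serde_json` are assumed dependencies), this is the wire shape the `#[serde]` attributes accept: the JSON key `self` maps to `local`, and `remotes` and `sharding` fall back to their defaults when omitted:

```rust
// Standalone sketch of the Network/Remote payload shape shown in the diff.
use std::collections::BTreeMap;

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Default)]
#[serde(rename_all = "camelCase")]
struct Network {
    #[serde(default, rename = "self")]
    local: Option<String>, // JSON key is "self"
    #[serde(default)]
    remotes: BTreeMap<String, Remote>,
    #[serde(default)]
    sharding: bool, // absent => false
}

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Remote {
    url: String,
    #[serde(default)]
    search_api_key: Option<String>, // JSON key "searchApiKey"
    #[serde(default)]
    write_api_key: Option<String>,
}

fn main() -> serde_json::Result<()> {
    let network: Network = serde_json::from_str(
        r#"{ "self": "ms-0", "remotes": { "ms-1": { "url": "http://ms-1:7700" } } }"#,
    )?;
    assert_eq!(network.local.as_deref(), Some("ms-0"));
    assert!(!network.sharding); // defaulted because the field was omitted
    Ok(())
}
```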
@@ -254,12 +254,10 @@ InvalidSearchHybridQuery , InvalidRequest , BAD_REQU
 InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
 InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
 InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
-InvalidIndexCustomMetadata , InvalidRequest , BAD_REQUEST ;
 InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchFacets , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchFacetsByIndex , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchFacetOrder , InvalidRequest , BAD_REQUEST ;
-InvalidMultiSearchQueryPersonalization , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchFederated , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchFederationOptions , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchMaxValuesPerFacet , InvalidRequest , BAD_REQUEST ;
@@ -317,8 +315,6 @@ InvalidSearchShowRankingScoreDetails , InvalidRequest , BAD_REQU
 InvalidSimilarShowRankingScoreDetails , InvalidRequest , BAD_REQUEST ;
 InvalidSearchSort , InvalidRequest , BAD_REQUEST ;
 InvalidSearchDistinct , InvalidRequest , BAD_REQUEST ;
-InvalidSearchPersonalize , InvalidRequest , BAD_REQUEST ;
-InvalidSearchPersonalizeUserContext , InvalidRequest , BAD_REQUEST ;
 InvalidSearchMediaAndVector , InvalidRequest , BAD_REQUEST ;
 InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ;
 InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
@@ -394,9 +390,6 @@ TooManyVectors , InvalidRequest , BAD_REQU
 UnretrievableDocument , Internal , BAD_REQUEST ;
 UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
 UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
-InvalidS3SnapshotRequest , Internal , BAD_REQUEST ;
-InvalidS3SnapshotParameters , Internal , BAD_REQUEST ;
-S3SnapshotServerError , Internal , BAD_GATEWAY ;

 // Experimental features
 VectorEmbeddingError , InvalidRequest , BAD_REQUEST ;
@@ -433,7 +426,6 @@ InvalidChatCompletionSearchQueryParamPrompt , InvalidRequest , BAD_REQU
 InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ;
 InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ;
 InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ;
-RequiresEnterpriseEdition , InvalidRequest , UNAVAILABLE_FOR_LEGAL_REASONS ;
 // Webhooks
 InvalidWebhooks , InvalidRequest , BAD_REQUEST ;
 InvalidWebhookUrl , InvalidRequest , BAD_REQUEST ;
@@ -687,18 +679,6 @@ impl fmt::Display for deserr_codes::InvalidNetworkSearchApiKey {
 }
 }

-impl fmt::Display for deserr_codes::InvalidSearchPersonalize {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-write!(f, "the value of `personalize` is invalid, expected a JSON object with `userContext` string.")
-}
-}

-impl fmt::Display for deserr_codes::InvalidSearchPersonalizeUserContext {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-write!(f, "the value of `userContext` is invalid, expected a string.")
-}
-}

 #[macro_export]
 macro_rules! internal_error {
 ($target:ty : $($other:path), *) => {

@@ -380,9 +380,6 @@ pub enum Action {
 #[serde(rename = "webhooks.*")]
 #[deserr(rename = "webhooks.*")]
 WebhooksAll,
-#[serde(rename = "indexes.compact")]
-#[deserr(rename = "indexes.compact")]
-IndexesCompact,
 }

 impl Action {
@@ -401,7 +398,6 @@ impl Action {
 INDEXES_UPDATE => Some(Self::IndexesUpdate),
 INDEXES_DELETE => Some(Self::IndexesDelete),
 INDEXES_SWAP => Some(Self::IndexesSwap),
-INDEXES_COMPACT => Some(Self::IndexesCompact),
 TASKS_ALL => Some(Self::TasksAll),
 TASKS_CANCEL => Some(Self::TasksCancel),
 TASKS_DELETE => Some(Self::TasksDelete),
@@ -466,7 +462,6 @@ impl Action {
 IndexesUpdate => false,
 IndexesDelete => false,
 IndexesSwap => false,
-IndexesCompact => false,
 TasksCancel => false,
 TasksDelete => false,
 TasksGet => true,
@@ -518,7 +513,6 @@ pub mod actions {
 pub const INDEXES_UPDATE: u8 = IndexesUpdate.repr();
 pub const INDEXES_DELETE: u8 = IndexesDelete.repr();
 pub const INDEXES_SWAP: u8 = IndexesSwap.repr();
-pub const INDEXES_COMPACT: u8 = IndexesCompact.repr();
 pub const TASKS_ALL: u8 = TasksAll.repr();
 pub const TASKS_CANCEL: u8 = TasksCancel.repr();
 pub const TASKS_DELETE: u8 = TasksDelete.repr();
@@ -620,7 +614,6 @@ pub(crate) mod test {
 assert!(WebhooksDelete.repr() == 47 && WEBHOOKS_DELETE == 47);
 assert!(WebhooksCreate.repr() == 48 && WEBHOOKS_CREATE == 48);
 assert!(WebhooksAll.repr() == 49 && WEBHOOKS_ALL == 49);
-assert!(IndexesCompact.repr() == 50 && INDEXES_COMPACT == 50);
 }

 #[test]

@@ -2,17 +2,10 @@

 pub mod batch_view;
 pub mod batches;
-#[cfg(not(feature = "enterprise"))]
-pub mod community_edition;
 pub mod compression;
 pub mod deserr;
 pub mod document_formats;
-#[cfg(feature = "enterprise")]
 pub mod enterprise_edition;
-#[cfg(not(feature = "enterprise"))]
-pub use community_edition as current_edition;
-#[cfg(feature = "enterprise")]
-pub use enterprise_edition as current_edition;
 pub mod error;
 pub mod facet_values_sort;
 pub mod features;
@@ -20,7 +13,6 @@ pub mod index_uid;
 pub mod index_uid_pattern;
 pub mod keys;
 pub mod locales;
-pub mod network;
 pub mod settings;
 pub mod star_or;
 pub mod task_view;

@@ -1,24 +0,0 @@
-use std::collections::BTreeMap;
-
-use serde::{Deserialize, Serialize};
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
-#[serde(rename_all = "camelCase")]
-pub struct Network {
-#[serde(default, rename = "self")]
-pub local: Option<String>,
-#[serde(default)]
-pub remotes: BTreeMap<String, Remote>,
-#[serde(default)]
-pub sharding: bool,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
-#[serde(rename_all = "camelCase")]
-pub struct Remote {
-pub url: String,
-#[serde(default)]
-pub search_api_key: Option<String>,
-#[serde(default)]
-pub write_api_key: Option<String>,
-}
@@ -346,26 +346,24 @@ impl<T> Settings<T> {
 continue;
 };

-hide_secret(api_key, 0);
+Self::hide_secret(api_key);
 }
 }
-}

-/// Redact a secret string, starting from the `secret_offset`th byte.
-pub fn hide_secret(secret: &mut String, secret_offset: usize) {
-match secret.len().checked_sub(secret_offset) {
-None => (),
-Some(x) if x < 10 => {
-secret.replace_range(secret_offset.., "XXX...");
-}
-Some(x) if x < 20 => {
-secret.replace_range((secret_offset + 2).., "XXXX...");
-}
-Some(x) if x < 30 => {
-secret.replace_range((secret_offset + 3).., "XXXXX...");
-}
-Some(_x) => {
-secret.replace_range((secret_offset + 5).., "XXXXXX...");
+fn hide_secret(secret: &mut String) {
+match secret.len() {
+x if x < 10 => {
+secret.replace_range(.., "XXX...");
+}
+x if x < 20 => {
+secret.replace_range(2.., "XXXX...");
+}
+x if x < 30 => {
+secret.replace_range(3.., "XXXXX...");
+}
+_x => {
+secret.replace_range(5.., "XXXXXX...");
+}
 }
 }
 }
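The left-hand `hide_secret` in the hunk above takes a `secret_offset` so a known prefix such as `Bearer ` can be kept intact while only the token part is redacted, with a few extra leading characters preserved for longer secrets. A hedged, standalone sketch of that offset-aware, length-bucketed redaction (not the Meilisearch function itself, just the same shape with a usage example):

```rust
// Standalone sketch of offset-aware, length-bucketed secret redaction.
fn hide_secret(secret: &mut String, secret_offset: usize) {
    match secret.len().checked_sub(secret_offset) {
        // The secret is shorter than the prefix to keep: leave it untouched.
        None => (),
        Some(len) if len < 10 => secret.replace_range(secret_offset.., "XXX..."),
        Some(len) if len < 20 => secret.replace_range((secret_offset + 2).., "XXXX..."),
        Some(len) if len < 30 => secret.replace_range((secret_offset + 3).., "XXXXX..."),
        Some(_) => secret.replace_range((secret_offset + 5).., "XXXXXX..."),
    }
}

fn main() {
    let mut header = String::from("Bearer 0123456789abcdef0123456789abcdef");
    // Keep the "Bearer " scheme and the first few token bytes, redact the rest.
    hide_secret(&mut header, "Bearer ".len());
    assert_eq!(header, "Bearer 01234XXXXXX...");
}
```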
@@ -55,9 +55,6 @@ pub struct TaskView {

 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub network: Option<TaskNetwork>,

-#[serde(default, skip_serializing_if = "Option::is_none")]
-pub custom_metadata: Option<String>,
 }

 impl TaskView {
@@ -76,7 +73,6 @@ impl TaskView {
 started_at: task.started_at,
 finished_at: task.finished_at,
 network: task.network.clone(),
-custom_metadata: task.custom_metadata.clone(),
 }
 }
 }
@@ -146,11 +142,6 @@ pub struct DetailsView {
 pub old_index_uid: Option<String>,
 #[serde(skip_serializing_if = "Option::is_none")]
 pub new_index_uid: Option<String>,
-// index compaction
-#[serde(skip_serializing_if = "Option::is_none")]
-pub pre_compaction_size: Option<String>,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub post_compaction_size: Option<String>,
 }

 impl DetailsView {
@@ -323,24 +314,6 @@ impl DetailsView {
 // We should never be able to batch multiple renames at the same time.
 (Some(left), Some(_right)) => Some(left),
 },
-pre_compaction_size: match (
-self.pre_compaction_size.clone(),
-other.pre_compaction_size.clone(),
-) {
-(None, None) => None,
-(None, Some(size)) | (Some(size), None) => Some(size),
-// We should never be able to batch multiple compactions at the same time.
-(Some(left), Some(_right)) => Some(left),
-},
-post_compaction_size: match (
-self.post_compaction_size.clone(),
-other.post_compaction_size.clone(),
-) {
-(None, None) => None,
-(None, Some(size)) | (Some(size), None) => Some(size),
-// We should never be able to batch multiple compactions at the same time.
-(Some(left), Some(_right)) => Some(left),
-},
 }
 }
 }
@@ -442,15 +415,6 @@ impl From<Details> for DetailsView {
 upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)),
 ..Default::default()
 },
-Details::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
-DetailsView {
-pre_compaction_size: pre_compaction_size
-.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
-post_compaction_size: post_compaction_size
-.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
-..Default::default()
-}
-}
 }
 }
 }

@@ -45,9 +45,6 @@ pub struct Task {

 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub network: Option<TaskNetwork>,

-#[serde(default, skip_serializing_if = "Option::is_none")]
-pub custom_metadata: Option<String>,
 }

 impl Task {
@@ -70,8 +67,7 @@ impl Task {
 | SettingsUpdate { index_uid, .. }
 | IndexCreation { index_uid, .. }
 | IndexUpdate { index_uid, .. }
-| IndexDeletion { index_uid }
-| IndexCompaction { index_uid } => Some(index_uid),
+| IndexDeletion { index_uid } => Some(index_uid),
 }
 }

@@ -98,8 +94,7 @@ impl Task {
 | KindWithContent::DumpCreation { .. }
 | KindWithContent::SnapshotCreation
 | KindWithContent::Export { .. }
-| KindWithContent::UpgradeDatabase { .. }
-| KindWithContent::IndexCompaction { .. } => None,
+| KindWithContent::UpgradeDatabase { .. } => None,
 }
 }
 }
@@ -175,9 +170,6 @@ pub enum KindWithContent {
 UpgradeDatabase {
 from: (u32, u32, u32),
 },
-IndexCompaction {
-index_uid: String,
-},
 }

 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
@@ -214,7 +206,6 @@ impl KindWithContent {
 KindWithContent::SnapshotCreation => Kind::SnapshotCreation,
 KindWithContent::Export { .. } => Kind::Export,
 KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
-KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
 }
 }

@@ -235,8 +226,7 @@ impl KindWithContent {
 | DocumentClear { index_uid }
 | SettingsUpdate { index_uid, .. }
 | IndexCreation { index_uid, .. }
-| IndexDeletion { index_uid }
-| IndexCompaction { index_uid } => vec![index_uid],
+| IndexDeletion { index_uid } => vec![index_uid],
 IndexUpdate { index_uid, new_index_uid, .. } => {
 let mut indexes = vec![index_uid.as_str()];
 if let Some(new_uid) = new_index_uid {
@@ -335,11 +325,6 @@ impl KindWithContent {
 versioning::VERSION_PATCH,
 ),
 }),
-KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
-index_uid: index_uid.clone(),
-pre_compaction_size: None,
-post_compaction_size: None,
-}),
 }
 }

@@ -422,11 +407,6 @@ impl KindWithContent {
 versioning::VERSION_PATCH,
 ),
 }),
-KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
-index_uid: index_uid.clone(),
-pre_compaction_size: None,
-post_compaction_size: None,
-}),
 }
 }
 }
@@ -489,11 +469,6 @@ impl From<&KindWithContent> for Option<Details> {
 versioning::VERSION_PATCH,
 ),
 }),
-KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
-index_uid: index_uid.clone(),
-pre_compaction_size: None,
-post_compaction_size: None,
-}),
 }
 }
 }
@@ -604,7 +579,6 @@ pub enum Kind {
 SnapshotCreation,
 Export,
 UpgradeDatabase,
-IndexCompaction,
 }

 impl Kind {
@@ -616,8 +590,7 @@ impl Kind {
 | Kind::SettingsUpdate
 | Kind::IndexCreation
 | Kind::IndexDeletion
-| Kind::IndexUpdate
-| Kind::IndexCompaction => true,
+| Kind::IndexUpdate => true,
 Kind::IndexSwap
 | Kind::TaskCancelation
 | Kind::TaskDeletion
@@ -645,7 +618,6 @@ impl Display for Kind {
 Kind::SnapshotCreation => write!(f, "snapshotCreation"),
 Kind::Export => write!(f, "export"),
 Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
-Kind::IndexCompaction => write!(f, "indexCompaction"),
 }
 }
 }
@@ -681,8 +653,6 @@ impl FromStr for Kind {
 Ok(Kind::Export)
 } else if kind.eq_ignore_ascii_case("upgradeDatabase") {
 Ok(Kind::UpgradeDatabase)
-} else if kind.eq_ignore_ascii_case("indexCompaction") {
-Ok(Kind::IndexCompaction)
 } else {
 Err(ParseTaskKindError(kind.to_owned()))
 }
@@ -768,11 +738,6 @@ pub enum Details {
 from: (u32, u32, u32),
 to: (u32, u32, u32),
 },
-IndexCompaction {
-index_uid: String,
-pre_compaction_size: Option<Byte>,
-post_compaction_size: Option<Byte>,
-},
 }

 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
@@ -835,10 +800,6 @@ impl Details {
 Self::ClearAll { deleted_documents } => *deleted_documents = Some(0),
 Self::TaskCancelation { canceled_tasks, .. } => *canceled_tasks = Some(0),
 Self::TaskDeletion { deleted_tasks, .. } => *deleted_tasks = Some(0),
-Self::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
-*pre_compaction_size = None;
-*post_compaction_size = None;
-}
 Self::SettingsUpdate { .. }
 | Self::IndexInfo { .. }
 | Self::Dump { .. }

@@ -11,24 +11,6 @@ pub struct Webhook {
 pub headers: BTreeMap<String, String>,
 }

-impl Webhook {
-pub fn redact_authorization_header(&mut self) {
-// headers are case insensitive, so to make the redaction robust we iterate over qualifying headers
-// rather than getting one canonical `Authorization` header.
-for value in self
-.headers
-.iter_mut()
-.filter_map(|(name, value)| name.eq_ignore_ascii_case("authorization").then_some(value))
-{
-if value.starts_with("Bearer ") {
-crate::settings::hide_secret(value, "Bearer ".len());
-} else {
-crate::settings::hide_secret(value, 0);
-}
-}
-}
-}

 #[derive(Debug, Serialize, Default, Clone, PartialEq)]
 #[serde(rename_all = "camelCase")]
 pub struct WebhooksView {
@@ -14,91 +14,91 @@ default-run = "meilisearch"
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
actix-cors = "0.7.1"
|
actix-cors = "0.7.1"
|
||||||
actix-http = { version = "3.11.2", default-features = false, features = [
|
actix-http = { version = "3.11.0", default-features = false, features = [
|
||||||
"compress-brotli",
|
"compress-brotli",
|
||||||
"compress-gzip",
|
"compress-gzip",
|
||||||
"rustls-0_23",
|
"rustls-0_23",
|
||||||
] }
|
] }
|
||||||
actix-utils = "3.0.1"
|
actix-utils = "3.0.1"
|
||||||
actix-web = { version = "4.12.0", default-features = false, features = [
|
actix-web = { version = "4.11.0", default-features = false, features = [
|
||||||
"macros",
|
"macros",
|
||||||
"compress-brotli",
|
"compress-brotli",
|
||||||
"compress-gzip",
|
"compress-gzip",
|
||||||
"cookies",
|
"cookies",
|
||||||
"rustls-0_23",
|
"rustls-0_23",
|
||||||
] }
|
] }
|
||||||
anyhow = { version = "1.0.100", features = ["backtrace"] }
|
anyhow = { version = "1.0.98", features = ["backtrace"] }
|
||||||
bstr = "1.12.1"
|
bstr = "1.12.0"
|
||||||
byte-unit = { version = "5.1.6", features = ["serde"] }
|
byte-unit = { version = "5.1.6", features = ["serde"] }
|
||||||
bytes = "1.11.0"
|
bytes = "1.10.1"
|
||||||
bumpalo = "3.19.0"
|
bumpalo = "3.18.1"
|
||||||
clap = { version = "4.5.52", features = ["derive", "env"] }
|
clap = { version = "4.5.40", features = ["derive", "env"] }
|
||||||
crossbeam-channel = "0.5.15"
|
crossbeam-channel = "0.5.15"
|
||||||
deserr = { version = "0.6.4", features = ["actix-web"] }
|
deserr = { version = "0.6.3", features = ["actix-web"] }
|
||||||
dump = { path = "../dump" }
|
dump = { path = "../dump" }
|
||||||
either = "1.15.0"
|
either = "1.15.0"
|
||||||
file-store = { path = "../file-store" }
|
file-store = { path = "../file-store" }
|
||||||
flate2 = "1.1.5"
|
flate2 = "1.1.2"
|
||||||
fst = "0.4.7"
|
fst = "0.4.7"
|
||||||
futures = "0.3.31"
|
futures = "0.3.31"
|
||||||
futures-util = "0.3.31"
|
futures-util = "0.3.31"
|
||||||
index-scheduler = { path = "../index-scheduler" }
|
index-scheduler = { path = "../index-scheduler" }
|
||||||
indexmap = { version = "2.12.0", features = ["serde"] }
|
indexmap = { version = "2.9.0", features = ["serde"] }
|
||||||
is-terminal = "0.4.17"
|
is-terminal = "0.4.16"
|
||||||
itertools = "0.14.0"
|
itertools = "0.14.0"
|
||||||
jsonwebtoken = "9.3.1"
|
jsonwebtoken = "9.3.1"
|
||||||
lazy_static = "1.5.0"
|
lazy_static = "1.5.0"
|
||||||
meilisearch-auth = { path = "../meilisearch-auth" }
|
meilisearch-auth = { path = "../meilisearch-auth" }
|
||||||
meilisearch-types = { path = "../meilisearch-types" }
|
meilisearch-types = { path = "../meilisearch-types" }
|
||||||
memmap2 = "0.9.9"
|
memmap2 = "0.9.7"
|
||||||
mimalloc = { version = "0.1.48", default-features = false }
|
mimalloc = { version = "0.1.47", default-features = false }
|
||||||
mime = "0.3.17"
|
mime = "0.3.17"
|
||||||
num_cpus = "1.17.0"
|
num_cpus = "1.17.0"
|
||||||
obkv = "0.3.0"
|
obkv = "0.3.0"
|
||||||
once_cell = "1.21.3"
|
once_cell = "1.21.3"
|
||||||
ordered-float = "5.1.0"
|
ordered-float = "5.0.0"
|
||||||
parking_lot = "0.12.5"
|
parking_lot = "0.12.4"
|
||||||
permissive-json-pointer = { path = "../permissive-json-pointer" }
|
permissive-json-pointer = { path = "../permissive-json-pointer" }
|
||||||
pin-project-lite = "0.2.16"
|
pin-project-lite = "0.2.16"
|
||||||
platform-dirs = "0.3.0"
|
platform-dirs = "0.3.0"
|
||||||
prometheus = { version = "0.14.0", features = ["process"] }
|
prometheus = { version = "0.14.0", features = ["process"] }
|
||||||
rand = "0.8.5"
|
rand = "0.8.5"
|
||||||
rayon = "1.11.0"
|
rayon = "1.10.0"
|
||||||
regex = "1.12.2"
|
regex = "1.11.1"
|
||||||
reqwest = { version = "0.12.24", features = [
|
reqwest = { version = "0.12.20", features = [
|
||||||
"rustls-tls",
|
"rustls-tls",
|
||||||
"json",
|
"json",
|
||||||
], default-features = false }
|
], default-features = false }
|
||||||
rustls = { version = "0.23.35", features = ["ring"], default-features = false }
|
rustls = { version = "0.23.28", features = ["ring"], default-features = false }
|
||||||
rustls-pki-types = { version = "1.13.0", features = ["alloc"] }
|
rustls-pki-types = { version = "1.12.0", features = ["alloc"] }
|
||||||
rustls-pemfile = "2.2.0"
|
rustls-pemfile = "2.2.0"
|
||||||
segment = { version = "0.2.6" }
|
segment = { version = "0.2.6" }
|
||||||
serde = { version = "1.0.228", features = ["derive"] }
|
serde = { version = "1.0.219", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.145", features = ["preserve_order"] }
|
 serde_json = { version = "1.0.140", features = ["preserve_order"] }
 sha2 = "0.10.9"
 siphasher = "1.0.1"
 slice-group-by = "0.3.1"
-static-files = { version = "0.3.1", optional = true }
+static-files = { version = "0.2.5", optional = true }
-sysinfo = "0.37.2"
+sysinfo = "0.35.2"
 tar = "0.4.44"
-tempfile = "3.23.0"
+tempfile = "3.20.0"
-thiserror = "2.0.17"
+thiserror = "2.0.12"
-time = { version = "0.3.44", features = [
+time = { version = "0.3.41", features = [
     "serde-well-known",
     "formatting",
     "parsing",
     "macros",
 ] }
-tokio = { version = "1.48.0", features = ["full"] }
+tokio = { version = "1.45.1", features = ["full"] }
-toml = "0.9.8"
+toml = "0.8.23"
-uuid = { version = "1.18.1", features = ["serde", "v4", "v7"] }
+uuid = { version = "1.17.0", features = ["serde", "v4"] }
 serde_urlencoded = "0.7.1"
 termcolor = "1.4.1"
-url = { version = "2.5.7", features = ["serde"] }
+url = { version = "2.5.4", features = ["serde"] }
 tracing = "0.1.41"
 tracing-subscriber = { version = "0.3.20", features = ["json"] }
 tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
-tracing-actix-web = "0.7.19"
+tracing-actix-web = "0.7.18"
 build-info = { version = "1.7.0", path = "../build-info" }
 roaring = "0.10.12"
 mopa-maintained = "0.2.3"

@@ -114,35 +114,35 @@ utoipa = { version = "5.4.0", features = [
 utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }
 async-openai = { git = "https://github.com/meilisearch/async-openai", branch = "better-error-handling" }
 secrecy = "0.10.3"
-actix-web-lab = { version = "0.24.3", default-features = false }
+actix-web-lab = { version = "0.24.1", default-features = false }
 urlencoding = "2.1.3"
 backoff = { version = "0.4.0", features = ["tokio"] }
-humantime = { version = "2.3.0", default-features = false }

 [dev-dependencies]
-actix-rt = "2.11.0"
+actix-rt = "2.10.0"
-brotli = "8.0.2"
+brotli = "8.0.1"
 # fixed version due to format breakages in v1.40
 insta = { version = "=1.39.0", features = ["redactions"] }
 manifest-dir-macros = "0.1.18"
 maplit = "1.0.2"
 meili-snap = { path = "../meili-snap" }
 temp-env = "0.3.6"
-wiremock = "0.6.5"
+wiremock = "0.6.3"
 yaup = "0.3.1"

 [build-dependencies]
-anyhow = { version = "1.0.100", optional = true }
+anyhow = { version = "1.0.98", optional = true }
-cargo_toml = { version = "0.22.3", optional = true }
+cargo_toml = { version = "0.22.1", optional = true }
 hex = { version = "0.4.3", optional = true }
-reqwest = { version = "0.12.24", features = [
+reqwest = { version = "0.12.20", features = [
     "blocking",
     "rustls-tls",
 ], default-features = false, optional = true }
 sha-1 = { version = "0.10.1", optional = true }
-static-files = { version = "0.3.1", optional = true }
+static-files = { version = "0.2.5", optional = true }
-tempfile = { version = "3.23.0", optional = true }
+tempfile = { version = "3.20.0", optional = true }
-zip = { version = "6.0.0", optional = true }
+zip = { version = "4.1.0", optional = true }

 [features]
 default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]

@@ -160,7 +160,6 @@ mini-dashboard = [
 ]
 chinese = ["meilisearch-types/chinese"]
 chinese-pinyin = ["meilisearch-types/chinese-pinyin"]
-enterprise = ["meilisearch-types/enterprise"]
 hebrew = ["meilisearch-types/hebrew"]
 japanese = ["meilisearch-types/japanese"]
 korean = ["meilisearch-types/korean"]
@@ -1,7 +1,7 @@
 use std::any::TypeId;
 use std::collections::{HashMap, HashSet};
 use std::fs;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::{Duration, Instant};

@@ -195,7 +195,7 @@ struct Infos {
     experimental_enable_logs_route: bool,
     experimental_reduce_indexing_memory_usage: bool,
     experimental_max_number_of_batched_tasks: usize,
-    experimental_limit_batched_tasks_total_size: Option<u64>,
+    experimental_limit_batched_tasks_total_size: u64,
     experimental_network: bool,
     experimental_multimodal: bool,
     experimental_chat_completions: bool,
@@ -205,10 +205,7 @@ struct Infos {
     experimental_no_snapshot_compaction: bool,
     experimental_no_edition_2024_for_dumps: bool,
     experimental_no_edition_2024_for_settings: bool,
-    experimental_no_edition_2024_for_prefix_post_processing: bool,
-    experimental_no_edition_2024_for_facet_post_processing: bool,
     experimental_vector_store_setting: bool,
-    experimental_personalization: bool,
     gpu_enabled: bool,
     db_path: bool,
     import_dump: bool,
@@ -218,7 +215,6 @@ struct Infos {
     import_snapshot: bool,
     schedule_snapshot: Option<u64>,
     snapshot_dir: bool,
-    uses_s3_snapshots: bool,
     ignore_missing_snapshot: bool,
     ignore_snapshot_if_db_exists: bool,
     http_addr: bool,
@@ -287,8 +283,6 @@ impl Infos {
             indexer_options,
             config_file_path,
             no_analytics: _,
-            experimental_personalization_api_key,
-            s3_snapshot_options,
         } = options;

         let schedule_snapshot = match schedule_snapshot {
@@ -302,8 +296,6 @@ impl Infos {
             skip_index_budget: _,
             experimental_no_edition_2024_for_settings,
             experimental_no_edition_2024_for_dumps,
-            experimental_no_edition_2024_for_prefix_post_processing,
-            experimental_no_edition_2024_for_facet_post_processing,
         } = indexer_options;

         let RuntimeTogglableFeatures {
@@ -344,22 +336,21 @@ impl Infos {
             experimental_no_edition_2024_for_dumps,
             experimental_vector_store_setting: vector_store_setting,
             gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(),
-            db_path: db_path != Path::new("./data.ms"),
+            db_path: db_path != PathBuf::from("./data.ms"),
             import_dump: import_dump.is_some(),
-            dump_dir: dump_dir != Path::new("dumps/"),
+            dump_dir: dump_dir != PathBuf::from("dumps/"),
             ignore_missing_dump,
             ignore_dump_if_db_exists,
             import_snapshot: import_snapshot.is_some(),
             schedule_snapshot,
-            snapshot_dir: snapshot_dir != Path::new("snapshots/"),
+            snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
-            uses_s3_snapshots: s3_snapshot_options.is_some(),
             ignore_missing_snapshot,
             ignore_snapshot_if_db_exists,
             http_addr: http_addr != default_http_addr(),
             http_payload_size_limit,
             experimental_max_number_of_batched_tasks,
             experimental_limit_batched_tasks_total_size:
-                experimental_limit_batched_tasks_total_size.map(|size| size.as_u64()),
+                experimental_limit_batched_tasks_total_size.into(),
             task_queue_webhook: task_webhook_url.is_some(),
             task_webhook_authorization_header: task_webhook_authorization_header.is_some(),
             log_level: log_level.to_string(),
@@ -374,9 +365,6 @@ impl Infos {
             ssl_resumption,
             ssl_tickets,
             experimental_no_edition_2024_for_settings,
-            experimental_no_edition_2024_for_prefix_post_processing,
-            experimental_no_edition_2024_for_facet_post_processing,
-            experimental_personalization: experimental_personalization_api_key.is_some(),
         }
     }
 }
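Note (illustrative, not part of the diff): most boolean fields of `Infos` above only record whether an option deviates from its default, never the configured value itself, which is why the comparisons against `Path::new("./data.ms")` and the like appear in the hunk. A minimal self-contained Rust sketch of that pattern, with a hypothetical trimmed-down struct and field names:

use std::path::Path;

// Hypothetical analytics payload: each flag records whether an option was
// customized, so the report never carries the actual paths.
struct MiniInfos {
    db_path_customized: bool,
    snapshot_dir_customized: bool,
}

fn build_infos(db_path: &Path, snapshot_dir: &Path) -> MiniInfos {
    MiniInfos {
        // Same comparison style as the diff: the configured path vs. its default.
        db_path_customized: db_path != Path::new("./data.ms"),
        snapshot_dir_customized: snapshot_dir != Path::new("snapshots/"),
    }
}

fn main() {
    let infos = build_infos(Path::new("/var/lib/meili"), Path::new("snapshots/"));
    assert!(infos.db_path_customized);
    assert!(!infos.snapshot_dir_customized);
    println!("db_path customized, snapshot_dir left at its default");
}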
@@ -38,8 +38,6 @@ pub enum MeilisearchHttpError {
     PaginationInFederatedQuery(usize, &'static str),
     #[error("Inside `.queries[{0}]`: Using facet options is not allowed in federated queries.\n - Hint: remove `facets` from query #{0} or remove `federation` from the request\n - Hint: pass `federation.facetsByIndex.{1}: {2:?}` for facets in federated search")]
     FacetsInFederatedQuery(usize, String, Vec<String>),
-    #[error("Inside `.queries[{0}]`: Using `.personalize` is not allowed in federated queries.\n - Hint: remove `personalize` from query #{0} or remove `federation` from the request")]
-    PersonalizationInFederatedQuery(usize),
     #[error("Inconsistent order for values in facet `{facet}`: index `{previous_uid}` orders {previous_facet_order}, but index `{current_uid}` orders {index_facet_order}.\n - Hint: Remove `federation.mergeFacets` or change `faceting.sortFacetValuesBy` to be consistent in settings.")]
     InconsistentFacetOrder {
         facet: String,
@@ -139,9 +137,6 @@ impl ErrorCode for MeilisearchHttpError {
             MeilisearchHttpError::InconsistentFacetOrder { .. } => {
                 Code::InvalidMultiSearchFacetOrder
             }
-            MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
-                Code::InvalidMultiSearchQueryPersonalization
-            }
             MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
                 Code::InconsistentDocumentChangeHeaders
             }
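Note (illustrative, not part of the diff): the variants above use the `thiserror` derive already listed in the Cargo.toml section, where `{0}`, `{1}`, ... in the `#[error(...)]` attribute refer to the variant's tuple fields and the user-facing hint is folded into the same message. A minimal sketch with hypothetical variant names:

use thiserror::Error;

// Hypothetical two-variant enum mirroring the shape used above.
#[derive(Debug, Error)]
enum DemoError {
    // `{0}` is the first tuple field; the same index can be reused for the hint.
    #[error("Inside `.queries[{0}]`: this option is not allowed in federated queries.\n - Hint: remove it from query #{0}")]
    NotAllowedInFederation(usize),
    #[error("Unknown index `{0}`")]
    UnknownIndex(String),
}

fn main() {
    let err = DemoError::NotAllowedInFederation(2);
    // Display output is generated from the #[error] attribute.
    println!("{err}");
}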
@@ -11,7 +11,6 @@ pub mod middleware;
 pub mod option;
 #[cfg(test)]
 mod option_test;
-pub mod personalization;
 pub mod routes;
 pub mod search;
 pub mod search_queue;
@@ -59,7 +58,6 @@ use tracing::{error, info_span};
 use tracing_subscriber::filter::Targets;

 use crate::error::MeilisearchHttpError;
-use crate::personalization::PersonalizationService;

 /// Default number of simultaneously opened indexes.
 ///
@@ -130,8 +128,12 @@ pub type LogStderrType = tracing_subscriber::filter::Filtered<
 >;

 pub fn create_app(
-    services: ServicesData,
+    index_scheduler: Data<IndexScheduler>,
+    auth_controller: Data<AuthController>,
+    search_queue: Data<SearchQueue>,
     opt: Opt,
+    logs: (LogRouteHandle, LogStderrHandle),
+    analytics: Data<Analytics>,
     enable_dashboard: bool,
 ) -> actix_web::App<
     impl ServiceFactory<
@@ -143,7 +145,17 @@ pub fn create_app(
     >,
 > {
     let app = actix_web::App::new()
-        .configure(|s| configure_data(s, services, &opt))
+        .configure(|s| {
+            configure_data(
+                s,
+                index_scheduler.clone(),
+                auth_controller.clone(),
+                search_queue.clone(),
+                &opt,
+                logs,
+                analytics.clone(),
+            )
+        })
         .configure(routes::configure)
         .configure(|s| dashboard(s, enable_dashboard));

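Note (illustrative, not part of the diff): in both versions of `create_app`, the pieces handed to `configure_data` end up registered as actix-web application data so handlers can extract them as `web::Data<T>`. A minimal standalone sketch of that registration/extraction mechanism, using a hypothetical `AppState` type rather than Meilisearch's own services:

use actix_web::{web, App, HttpServer, Responder};

// Hypothetical shared state; `web::Data<T>` wraps it in an Arc.
struct AppState {
    name: &'static str,
}

// The extractor in the handler signature pulls the registered Data back out.
async fn hello(state: web::Data<AppState>) -> impl Responder {
    format!("hello from {}", state.name)
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let state = web::Data::new(AppState { name: "demo" });
    HttpServer::new(move || {
        App::new()
            // `configure` receives a &mut ServiceConfig, like `configure_data` above.
            .configure(|cfg| {
                cfg.app_data(state.clone());
            })
            .route("/", web::get().to(hello))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}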
@@ -204,10 +216,7 @@ enum OnFailure {
     KeepDb,
 }

-pub fn setup_meilisearch(
-    opt: &Opt,
-    handle: tokio::runtime::Handle,
-) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
+pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
     let index_scheduler_opt = IndexSchedulerOptions {
         version_file_path: opt.db_path.join(VERSION_FILE_NAME),
         auth_path: opt.db_path.join("auth"),
@@ -221,26 +230,12 @@ pub fn setup_meilisearch(
         task_db_size: opt.max_task_db_size.as_u64() as usize,
         index_base_map_size: opt.max_index_size.as_u64() as usize,
         enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
-        indexer_config: Arc::new({
-            let s3_snapshot_options =
-                opt.s3_snapshot_options.clone().map(|opt| opt.try_into()).transpose()?;
-            IndexerConfig { s3_snapshot_options, ..(&opt.indexer_options).try_into()? }
-        }),
+        indexer_config: Arc::new((&opt.indexer_options).try_into()?),
         autobatching_enabled: true,
         cleanup_enabled: !opt.experimental_replication_parameters,
         max_number_of_tasks: 1_000_000,
         max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
-        batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.map_or_else(
-            || {
-                opt.indexer_options
-                    .max_indexing_memory
-                    // By default, we use half of the available memory to determine the size of batched tasks
-                    .map_or(u64::MAX, |mem| mem.as_u64() / 2)
-                    // And never exceed 10 GiB when we infer the limit
-                    .min(10 * 1024 * 1024 * 1024)
-            },
-            |size| size.as_u64(),
-        ),
+        batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.into(),
         index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
         index_count: DEFAULT_INDEX_COUNT,
         instance_features: opt.to_instance_features(),
@@ -261,7 +256,6 @@ pub fn setup_meilisearch(
             index_scheduler_opt,
             OnFailure::RemoveDb,
             binary_version, // the db is empty
-            handle,
         )?,
         Err(e) => {
             std::fs::remove_dir_all(&opt.db_path)?;
@@ -279,7 +273,7 @@ pub fn setup_meilisearch(
                 bail!("snapshot doesn't exist at {}", snapshot_path.display())
             // the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
             } else {
-                open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
+                open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
             }
         } else if let Some(ref path) = opt.import_dump {
             let src_path_exists = path.exists();
@@ -290,7 +284,6 @@ pub fn setup_meilisearch(
                 index_scheduler_opt,
                 OnFailure::RemoveDb,
                 binary_version, // the db is empty
-                handle,
             )?;
             match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
                 Ok(()) => (index_scheduler, auth_controller),
@@ -311,10 +304,10 @@ pub fn setup_meilisearch(
             // the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
             // or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
             } else {
-                open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
+                open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
             }
         } else {
-            open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
+            open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
         };

     // We create a loop in a thread that registers snapshotCreation tasks
@@ -345,7 +338,6 @@ fn open_or_create_database_unchecked(
     index_scheduler_opt: IndexSchedulerOptions,
     on_failure: OnFailure,
     version: (u32, u32, u32),
-    handle: tokio::runtime::Handle,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     // we don't want to create anything in the data.ms yet, thus we
     // wrap our two builders in a closure that'll be executed later.
@@ -353,7 +345,7 @@ fn open_or_create_database_unchecked(
     let auth_env = open_auth_store_env(&index_scheduler_opt.auth_path).unwrap();
     let auth_controller = AuthController::new(auth_env.clone(), &opt.master_key);
     let index_scheduler_builder = || -> anyhow::Result<_> {
-        Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version, Some(handle))?)
+        Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version)?)
     };

     match (
@@ -460,7 +452,6 @@ fn open_or_create_database(
     index_scheduler_opt: IndexSchedulerOptions,
     empty_db: bool,
     binary_version: (u32, u32, u32),
-    handle: tokio::runtime::Handle,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     let version = if !empty_db {
         check_version(opt, &index_scheduler_opt, binary_version)?
@@ -468,7 +459,7 @@ fn open_or_create_database(
         binary_version
     };

-    open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version, handle)
+    open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version)
 }

 fn import_dump(
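Note (illustrative, not part of the diff): the `batched_tasks_size_limit` default removed in the hunk above is easier to read in isolation. A minimal sketch of the same arithmetic with plain `u64`s (the `Byte` wrapper and option types of the real code are simplified away here):

// Default limit on the total size of batched tasks: half of the configured
// indexing memory when known (u64::MAX otherwise), capped at 10 GiB when
// inferred; an explicit limit from the options always wins.
fn batched_tasks_size_limit(explicit_limit: Option<u64>, max_indexing_memory: Option<u64>) -> u64 {
    explicit_limit.map_or_else(
        || {
            max_indexing_memory
                .map_or(u64::MAX, |mem| mem / 2)
                .min(10 * 1024 * 1024 * 1024)
        },
        |size| size,
    )
}

fn main() {
    let gib = 1024 * 1024 * 1024;
    // 8 GiB of indexing memory -> 4 GiB batch limit.
    assert_eq!(batched_tasks_size_limit(None, Some(8 * gib)), 4 * gib);
    // No memory limit known -> fall back to the 10 GiB cap.
    assert_eq!(batched_tasks_size_limit(None, None), 10 * gib);
    // An explicit limit is used as-is.
    assert_eq!(batched_tasks_size_limit(Some(123), None), 123);
    println!("defaults check out");
}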
@@ -536,11 +527,7 @@ fn import_dump(
     let indexer_config = if base_config.max_threads.is_none() {
         let (thread_pool, _) = default_thread_pool_and_threads();

-        let _config = IndexerConfig {
-            thread_pool,
-            s3_snapshot_options: base_config.s3_snapshot_options.clone(),
-            ..*base_config
-        };
+        let _config = IndexerConfig { thread_pool, ..*base_config };
         backup_config = _config;
         &backup_config
     } else {
@@ -688,26 +675,23 @@ fn import_dump(
     Ok(index_scheduler_dump.finish()?)
 }

-pub fn configure_data(config: &mut web::ServiceConfig, services: ServicesData, opt: &Opt) {
-    let ServicesData {
-        index_scheduler,
-        auth,
-        search_queue,
-        personalization_service,
-        logs_route_handle,
-        logs_stderr_handle,
-        analytics,
-    } = services;
-
+pub fn configure_data(
+    config: &mut web::ServiceConfig,
+    index_scheduler: Data<IndexScheduler>,
+    auth: Data<AuthController>,
+    search_queue: Data<SearchQueue>,
+    opt: &Opt,
+    (logs_route, logs_stderr): (LogRouteHandle, LogStderrHandle),
+    analytics: Data<Analytics>,
+) {
     let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize;
     config
         .app_data(index_scheduler)
         .app_data(auth)
         .app_data(search_queue)
         .app_data(analytics)
-        .app_data(personalization_service)
-        .app_data(logs_route_handle)
-        .app_data(logs_stderr_handle)
+        .app_data(web::Data::new(logs_route))
+        .app_data(web::Data::new(logs_stderr))
         .app_data(web::Data::new(opt.clone()))
         .app_data(
             web::JsonConfig::default()
@@ -768,14 +752,3 @@ pub fn dashboard(config: &mut web::ServiceConfig, enable_frontend: bool) {
 pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
     config.service(web::resource("/").route(web::get().to(routes::running)));
 }
-
-#[derive(Clone)]
-pub struct ServicesData {
-    pub index_scheduler: Data<IndexScheduler>,
-    pub auth: Data<AuthController>,
-    pub search_queue: Data<SearchQueue>,
-    pub personalization_service: Data<PersonalizationService>,
-    pub logs_route_handle: Data<LogRouteHandle>,
-    pub logs_stderr_handle: Data<LogStderrHandle>,
-    pub analytics: Data<Analytics>,
-}
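Note (illustrative, not part of the diff): the `ServicesData` bundle removed above derives `Clone` so the whole set of services can be handed to each per-worker app factory; that stays cheap because every field is a `Data<T>`, which actix-web documents as an `Arc` wrapper. A minimal sketch with a hypothetical two-field bundle:

use std::sync::Arc;

use actix_web::web::Data;

// Hypothetical stand-ins for the scheduler and auth controller.
struct Scheduler;
struct Auth;

// Cloning the bundle only bumps two Arc reference counts.
#[derive(Clone)]
struct Services {
    scheduler: Data<Scheduler>,
    auth: Data<Auth>,
}

fn main() {
    let services = Services { scheduler: Data::new(Scheduler), auth: Data::new(Auth) };
    let for_worker = services.clone();
    // Both clones point at the same underlying allocation.
    let a = services.scheduler.clone().into_inner();
    let b = for_worker.scheduler.into_inner();
    assert!(Arc::ptr_eq(&a, &b));
    println!("bundle cloned cheaply");
}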
@@ -14,11 +14,10 @@ use index_scheduler::IndexScheduler;
 use is_terminal::IsTerminal;
 use meilisearch::analytics::Analytics;
 use meilisearch::option::LogMode;
-use meilisearch::personalization::PersonalizationService;
 use meilisearch::search_queue::SearchQueue;
 use meilisearch::{
     analytics, create_app, setup_meilisearch, LogRouteHandle, LogRouteType, LogStderrHandle,
-    LogStderrType, Opt, ServicesData, SubscriberForSecondLayer,
+    LogStderrType, Opt, SubscriberForSecondLayer,
 };
 use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
 use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
@@ -77,10 +76,7 @@ fn on_panic(info: &std::panic::PanicHookInfo) {

 #[actix_web::main]
 async fn main() -> anyhow::Result<()> {
-    // won't panic inside of tokio::main
-    let runtime = tokio::runtime::Handle::current();
-
-    try_main(runtime).await.inspect_err(|error| {
+    try_main().await.inspect_err(|error| {
         tracing::error!(%error);
         let mut current = error.source();
         let mut depth = 0;
@@ -92,7 +88,7 @@ async fn main() -> anyhow::Result<()> {
     })
 }

-async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
+async fn try_main() -> anyhow::Result<()> {
     let (opt, config_read_from) = Opt::try_build()?;

     std::panic::set_hook(Box::new(on_panic));
@@ -126,7 +122,7 @@ async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
         _ => (),
     }

-    let (index_scheduler, auth_controller) = setup_meilisearch(&opt, runtime)?;
+    let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;

     let analytics =
         analytics::Analytics::new(&opt, index_scheduler.clone(), auth_controller.clone()).await;
@@ -153,15 +149,8 @@ async fn run_http(
     let enable_dashboard = &opt.env == "development";
     let opt_clone = opt.clone();
     let index_scheduler = Data::from(index_scheduler);
-    let auth = Data::from(auth_controller);
+    let auth_controller = Data::from(auth_controller);
     let analytics = Data::from(analytics);
-    // Create personalization service with API key from options
-    let personalization_service = Data::new(
-        opt.experimental_personalization_api_key
-            .clone()
-            .map(PersonalizationService::cohere)
-            .unwrap_or_else(PersonalizationService::disabled),
-    );
     let search_queue = SearchQueue::new(
         opt.experimental_search_queue_size,
         available_parallelism()
@@ -173,25 +162,21 @@ async fn run_http(
         usize::from(opt.experimental_drop_search_after) as u64
     ));
     let search_queue = Data::new(search_queue);
-    let (logs_route_handle, logs_stderr_handle) = logs;
-    let logs_route_handle = Data::new(logs_route_handle);
-    let logs_stderr_handle = Data::new(logs_stderr_handle);

-    let services = ServicesData {
-        index_scheduler,
-        auth,
-        search_queue,
-        personalization_service,
-        logs_route_handle,
-        logs_stderr_handle,
-        analytics,
-    };
-
-    let http_server =
-        HttpServer::new(move || create_app(services.clone(), opt.clone(), enable_dashboard))
-            // Disable signals allows the server to terminate immediately when a user enter CTRL-C
-            .disable_signals()
-            .keep_alive(KeepAlive::Os);
+    let http_server = HttpServer::new(move || {
+        create_app(
+            index_scheduler.clone(),
+            auth_controller.clone(),
+            search_queue.clone(),
+            opt.clone(),
+            logs.clone(),
+            analytics.clone(),
+            enable_dashboard,
+        )
+    })
+    // Disable signals allows the server to terminate immediately when a user enter CTRL-C
+    .disable_signals()
+    .keep_alive(KeepAlive::Os);

     if let Some(config) = opt_clone.get_ssl_config()? {
         http_server.bind_rustls_0_23(opt_clone.http_addr, config)?.run().await?;
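Note (illustrative, not part of the diff): both versions of `main` above log the top-level error and then walk its `source()` chain with a depth counter. A minimal self-contained sketch of that reporting loop, using two hypothetical `thiserror` error types to build the chain:

use std::error::Error;

use thiserror::Error as ThisError;

// Hypothetical root cause.
#[derive(Debug, ThisError)]
#[error("snapshot file missing")]
struct MissingSnapshot;

// Hypothetical wrapper whose #[source] field becomes the next link in the chain.
#[derive(Debug, ThisError)]
#[error("failed to start the server")]
struct StartupFailed(#[source] MissingSnapshot);

// Mirrors the loop hinted at in `main`: print the top error, then every cause.
fn report(error: &(dyn Error + 'static)) {
    eprintln!("error: {error}");
    let mut current = error.source();
    let mut depth = 0;
    while let Some(cause) = current {
        eprintln!("  cause #{depth}: {cause}");
        current = cause.source();
        depth += 1;
    }
}

fn main() {
    report(&StartupFailed(MissingSnapshot));
}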
Some files were not shown because too many files have changed in this diff.