Compare commits

...

92 Commits

Author SHA1 Message Date
ManyTheFish
3107f5947a hydrates federated search 2025-12-15 14:50:08 +01:00
ManyTheFish
e8fdc63e2c Add experimental feature 2025-12-11 14:16:38 +01:00
ManyTheFish
9e3d3bb11c Add foreignKeys setting 2025-12-11 14:16:38 +01:00
Clément Renault
26e368b116 Merge pull request #6041 from meilisearch/fix-workflow-injection
Remove risk of command injection
2025-12-09 17:04:58 +00:00
curquiza
ba95ac0915 Remove risk of command injection 2025-12-09 17:06:41 +01:00
Clément Renault
75fcbfc2fe Merge pull request #6039 from meilisearch/bump-rust-to-1-19-1
Move to Rust v1.91.1
2025-12-09 13:55:08 +00:00
Kerollmops
8c19b6d55e Make the new Clippy happy 2025-12-09 14:33:04 +01:00
Kerollmops
08d0f05ece Remove a warning 2025-12-09 13:58:37 +01:00
Kerollmops
4762e9afa0 Move to Rust v1.91.1 2025-12-09 13:52:46 +01:00
Clément Renault
12fcab91c5 Merge pull request #6037 from meilisearch/fix-intel-mac
Fix macos-amd64 compilation
2025-12-08 13:21:51 +00:00
Louis Dureuil
792a72a23f Add missing cfg 2025-12-08 13:22:01 +01:00
Louis Dureuil
2dd7f29edf Merge pull request #6034 from meilisearch/update-version-v1.29.0
Update version for the next release (v1.29.0) in Cargo.toml
2025-12-08 08:01:41 +00:00
dureuill
ff680d29a8 Update version for the next release (v1.29.0) in Cargo.toml 2025-12-04 16:24:56 +00:00
Clément Renault
00420dfca0 Merge pull request #6018 from qdequele/add-support-xlmrobertamodels
Add support for XLM Roberta models
2025-12-04 15:46:53 +00:00
Quentin de Quelen
a3a86ac629 chore: cargo fmt 2025-12-04 16:27:19 +01:00
Quentin de Quelen
f6210b8e5e add tests for the support of the models XLMRoberta 2025-12-04 16:27:19 +01:00
Quentin de Quelen
fe46af7ded add support of models XLMRoberta 2025-12-04 16:27:19 +01:00
Clément Renault
57b94b411f Merge pull request #6030 from meilisearch/require-git
Require git
2025-12-04 14:29:33 +00:00
Clément Renault
a7b6f65851 Merge pull request #6022 from meilisearch/xtask-generate-proto-name
Introduce xtask sub-command to generate prototypes
2025-12-04 13:53:20 +00:00
Louis Dureuil
1ec6646d8c Merge pull request #6029 from meilisearch/dumpless-upgrade-migrations
Switch to migration-oriented dumpless upgrade
2025-12-04 13:35:26 +00:00
Kerollmops
2dccacf273 Hide git fetch output 2025-12-04 14:35:03 +01:00
Kerollmops
ce0f04e9ee Improve the prototype guide 2025-12-04 14:35:03 +01:00
Kerollmops
9ba5c6d371 Update the prototype format 2025-12-04 14:35:03 +01:00
Kerollmops
56673fee56 Introduce the first working version of the tool 2025-12-04 14:35:03 +01:00
Clément Renault
b30bcbb931 Merge pull request #6032 from meilisearch/bump-hannoy
Bump hannoy to v0.1.0-nested-rtxns
2025-12-04 13:30:43 +00:00
Kerollmops
5fbe4436c8 Bump hannoy to v0.1.0-nested-rtxns 2025-12-04 14:06:45 +01:00
Louis Dureuil
8fa253c293 fmt 2025-12-04 13:55:28 +01:00
Louis Dureuil
4833da9edb Chore: remove some duplicated lambdas to ease compile time 2025-12-04 13:55:28 +01:00
Louis Dureuil
c0e31a4f01 Switch to migration-oriented dumpless upgrade 2025-12-04 13:55:28 +01:00
Louis Dureuil
c06ffb31d1 Update snapshots 2025-12-04 13:55:28 +01:00
Louis Dureuil
3097314b9d Make snapshots independent on the version 2025-12-04 13:55:27 +01:00
Louis Dureuil
786a978237 fmt 2025-12-04 13:52:57 +01:00
Louis Dureuil
03e53aaf6d Add binary to display build-info 2025-12-04 13:52:57 +01:00
Louis Dureuil
2206f045a4 replace git2 by the git command line in build-info 2025-12-04 13:52:56 +01:00
Louis Dureuil
246cf8b2d1 Mimic what is done for publish asset in the CI, for faster build 2025-12-04 13:52:56 +01:00
Louis Dureuil
82adabc5a0 Merge pull request #5861 from meilisearch/upgrade-tests
Declarative tests
2025-12-04 11:00:53 +00:00
Louis Dureuil
c9a22247d2 add hannoy test 2025-12-04 11:41:41 +01:00
Louis Dureuil
c535b8ddef Use variables to account for changes between local and CI 2025-12-04 09:47:37 +01:00
Louis Dureuil
8e89619aed Also evaluate variables in expected responses 2025-12-04 09:47:21 +01:00
Clément Renault
f617ca8e38 Merge pull request #6023 from meilisearch/curquiza-patch-1
Send notifications for Kubernetes integration when releasing
2025-12-04 07:00:50 +00:00
Louis Dureuil
959175ad2a switch to gh runner 2025-12-03 22:59:57 +01:00
Louis Dureuil
341ffbf5ef Modify bot message on db-change labeled PRs 2025-12-03 21:25:41 +01:00
Louis Dureuil
542f3073f4 Appease codeql 2025-12-03 21:25:41 +01:00
Louis Dureuil
0f134b079f hf-embed workload: add ranking scores 2025-12-03 21:25:41 +01:00
Louis Dureuil
9e7ae47355 Add missing sha 2025-12-03 21:25:41 +01:00
Louis Dureuil
1edf07df29 Add tests to CI 2025-12-03 21:25:40 +01:00
Louis Dureuil
88aa3cddde Support local builds of enterprise binaries 2025-12-03 21:25:40 +01:00
Louis Dureuil
e6846cb55a Rename and move the test instructions 2025-12-03 21:25:40 +01:00
Louis Dureuil
29b715e2f9 Update workloads 2025-12-03 21:25:40 +01:00
Louis Dureuil
f28dc5bd2b cleaning 2025-12-03 21:25:40 +01:00
Louis Dureuil
56d0b8ea54 Some cleaning 2025-12-03 21:25:40 +01:00
Louis Dureuil
514edb1b79 Add workloads 2025-12-03 21:25:40 +01:00
Louis Dureuil
cfb609d41d clippy 2025-12-03 21:25:40 +01:00
Louis Dureuil
11cb062067 fmt 2025-12-03 21:25:40 +01:00
Louis Dureuil
2ca4926ac5 Support editions, move to common 2025-12-03 21:25:40 +01:00
Louis Dureuil
834bd9b879 Fix uninitialization issue on unsupported platforms 2025-12-03 21:25:39 +01:00
Louis Dureuil
cac7e00983 Remove chrono 2025-12-03 21:25:39 +01:00
Mubelotix
e9300bac64 Add documentation 2025-12-03 21:25:39 +01:00
Mubelotix
b0da7864a4 Api key tests 2025-12-03 21:25:39 +01:00
Mubelotix
2b9d379feb Add variable registration mechanism 2025-12-03 21:25:39 +01:00
Mubelotix
8d585a04d4 Update movies workload 2025-12-03 21:25:39 +01:00
Mubelotix
0095a72fba Test for upgrade 2025-12-03 21:25:39 +01:00
Mubelotix
651339648c Fix processing time ms 2025-12-03 21:25:39 +01:00
Mubelotix
a489f4c172 Update issue template 2025-12-03 21:25:39 +01:00
Mubelotix
3b875ea00e Update movies 2025-12-03 21:25:39 +01:00
Mubelotix
9d269c499c Fix line feed at the end of files 2025-12-03 21:25:39 +01:00
Mubelotix
da35ae0a6e Update emojis 2025-12-03 21:25:38 +01:00
Mubelotix
61945b235d Add redaction system 2025-12-03 21:25:38 +01:00
Mubelotix
e936ac172d Fix compilation 2025-12-03 21:25:38 +01:00
Mubelotix
162a84cdbf Improve error detection 2025-12-03 21:25:38 +01:00
Mubelotix
92c63cf351 Improve diffing 2025-12-03 21:25:38 +01:00
Mubelotix
fca35b7476 Add upgrade system 2025-12-03 21:25:38 +01:00
Mubelotix
4056657a55 Refactor around meili_path 2025-12-03 21:25:38 +01:00
Mubelotix
685d227597 Move file to common 2025-12-03 21:25:38 +01:00
Mubelotix
49b9f6ff38 Remove useless data 2025-12-03 21:25:38 +01:00
Mubelotix
79d0a3fb97 Remove useless parameter 2025-12-03 21:25:38 +01:00
Mubelotix
313ef7e79b Add response updating logic 2025-12-03 21:25:37 +01:00
Mubelotix
256407be61 Fix asset version issues 2025-12-03 21:25:37 +01:00
Mubelotix
8b3943bd32 Do so that meilisearch versions get downloaded 2025-12-03 21:25:37 +01:00
Mubelotix
87b972d29a Implement test workload running logic 2025-12-03 21:25:37 +01:00
Mubelotix
09ab61b360 Continue integrating commands to tests 2025-12-03 21:25:37 +01:00
Mubelotix
2459f381b4 Remove dead code 2025-12-03 21:25:37 +01:00
Mubelotix
6442f02de4 Make commands common 2025-12-03 21:25:37 +01:00
Mubelotix
91c4d9ea79 Tag workloads 2025-12-03 21:25:37 +01:00
Mubelotix
92a4091da3 Create test workload 2025-12-03 21:25:37 +01:00
Mubelotix
29a337f0f9 Create the test function 2025-12-03 21:25:36 +01:00
Mubelotix
8c3cebadaa Create the test xtask command and args 2025-12-03 21:25:36 +01:00
Clément Renault
b566458aa2 Merge pull request #6027 from meilisearch/release-v1.28.2
Bring back changes from v1.28.2
2025-12-03 17:46:44 +00:00
Clément Renault
ae4344e359 Merge pull request #6004 from meilisearch/default-experimental-vector-store
Make Hannoy the default vector store
2025-12-03 17:16:46 +00:00
Kerollmops
b6cb384650 Fix settings tests 2025-12-03 17:52:52 +01:00
Clément Renault
2c3e3d856c Make hannoy the default vector store when creating an index 2025-12-03 17:52:52 +01:00
Clémentine
93e97f814c Add notifications for Kubernetes integration
Updated comments and conditions for notifying integration teams.
2025-12-03 17:49:46 +01:00
137 changed files with 4185 additions and 1126 deletions

View File

@@ -24,6 +24,11 @@ TBD
- [ ] If not, add the `no db change` label to your PR, and you're good to merge.
- [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining what to do.
### Reminders when adding features
- [ ] Write unit tests using insta
- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
### Reminders when modifying the API
- [ ] Update the openAPI file with utoipa:

View File

@@ -18,7 +18,7 @@ jobs:
timeout-minutes: 180 # 3h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
profile: minimal

View File

@@ -66,7 +66,7 @@ jobs:
fetch-depth: 0 # fetch full history to be able to get main commit sha
ref: ${{ steps.comment-branch.outputs.head_ref }}
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Run benchmarks on PR ${{ github.event.issue.id }}
run: |

View File

@@ -12,7 +12,7 @@ jobs:
timeout-minutes: 180 # 3h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}

View File

@@ -18,7 +18,7 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
profile: minimal

View File

@@ -44,7 +44,7 @@ jobs:
exit 1
fi
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
profile: minimal

View File

@@ -16,7 +16,7 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
profile: minimal

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: benchmarks
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
profile: minimal

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: benchmarks
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
profile: minimal

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: benchmarks
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
profile: minimal

View File

@@ -6,7 +6,7 @@ on:
env:
MESSAGE: |
### Hello, I'm a bot 🤖
You are receiving this message because you declared that this PR makes changes to the Meilisearch database.
Depending on the nature of the change, additional actions might be required on your part. The following sections detail these actions for each kind of change; please copy the relevant section into the description of your PR and make sure to perform the required actions.
@@ -19,6 +19,7 @@ env:
- [ ] Detail the changes to the DB format and why they are forward compatible
- [ ] Forward-compatibility: a database created before this PR and using the features touched by this PR can be opened by a Meilisearch built from the code of this PR.
- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
## This PR makes breaking changes
@@ -35,8 +36,7 @@ env:
- [ ] Write the code to go from the old database to the new one
- If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
- If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
- [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
- [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
jobs:
add-comment:

View File

@@ -3,7 +3,7 @@ name: Look for flaky tests
on:
workflow_dispatch:
schedule:
- cron: '0 4 * * *' # Every day at 4:00AM
- cron: "0 4 * * *" # Every day at 4:00AM
jobs:
flaky:
@@ -23,7 +23,7 @@ jobs:
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Install cargo-flaky
run: cargo install cargo-flaky
- name: Run cargo flaky in the dumps

View File

@@ -12,7 +12,7 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
# Run benchmarks
- name: Run the fuzzer

View File

@@ -31,7 +31,7 @@ jobs:
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Install cargo-deb
run: cargo install cargo-deb
- uses: actions/checkout@v5

View File

@@ -208,8 +208,8 @@ jobs:
done
cosign sign --yes ${images}
# /!\ Don't touch this without checking with Cloud team
- name: Send CI information to Cloud team
# /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel
- name: Notify meilisearch-cloud
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }}
uses: peter-evans/repository-dispatch@v3
@@ -218,3 +218,14 @@ jobs:
repository: meilisearch/meilisearch-cloud
event-type: cloud-docker-build
client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
# /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel
- name: Notify meilisearch-kubernetes
# Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable
if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }}
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.MEILI_BOT_GH_PAT }}
repository: meilisearch/meilisearch-kubernetes
event-type: meilisearch-release
client-payload: '{ "version": "${{ github.ref_name }}" }'

View File

@@ -76,7 +76,7 @@ jobs:
needs: check-version
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Build
run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }}
# No need to upload binaries for dry run (cron or workflow_dispatch)

View File

@@ -25,14 +25,18 @@ jobs:
- uses: actions/checkout@v5
- name: Define the Docker image we need to use
id: define-image
env:
EVENT_NAME: ${{ github.event_name }}
DOCKER_IMAGE_INPUT: ${{ github.event.inputs.docker_image }}
run: |
event=${{ github.event_name }}
echo "docker-image=nightly" >> $GITHUB_OUTPUT
if [[ $event == 'workflow_dispatch' ]]; then
echo "docker-image=${{ github.event.inputs.docker_image }}" >> $GITHUB_OUTPUT
if [[ "$EVENT_NAME" == 'workflow_dispatch' ]]; then
echo "docker-image=$DOCKER_IMAGE_INPUT" >> $GITHUB_OUTPUT
fi
- name: Docker image is ${{ steps.define-image.outputs.docker-image }}
run: echo "Docker image is ${{ steps.define-image.outputs.docker-image }}"
env:
DOCKER_IMAGE: ${{ steps.define-image.outputs.docker-image }}
run: echo "Docker image is $DOCKER_IMAGE"
##########
## SDKs ##
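The change above applies GitHub's script-injection hardening: workflow expressions such as `${{ github.event.inputs.docker_image }}` are no longer expanded directly inside the `run:` script, where a crafted input could escape the quoting and execute as shell code; instead they are bound to `env:` variables, which the shell only ever sees as data. A minimal sketch of the pattern (step names are illustrative):

```yaml
# Unsafe: the expression is substituted into the script text itself,
# so an input like `"; rm -rf /; echo "` becomes part of the command.
- name: Greet (unsafe)
  run: echo "Hello ${{ github.event.inputs.name }}"

# Hardened: the expression only populates an environment variable;
# the script then references it as ordinary data.
- name: Greet (hardened)
  env:
    INPUT_NAME: ${{ github.event.inputs.name }}
  run: echo "Hello $INPUT_NAME"
```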

View File

@@ -34,7 +34,7 @@ jobs:
- name: check free space after
run: df -h
- name: Setup test with Rust stable
uses: dtolnay/rust-toolchain@1.89
uses: dtolnay/rust-toolchain@1.91.1
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
with:
@@ -63,7 +63,7 @@ jobs:
- uses: actions/checkout@v5
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Run cargo build without any default features
uses: actions-rs/cargo@v1
with:
@@ -87,7 +87,7 @@ jobs:
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Run cargo build with almost all features
run: |
cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
@@ -145,7 +145,7 @@ jobs:
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Run cargo tree without default features and check lindera is not present
run: |
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
@@ -167,14 +167,11 @@ jobs:
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- name: Run cargo build in release
uses: actions-rs/cargo@v1
with:
command: build
args: --all-targets --release
- name: Build
run: cargo build --release --locked --target x86_64-unknown-linux-gnu
clippy:
name: Run Clippy
@@ -190,7 +187,7 @@ jobs:
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
components: clippy
- name: Cache dependencies
@@ -212,7 +209,7 @@ jobs:
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
with:
components: rustfmt
- name: Cache dependencies
@@ -224,3 +221,23 @@ jobs:
run: |
echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
cargo fmt --all -- --check
declarative-tests:
name: Run declarative tests
runs-on: ubuntu-22.04-arm
permissions:
contents: read
steps:
- uses: actions/checkout@v5
- name: Clean space as per https://github.com/actions/virtual-environments/issues/709
run: |
sudo rm -rf "/opt/ghc" || true
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.91.1
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.8.0
- name: Run declarative tests
run: |
cargo xtask test workloads/tests/*.json

View File

@@ -24,7 +24,7 @@ jobs:
sudo rm -rf "/usr/share/dotnet" || true
sudo rm -rf "/usr/local/lib/android" || true
sudo rm -rf "/usr/local/share/boost" || true
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.91.1
- name: Install sd
run: cargo install sd
- name: Update Cargo.toml file

View File

@@ -124,6 +124,7 @@ They are JSON files with the following structure (comments are not actually supp
{
// Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
"name": "hackernews.ndjson_1M,no-threads",
"type": "bench",
// Number of consecutive runs of the commands that should be performed.
// Each run uses a fresh instance of Meilisearch and a fresh database.
// Each run produces its own report file.

Cargo.lock (generated, 106 lines changed)
View File

@@ -580,7 +580,7 @@ source = "git+https://github.com/meilisearch/bbqueue#e8af4a4bccc8eb36b2b0442c4a9
[[package]]
name = "benchmarks"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"anyhow",
"bumpalo",
@@ -790,11 +790,11 @@ dependencies = [
[[package]]
name = "build-info"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"anyhow",
"time",
"vergen-git2",
"vergen-gitcl",
]
[[package]]
@@ -1786,7 +1786,7 @@ dependencies = [
[[package]]
name = "dump"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"anyhow",
"big_s",
@@ -2018,7 +2018,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "file-store"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"tempfile",
"thiserror 2.0.17",
@@ -2040,7 +2040,7 @@ dependencies = [
[[package]]
name = "filter-parser"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"insta",
"levenshtein_automata",
@@ -2068,7 +2068,7 @@ dependencies = [
[[package]]
name = "flatten-serde-json"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"criterion",
"serde_json",
@@ -2231,7 +2231,7 @@ dependencies = [
[[package]]
name = "fuzzers"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"arbitrary",
"bumpalo",
@@ -2604,19 +2604,6 @@ version = "0.32.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7"
[[package]]
name = "git2"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110"
dependencies = [
"bitflags 2.10.0",
"libc",
"libgit2-sys",
"log",
"url",
]
[[package]]
name = "glob"
version = "0.3.3"
@@ -2711,9 +2698,9 @@ dependencies = [
[[package]]
name = "hannoy"
version = "0.0.9-nested-rtxns-2"
version = "0.1.0-nested-rtxns"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06eda090938d9dcd568c8c2a5de383047ed9191578ebf4a342d2975d16e621f2"
checksum = "be82bf3f2108ddc8885e3d306fcd7f4692066bfe26065ca8b42ba417f3c26dd1"
dependencies = [
"bytemuck",
"byteorder",
@@ -3198,7 +3185,7 @@ dependencies = [
[[package]]
name = "index-scheduler"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"anyhow",
"backoff",
@@ -3460,7 +3447,7 @@ dependencies = [
[[package]]
name = "json-depth-checker"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"criterion",
"serde_json",
@@ -3557,18 +3544,6 @@ dependencies = [
"rle-decode-fast",
]
[[package]]
name = "libgit2-sys"
version = "0.18.2+1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222"
dependencies = [
"cc",
"libc",
"libz-sys",
"pkg-config",
]
[[package]]
name = "libloading"
version = "0.8.9"
@@ -3626,18 +3601,6 @@ dependencies = [
"zlib-rs",
]
[[package]]
name = "libz-sys"
version = "1.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d"
dependencies = [
"cc",
"libc",
"pkg-config",
"vcpkg",
]
[[package]]
name = "lindera"
version = "0.43.3"
@@ -3974,7 +3937,7 @@ checksum = "ae960838283323069879657ca3de837e9f7bbb4c7bf6ea7f1b290d5e9476d2e0"
[[package]]
name = "meili-snap"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"insta",
"md5 0.8.0",
@@ -3985,7 +3948,7 @@ dependencies = [
[[package]]
name = "meilisearch"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"actix-cors",
"actix-http",
@@ -4083,7 +4046,7 @@ dependencies = [
[[package]]
name = "meilisearch-auth"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"base64 0.22.1",
"enum-iterator",
@@ -4102,7 +4065,7 @@ dependencies = [
[[package]]
name = "meilisearch-types"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"actix-web",
"anyhow",
@@ -4137,7 +4100,7 @@ dependencies = [
[[package]]
name = "meilitool"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"anyhow",
"clap",
@@ -4171,7 +4134,7 @@ dependencies = [
[[package]]
name = "milli"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"arroy",
"bbqueue",
@@ -4750,7 +4713,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "permissive-json-pointer"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"big_s",
"serde_json",
@@ -6072,6 +6035,20 @@ name = "similar"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa"
dependencies = [
"bstr",
"unicode-segmentation",
]
[[package]]
name = "similar-asserts"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b441962c817e33508847a22bd82f03a30cff43642dc2fae8b050566121eb9a"
dependencies = [
"console",
"similar",
]
[[package]]
name = "simple_asn1"
@@ -7105,12 +7082,6 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "vergen"
version = "9.0.6"
@@ -7124,14 +7095,13 @@ dependencies = [
]
[[package]]
name = "vergen-git2"
version = "1.0.7"
name = "vergen-gitcl"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1"
checksum = "b9dfc1de6eb2e08a4ddf152f1b179529638bedc0ea95e6d667c014506377aefe"
dependencies = [
"anyhow",
"derive_builder",
"git2",
"rustversion",
"time",
"vergen",
@@ -7783,7 +7753,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "1.28.2"
version = "1.29.0"
dependencies = [
"anyhow",
"build-info",
@@ -7792,9 +7762,11 @@ dependencies = [
"futures-core",
"futures-util",
"reqwest",
"semver",
"serde",
"serde_json",
"sha2",
"similar-asserts",
"sysinfo",
"time",
"tokio",

View File

@@ -23,7 +23,7 @@ members = [
]
[workspace.package]
version = "1.28.2"
version = "1.29.0"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",

TESTING.md (new file, 326 lines)
View File

@@ -0,0 +1,326 @@
# Declarative tests
Declarative tests ensure that Meilisearch features remain stable across versions.
While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.
Declarative tests instead **simulate the lifetime of a database**: they chain together API commands and binary-change instructions, verifying that database state and API responses remain consistent across versions.
## Basic example
```jsonc
{
"type": "test",
"name": "api-keys",
"binary": { // the first command will run on the binary following this specification.
"source": "release", // get the binary as a release from GitHub
"version": "1.19.0", // version to fetch
"edition": "community" // edition to fetch
},
"commands": []
}
```
This example defines a no-op test: it runs no commands.
If the file is saved at `workloads/tests/example.json`, you can run it with:
```bash
cargo xtask test workloads/tests/example.json
```
## Commands
Commands represent API requests sent to Meilisearch endpoints during a test.
They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
}
}
```
This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.
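For reference, the runner's request corresponds to the following plain HTTP call (a sketch assuming a local instance on the default port 7700, authenticated with a master key held in `MEILI_MASTER_KEY`):

```bash
curl -X POST 'http://localhost:7700/keys' \
  -H "Authorization: Bearer $MEILI_MASTER_KEY" \
  -H 'Content-Type: application/json' \
  --data '{
    "actions": ["search", "documents.add"],
    "description": "Test API Key",
    "expiresAt": null,
    "indexes": ["movies"]
  }'
```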
### Using assets in commands
To keep tests concise and reusable, you can define **assets** at the root of the workload file.
Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0",
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
}
]
}
```
In this example:
- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.
This makes the test much cleaner than inlining a large dataset directly into the command.
For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).
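When declaring a new asset, the checksum can be computed locally before pasting it into the `sha256` field (a sketch using standard tooling and the URL from the example above):

```bash
curl -sL 'https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json' | sha256sum
```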
### Asserting responses
Commands can specify both the **expected status code** and the **expected response body**.
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
},
"expectedStatus": 202,
"expectedResponse": {
"enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
"indexUid": "movies",
"status": "enqueued",
"taskUid": 1,
"type": "documentAdditionOrUpdate"
},
"synchronous": "WaitForTask"
}
```
Manually writing `expectedResponse` fields can be tedious.
Instead, you can let the test runner populate them automatically:
```bash
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
cargo xtask test workloads/tests/example.json --add-missing-responses
# OR
# Run the workload to populate expected fields. Updates all fields including existing ones
cargo xtask test workloads/tests/example.json --update-responses
```
This workflow is recommended:
1. Write the test without expected fields.
2. Run it with `--add-missing-responses` to capture the actual responses.
3. Review and commit the generated expectations.
## Changing binary
A test can include an instruction that switches the current Meilisearch instance from one binary specification to another.
When executed, such an instruction will:
1. Stop the current Meilisearch instance.
2. Fetch the binary specified by the instruction.
3. Restart the server with the specified binary on the same database.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.19.0", // start with version v1.19.0
"edition": "community"
},
"assets": {
"movies.json": {
"local_location": null,
"remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
"sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
}
},
"commands": [
// setup some data
{
"route": "indexes/movies/documents",
"method": "POST",
"body": {
"asset": "movies.json"
}
},
// switch binary to v1.24.0
{
"binary": {
"source": "release",
"version": "1.24.0",
"edition": "community"
}
}
]
}
```
### Typical usage
In most cases, the binary-change instruction is used to upgrade a database.
- **Set up** some data using commands on an older version.
- **Upgrade** to the latest version.
- **Assert** that the data and API behavior remain correct after the upgrade.
To properly test the dumpless upgrade, one should typically:
1. Open the database without processing the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments
2. Check that the search, stats and task queue still work.
3. Open the database and process the update task: Use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish.
4. Check that the indexing, search, stats, and task queue still work.
```jsonc
{
"type": "test",
"name": "movies",
"binary": {
"source": "release",
"version": "1.12.0",
"edition": "community"
},
"commands": [
// 0. Run commands to populate the database
{
// ..
},
// 1. Open the database with new MS without processing the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade", // allows to open with a newer MS
"--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
]
}
},
// 2. Check the search etc.
{
// ..
},
// 3. Open the database with new MS and process the update task
{
"binary": {
"source": "build", // build the binary from the sources in the current git repository
"edition": "community",
"extraCliArgs": [
"--experimental-dumpless-upgrade" // allows to open with a newer MS
// no `--experimental-max-number-of-batched-tasks=0`
]
}
},
// 4. Check the indexing, search, etc.
{
// ..
}
]
}
```
This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.
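Step 3 of the procedure mentions a `health` command to wait for the upgrade task; a minimal sketch following the command schema above (the exact waiting semantics are handled by the runner):

```jsonc
{
  "route": "health",
  "method": "GET",
  "expectedStatus": 200,
  "expectedResponse": {
    "status": "available"
  }
}
```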
## Variables
Sometimes a command needs to use a value returned by a **previous response**.
These values can be captured and reused using the `register` field.
```jsonc
{
"route": "keys",
"method": "POST",
"body": {
"inline": {
"actions": [
"search",
"documents.add"
],
"description": "Test API Key",
"expiresAt": null,
"indexes": [ "movies" ]
}
},
"expectedResponse": {
"key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
// ...
},
"register": {
"key": "/key"
},
"synchronous": "WaitForResponse"
}
```
The `register` field captures the value at the JSON path `/key` from the response.
Paths follow the **JSON Pointer (RFC 6901)** format.
Registered variables are available for all subsequent commands.
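As an illustration of `register`, the `task_id` variable used in the next example could be captured from the `taskUid` field of a task-creating response (a hypothetical snippet following the command schema above):

```jsonc
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": { "asset": "movies.json" },
  "register": {
    "task_id": "/taskUid" // captures the uid of the enqueued task
  },
  "synchronous": "WaitForTask"
}
```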
Registered variables can be referenced by wrapping their name in double curly braces:
In the route/path:
```jsonc
{
"route": "tasks/{{ task_id }}",
"method": "GET"
}
```
In the request body:
```jsonc
{
"route": "indexes/movies/documents",
"method": "PATCH",
"body": {
"inline": {
"id": "{{ document_id }}",
"overview": "Shazam turns evil and the world is in danger.",
}
}
}
```
Or they can be referenced by their name (**without curly braces**) as an API key:
```jsonc
{
"route": "indexes/movies/documents",
"method": "POST",
"body": { /* ... */ },
"apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
}
```

View File

@@ -21,6 +21,10 @@ use roaring::RoaringBitmap;
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
fn no_cancel() -> bool {
false
}
const BENCHMARK_ITERATION: usize = 10;
fn setup_dir(path: impl AsRef<Path>) {
@@ -65,7 +69,7 @@ fn setup_settings<'t>(
let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
builder.set_sortable_fields(sortable_fields);
builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
builder.execute(&no_cancel, &Progress::default(), Default::default()).unwrap();
}
fn setup_index_with_settings(
@@ -152,7 +156,7 @@ fn indexing_songs_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -168,7 +172,7 @@ fn indexing_songs_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -220,7 +224,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -236,7 +240,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -266,7 +270,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -282,7 +286,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -336,7 +340,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -352,7 +356,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -414,7 +418,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -430,7 +434,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -460,7 +464,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -476,7 +480,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -502,7 +506,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -518,7 +522,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -571,7 +575,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -587,7 +591,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -639,7 +643,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -655,7 +659,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -707,7 +711,7 @@ fn indexing_wiki(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -723,7 +727,7 @@ fn indexing_wiki(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -774,7 +778,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -790,7 +794,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -820,7 +824,7 @@ fn reindexing_wiki(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -836,7 +840,7 @@ fn reindexing_wiki(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -889,7 +893,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -905,7 +909,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -967,7 +971,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -983,7 +987,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1014,7 +1018,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1030,7 +1034,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1057,7 +1061,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1073,7 +1077,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1125,7 +1129,7 @@ fn indexing_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1141,7 +1145,7 @@ fn indexing_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1192,7 +1196,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1208,7 +1212,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1238,7 +1242,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1254,7 +1258,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1307,7 +1311,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1323,7 +1327,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1372,7 +1376,7 @@ fn delete_documents_from_ids(index: Index, document_ids_to_delete: Vec<RoaringBi
Some(primary_key),
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1422,7 +1426,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1438,7 +1442,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1468,7 +1472,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1484,7 +1488,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1510,7 +1514,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1526,7 +1530,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1601,7 +1605,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1617,7 +1621,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1693,7 +1697,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1709,7 +1713,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1777,7 +1781,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1793,7 +1797,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1845,7 +1849,7 @@ fn indexing_geo(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1861,7 +1865,7 @@ fn indexing_geo(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1912,7 +1916,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1928,7 +1932,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -1958,7 +1962,7 @@ fn reindexing_geo(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -1974,7 +1978,7 @@ fn reindexing_geo(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2027,7 +2031,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2043,7 +2047,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)

View File

@@ -15,4 +15,4 @@ time = { version = "0.3.44", features = ["parsing"] }
[build-dependencies]
anyhow = "1.0.100"
vergen-git2 = "1.0.7"
vergen-gitcl = "1.0.8"

View File

@@ -15,7 +15,7 @@ fn emit_git_variables() -> anyhow::Result<()> {
// Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
// in the corresponding GitHub workflow (publish_docker.yml).
// This is due to the Dockerfile building the binary outside of the git directory.
let mut builder = vergen_git2::Git2Builder::default();
let mut builder = vergen_gitcl::GitclBuilder::default();
builder.branch(true);
builder.commit_timestamp(true);
@@ -25,5 +25,5 @@ fn emit_git_variables() -> anyhow::Result<()> {
let git2 = builder.build()?;
vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
vergen_gitcl::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
}

View File

@@ -0,0 +1,6 @@
use build_info::BuildInfo;
fn main() {
let info = BuildInfo::from_build();
dbg!(info);
}

View File

@@ -317,6 +317,7 @@ pub(crate) mod test {
FilterableAttributesRule::Field(S("race")),
FilterableAttributesRule::Field(S("age")),
]),
foreign_keys: Setting::NotSet,
sortable_attributes: Setting::Set(btreeset! { S("age") }),
ranking_rules: Setting::NotSet,
stop_words: Setting::NotSet,

View File

@@ -349,6 +349,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
v5::settings::Setting::Reset => v6::Setting::Reset,
v5::settings::Setting::NotSet => v6::Setting::NotSet,
},
foreign_keys: v6::Setting::NotSet,
sortable_attributes: settings.sortable_attributes.into(),
ranking_rules: {
match settings.ranking_rules {

View File

@@ -107,19 +107,14 @@ impl Settings<Unchecked> {
}
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Default, Debug, Clone, PartialEq)]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub const fn is_not_set(&self) -> bool {
matches!(self, Self::NotSet)

View File

@@ -161,19 +161,14 @@ pub struct Facets {
pub min_level_size: Option<NonZeroUsize>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub fn map<U, F>(self, f: F) -> Setting<U>
where

View File

@@ -1,9 +1,7 @@
use std::fmt::{self, Display, Formatter};
use std::marker::PhantomData;
use std::str::FromStr;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer};
use serde::Deserialize;
use uuid::Uuid;
use super::settings::{Settings, Unchecked};
@@ -82,59 +80,3 @@ impl Display for IndexUidFormatError {
}
impl std::error::Error for IndexUidFormatError {}
/// A type that tries to match either a star (*) or
/// any other thing that implements `FromStr`.
#[derive(Debug)]
#[cfg_attr(test, derive(serde::Serialize))]
pub enum StarOr<T> {
Star,
Other(T),
}
impl<'de, T, E> Deserialize<'de> for StarOr<T>
where
T: FromStr<Err = E>,
E: Display,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
/// Serde can't differentiate between `StarOr::Star` and `StarOr::Other` without a tag.
/// Simply using `#[serde(untagged)]` + `#[serde(rename="*")]` will lead to attempting to
/// deserialize everything as a `StarOr::Other`, including "*".
/// [`#[serde(other)]`](https://serde.rs/variant-attrs.html#other) might have helped but is
/// not supported on untagged enums.
struct StarOrVisitor<T>(PhantomData<T>);
impl<T, FE> Visitor<'_> for StarOrVisitor<T>
where
T: FromStr<Err = FE>,
FE: Display,
{
type Value = StarOr<T>;
fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result {
formatter.write_str("a string")
}
fn visit_str<SE>(self, v: &str) -> Result<Self::Value, SE>
where
SE: serde::de::Error,
{
match v {
"*" => Ok(StarOr::Star),
v => {
let other = FromStr::from_str(v).map_err(|e: T::Err| {
SE::custom(format!("Invalid `other` value: {}", e))
})?;
Ok(StarOr::Other(other))
}
}
}
}
deserializer.deserialize_str(StarOrVisitor(PhantomData))
}
}

View File

@@ -192,19 +192,14 @@ pub struct Facets {
pub min_level_size: Option<NonZeroUsize>,
}
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub fn set(self) -> Option<T> {
match self {

View File

@@ -47,20 +47,15 @@ pub struct Settings<T> {
pub _kind: PhantomData<T>,
}
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
#[cfg_attr(test, derive(serde::Serialize))]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub fn set(self) -> Option<T> {
match self {

View File

@@ -322,7 +322,7 @@ impl From<Task> for TaskView {
_ => None,
});
let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));
let duration = finished_at.zip(started_at).map(|(tf, ts)| tf - ts);
Self {
uid: id,

View File

@@ -171,6 +171,19 @@ impl RoFeatures {
.into())
}
}
pub fn check_foreign_keys_setting(&self, disabled_action: &'static str) -> Result<()> {
if self.runtime.foreign_keys {
Ok(())
} else {
Err(FeatureNotEnabledError {
disabled_action,
feature: "foreign_keys",
issue_link: "https://github.com/orgs/meilisearch/discussions/873",
}
.into())
}
}
}
impl FeatureData {

View File

@@ -6,7 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{Database, RoTxn};
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::{Details, Kind, Status, Task};
use meilisearch_types::versioning;
use meilisearch_types::versioning::{self, VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use roaring::RoaringBitmap;
use crate::index_mapper::IndexMapper;
@@ -320,7 +320,11 @@ fn snapshot_details(d: &Details) -> String {
format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
}
Details::UpgradeDatabase { from, to } => {
format!("{{ from: {from:?}, to: {to:?} }}")
if to == &(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) {
format!("{{ from: {from:?}, to: [current version] }}")
} else {
format!("{{ from: {from:?}, to: {to:?} }}")
}
}
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
@@ -400,7 +404,21 @@ pub fn snapshot_batch(batch: &Batch) -> String {
snap.push('{');
snap.push_str(&format!("uid: {uid}, "));
snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
let details = if let Some(upgrade_to) = &details.upgrade_to {
if upgrade_to.as_str()
== format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}").as_str()
{
let mut details = details.clone();
details.upgrade_to = Some("[current version]".into());
serde_json::to_string(&details).unwrap()
} else {
serde_json::to_string(details).unwrap()
}
} else {
serde_json::to_string(details).unwrap()
};
snap.push_str(&format!("details: {details}, "));
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
if !embedder_stats.skip_serializing() {
snap.push_str(&format!(

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> } }, kind: SettingsUpdate { index_uid: "doggos", new_settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> }, is_deletion: false, allow_index_creation: true }}
0 {uid: 0, status: enqueued, details: { settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, foreign_keys: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> } }, kind: SettingsUpdate { index_uid: "doggos", new_settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, foreign_keys: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> }, is_deletion: false, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> } }, kind: SettingsUpdate { index_uid: "doggos", new_settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> }, is_deletion: false, allow_index_creation: true }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, foreign_keys: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> } }, kind: SettingsUpdate { index_uid: "doggos", new_settings: Settings { displayed_attributes: WildcardSetting(NotSet), searchable_attributes: WildcardSetting(NotSet), filterable_attributes: NotSet, sortable_attributes: NotSet, foreign_keys: NotSet, ranking_rules: NotSet, stop_words: NotSet, non_separator_tokens: NotSet, separator_tokens: NotSet, dictionary: NotSet, synonyms: NotSet, distinct_attribute: NotSet, proximity_precision: NotSet, typo_tolerance: NotSet, faceting: NotSet, pagination: NotSet, embedders: Set({"default": Set(EmbeddingSettings { source: Set(Rest), model: NotSet, revision: NotSet, pooling: NotSet, api_key: Set("My super secret"), dimensions: Set(4), binary_quantized: NotSet, document_template: NotSet, document_template_max_bytes: NotSet, url: Set("http://localhost:7777"), indexing_fragments: NotSet, search_fragments: NotSet, request: Set(String("{{text}}")), response: Set(String("{{embedding}}")), headers: NotSet, search_embedder: NotSet, indexing_embedder: NotSet, distribution: NotSet })}), search_cutoff_ms: NotSet, localized_attributes: NotSet, facet_search: NotSet, prefix_search: NotSet, chat: NotSet, vector_store: NotSet, _kind: PhantomData<meilisearch_types::settings::Unchecked> }, is_deletion: false, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued []

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
----------------------------------------------------------------------
### Status:
enqueued [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -1,7 +1,7 @@
use anyhow::bail;
use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use meilisearch_types::versioning;
use time::OffsetDateTime;
use tracing::info;
@@ -9,83 +9,82 @@ use crate::queue::TaskQueue;
use crate::versioning::Versioning;
trait UpgradeIndexScheduler {
fn upgrade(
&self,
env: &Env<WithoutTls>,
wtxn: &mut RwTxn,
original: (u32, u32, u32),
) -> anyhow::Result<()>;
fn target_version(&self) -> (u32, u32, u32);
fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()>;
/// Whether the migration should be applied, depending on the initial version of the index scheduler before
/// any migration was applied
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;
/// A progress-centric description of the migration
fn description(&self) -> &'static str;
}
/// Upgrade the index scheduler to the binary version.
///
/// # Warning
///
/// The current implementation uses a single wtxn on the index scheduler for the whole duration of the upgrade.
/// If migrations start taking a long time, this might prevent tasks from being registered.
/// If this issue manifests, it can be mitigated by adding a `fn target_version` to `UpgradeIndexScheduler`,
/// making it possible to write intermediate versions and drop the wtxn between migrations.
pub fn upgrade_index_scheduler(
env: &Env<WithoutTls>,
versioning: &Versioning,
from: (u32, u32, u32),
to: (u32, u32, u32),
initial_version: (u32, u32, u32),
) -> anyhow::Result<()> {
let current_major = to.0;
let current_minor = to.1;
let current_patch = to.2;
let target_major: u32 = versioning::VERSION_MAJOR;
let target_minor: u32 = versioning::VERSION_MINOR;
let target_patch: u32 = versioning::VERSION_PATCH;
let target_version = (target_major, target_minor, target_patch);
let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
// This is the last upgrade function; it will be called when the index is up to date.
// Any other upgrade function should be added before this one.
&ToCurrentNoOp {},
];
let start = match from {
(1, 12, _) => 0,
(1, 13, _) => 0,
(1, 14, _) => 0,
(1, 15, _) => 0,
(1, 16, _) => 0,
(1, 17, _) => 0,
(1, 18, _) => 0,
(1, 19, _) => 0,
(1, 20, _) => 0,
(1, 21, _) => 0,
(1, 22, _) => 0,
(1, 23, _) => 0,
(1, 24, _) => 0,
(1, 25, _) => 0,
(1, 26, _) => 0,
(1, 27, _) => 0,
(1, 28, _) => 0,
(major, minor, patch) => {
if major > current_major
|| (major == current_major && minor > current_minor)
|| (major == current_major && minor == current_minor && patch > current_patch)
{
bail!(
"Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
);
} else if major < 1 || (major == current_major && minor < 12) {
bail!(
"Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
);
} else {
bail!("Unknown database version: v{major}.{minor}.{patch}");
}
}
};
info!("Upgrading the task queue");
let mut local_from = from;
for upgrade in upgrade_functions[start..].iter() {
let target = upgrade.target_version();
info!(
"Upgrading from v{}.{}.{} to v{}.{}.{}",
local_from.0, local_from.1, local_from.2, target.0, target.1, target.2
);
let mut wtxn = env.write_txn()?;
upgrade.upgrade(env, &mut wtxn, local_from)?;
versioning.set_version(&mut wtxn, target)?;
wtxn.commit()?;
local_from = target;
if initial_version == target_version {
return Ok(());
}
let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
// List all upgrade functions to apply in order here.
];
let (initial_major, initial_minor, initial_patch) = initial_version;
if initial_version > target_version {
bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is higher than the Meilisearch version {target_major}.{target_minor}.{target_patch}. Downgrade is not supported",
);
}
if initial_version < (1, 12, 0) {
bail!(
"Database version {initial_major}.{initial_minor}.{initial_patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{initial_major}.{initial_minor}.{initial_patch} and import it in the v{target_major}.{target_minor}.{target_patch}",
);
}
info!("Upgrading the task queue");
let mut wtxn = env.write_txn()?;
let migration_count = upgrade_functions.len();
for (migration_index, upgrade) in upgrade_functions.iter().enumerate() {
if upgrade.must_upgrade(initial_version) {
info!(
"[{migration_index}/{migration_count}] Applying migration: {}",
upgrade.description()
);
upgrade.upgrade(env, &mut wtxn)?;
info!(
"[{}/{migration_count}] Migration applied: {}",
migration_index + 1,
upgrade.description()
)
} else {
info!(
"[{migration_index}/{migration_count}] Skipping unnecessary migration: {}",
upgrade.description()
)
}
}
versioning.set_version(&mut wtxn, target_version)?;
info!("Task queue upgraded, spawning the upgrade database task");
let queue = TaskQueue::new(env, &mut wtxn)?;
let uid = queue.next_task_id(&wtxn)?;
queue.register(
@@ -98,9 +97,9 @@ pub fn upgrade_index_scheduler(
finished_at: None,
error: None,
canceled_by: None,
details: Some(Details::UpgradeDatabase { from, to }),
details: Some(Details::UpgradeDatabase { from: initial_version, to: target_version }),
status: Status::Enqueued,
kind: KindWithContent::UpgradeDatabase { from },
kind: KindWithContent::UpgradeDatabase { from: initial_version },
network: None,
custom_metadata: None,
},
@@ -109,21 +108,3 @@ pub fn upgrade_index_scheduler(
Ok(())
}
#[allow(non_camel_case_types)]
struct ToCurrentNoOp {}
impl UpgradeIndexScheduler for ToCurrentNoOp {
fn upgrade(
&self,
_env: &Env<WithoutTls>,
_wtxn: &mut RwTxn,
_original: (u32, u32, u32),
) -> anyhow::Result<()> {
Ok(())
}
fn target_version(&self) -> (u32, u32, u32) {
(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
}
}
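
For context, here is a minimal sketch of what a migration could look like under the new trait shape; the struct name, version threshold, and body are illustrative assumptions, not part of this change:

```rust
use meilisearch_types::heed::{Env, RwTxn, WithoutTls};

// Hypothetical migration, invented for the example.
struct RebuildTaskIndexes;

impl UpgradeIndexScheduler for RebuildTaskIndexes {
    fn upgrade(&self, _env: &Env<WithoutTls>, _wtxn: &mut RwTxn) -> anyhow::Result<()> {
        // The actual schema change would run here, inside the shared wtxn.
        Ok(())
    }

    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
        // Apply only to databases created before the version that shipped the
        // change; newer databases skip the migration.
        initial_version < (1, 29, 0)
    }

    fn description(&self) -> &'static str {
        "rebuilding the task indexes"
    }
}
```

Such a migration would then be appended to the `upgrade_functions` slice (e.g. `&RebuildTaskIndexes`), and `upgrade_index_scheduler` applies, in order, every entry whose `must_upgrade` returns true, logging its `description` as it goes.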

View File

@@ -64,14 +64,7 @@ impl Versioning {
};
wtxn.commit()?;
let bin_major: u32 = versioning::VERSION_MAJOR;
let bin_minor: u32 = versioning::VERSION_MINOR;
let bin_patch: u32 = versioning::VERSION_PATCH;
let to = (bin_major, bin_minor, bin_patch);
if from != to {
upgrade_index_scheduler(env, &this, from, to)?;
}
upgrade_index_scheduler(env, &this, from)?;
// Once we reach this point, it means the upgrade process, if there was one, is entirely finished.
// We can safely say we have reached the latest version of the index scheduler.

View File

@@ -327,6 +327,7 @@ InvalidSettingsFacetSearch , InvalidRequest , BAD_REQU
InvalidSettingsPrefixSearch , InvalidRequest , BAD_REQUEST ;
InvalidSettingsFaceting , InvalidRequest , BAD_REQUEST ;
InvalidSettingsFilterableAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsForeignKeys , InvalidRequest , BAD_REQUEST ;
InvalidSettingsPagination , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSearchCutoffMs , InvalidRequest , BAD_REQUEST ;
InvalidSettingsEmbedders , InvalidRequest , BAD_REQUEST ;

View File

@@ -22,6 +22,7 @@ pub struct RuntimeTogglableFeatures {
pub chat_completions: bool,
pub multimodal: bool,
pub vector_store_setting: bool,
pub foreign_keys: bool,
}
#[derive(Default, Debug, Clone, Copy)]

View File

@@ -1,6 +1,7 @@
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Network {

View File

@@ -15,7 +15,10 @@ pub use milli::update::ChatSettings;
use milli::update::Setting;
use milli::vector::db::IndexEmbeddingConfig;
use milli::vector::VectorStoreBackend;
use milli::{Criterion, CriterionError, FilterableAttributesRule, Index, DEFAULT_VALUES_PER_FACET};
use milli::{
Criterion, CriterionError, FilterableAttributesRule, ForeignKey, Index,
DEFAULT_VALUES_PER_FACET,
};
use serde::{Deserialize, Serialize, Serializer};
use utoipa::ToSchema;
@@ -221,6 +224,12 @@ pub struct Settings<T> {
#[schema(value_type = Option<Vec<String>>, example = json!(["release_date"]))]
pub sortable_attributes: Setting<BTreeSet<String>>,
/// Foreign keys to use for cross-index filtering search.
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
#[deserr(default, error = DeserrJsonError<InvalidSettingsForeignKeys>)]
#[schema(value_type = Option<Vec<ForeignKey>>, example = json!([{"foreignIndexUid": "products", "fieldName": "productId"}]))]
pub foreign_keys: Setting<Vec<ForeignKey>>,
/// List of ranking rules sorted by order of importance. The order is customizable.
/// [A list of ordered built-in ranking rules](https://www.meilisearch.com/docs/learn/relevancy/relevancy).
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
@@ -376,6 +385,7 @@ impl Settings<Checked> {
displayed_attributes: Setting::Reset.into(),
searchable_attributes: Setting::Reset.into(),
filterable_attributes: Setting::Reset,
foreign_keys: Setting::Reset,
sortable_attributes: Setting::Reset,
ranking_rules: Setting::Reset,
stop_words: Setting::Reset,
@@ -404,6 +414,7 @@ impl Settings<Checked> {
displayed_attributes,
searchable_attributes,
filterable_attributes,
foreign_keys,
sortable_attributes,
ranking_rules,
stop_words,
@@ -431,6 +442,7 @@ impl Settings<Checked> {
searchable_attributes,
filterable_attributes,
sortable_attributes,
foreign_keys,
ranking_rules,
stop_words,
non_separator_tokens,
@@ -482,6 +494,7 @@ impl Settings<Unchecked> {
displayed_attributes: displayed_attributes.into(),
searchable_attributes: searchable_attributes.into(),
filterable_attributes: self.filterable_attributes,
foreign_keys: self.foreign_keys,
sortable_attributes: self.sortable_attributes,
ranking_rules: self.ranking_rules,
stop_words: self.stop_words,
@@ -543,6 +556,7 @@ impl Settings<Unchecked> {
.sortable_attributes
.clone()
.or(self.sortable_attributes.clone()),
foreign_keys: other.foreign_keys.clone().or(self.foreign_keys.clone()),
ranking_rules: other.ranking_rules.clone().or(self.ranking_rules.clone()),
stop_words: other.stop_words.clone().or(self.stop_words.clone()),
non_separator_tokens: other
@@ -604,6 +618,7 @@ pub fn apply_settings_to_builder(
searchable_attributes,
filterable_attributes,
sortable_attributes,
foreign_keys,
ranking_rules,
stop_words,
non_separator_tokens,
@@ -651,6 +666,12 @@ pub fn apply_settings_to_builder(
Setting::NotSet => (),
}
match foreign_keys {
Setting::Set(ref keys) => builder.set_foreign_keys(keys.clone().into_iter().collect()),
Setting::Reset => builder.reset_foreign_keys(),
Setting::NotSet => (),
}
match ranking_rules {
Setting::Set(ref criteria) => {
builder.set_criteria(criteria.iter().map(|c| c.clone().into()).collect())
@@ -868,6 +889,8 @@ pub fn settings(
let sortable_attributes = index.sortable_fields(rtxn)?.into_iter().collect();
let foreign_keys = index.foreign_keys(rtxn)?.into_iter().collect();
let criteria = index.criteria(rtxn)?;
let stop_words = index
@@ -965,6 +988,7 @@ pub fn settings(
.into(),
filterable_attributes: Setting::Set(filterable_attributes),
sortable_attributes: Setting::Set(sortable_attributes),
foreign_keys: Setting::Set(foreign_keys),
ranking_rules: Setting::Set(criteria.iter().map(|c| c.clone().into()).collect()),
stop_words: Setting::Set(stop_words),
non_separator_tokens: Setting::Set(non_separator_tokens),
@@ -1207,6 +1231,7 @@ pub(crate) mod test {
searchable_attributes: Setting::Set(vec![String::from("hello")]).into(),
filterable_attributes: Setting::NotSet,
sortable_attributes: Setting::NotSet,
foreign_keys: Setting::NotSet,
ranking_rules: Setting::NotSet,
stop_words: Setting::NotSet,
non_separator_tokens: Setting::NotSet,
@@ -1240,6 +1265,7 @@ pub(crate) mod test {
.into(),
filterable_attributes: Setting::NotSet,
sortable_attributes: Setting::NotSet,
foreign_keys: Setting::NotSet,
ranking_rules: Setting::NotSet,
stop_words: Setting::NotSet,
non_separator_tokens: Setting::NotSet,

View File

@@ -1,7 +1,7 @@
use std::any::TypeId;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::Arc;
use std::time::{Duration, Instant};
@@ -208,6 +208,7 @@ struct Infos {
experimental_no_edition_2024_for_prefix_post_processing: bool,
experimental_no_edition_2024_for_facet_post_processing: bool,
experimental_vector_store_setting: bool,
experimental_foreign_keys: bool,
experimental_personalization: bool,
gpu_enabled: bool,
db_path: bool,
@@ -317,6 +318,7 @@ impl Infos {
chat_completions,
multimodal,
vector_store_setting,
foreign_keys,
} = features;
// We're going to override all sensitive information.
@@ -343,15 +345,16 @@ impl Infos {
experimental_no_snapshot_compaction,
experimental_no_edition_2024_for_dumps,
experimental_vector_store_setting: vector_store_setting,
experimental_foreign_keys: foreign_keys,
gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(),
db_path: db_path != PathBuf::from("./data.ms"),
db_path: db_path != Path::new("./data.ms"),
import_dump: import_dump.is_some(),
dump_dir: dump_dir != PathBuf::from("dumps/"),
dump_dir: dump_dir != Path::new("dumps/"),
ignore_missing_dump,
ignore_dump_if_db_exists,
import_snapshot: import_snapshot.is_some(),
schedule_snapshot,
snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
snapshot_dir: snapshot_dir != Path::new("snapshots/"),
uses_s3_snapshots: s3_snapshot_options.is_some(),
ignore_missing_snapshot,
ignore_snapshot_if_db_exists,

View File

@@ -56,6 +56,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
chat_completions: Some(false),
multimodal: Some(false),
vector_store_setting: Some(false),
foreign_keys: Some(false),
})),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
{
@@ -106,6 +107,8 @@ pub struct RuntimeTogglableFeatures {
pub multimodal: Option<bool>,
#[deserr(default)]
pub vector_store_setting: Option<bool>,
#[deserr(default)]
pub foreign_keys: Option<bool>,
}
impl From<meilisearch_types::features::RuntimeTogglableFeatures> for RuntimeTogglableFeatures {
@@ -121,6 +124,7 @@ impl From<meilisearch_types::features::RuntimeTogglableFeatures> for RuntimeTogg
chat_completions,
multimodal,
vector_store_setting,
foreign_keys,
} = value;
Self {
@@ -134,6 +138,7 @@ impl From<meilisearch_types::features::RuntimeTogglableFeatures> for RuntimeTogg
chat_completions: Some(chat_completions),
multimodal: Some(multimodal),
vector_store_setting: Some(vector_store_setting),
foreign_keys: Some(foreign_keys),
}
}
}
@@ -150,6 +155,7 @@ pub struct PatchExperimentalFeatureAnalytics {
chat_completions: bool,
multimodal: bool,
vector_store_setting: bool,
foreign_keys: bool,
}
impl Aggregate for PatchExperimentalFeatureAnalytics {
@@ -169,6 +175,7 @@ impl Aggregate for PatchExperimentalFeatureAnalytics {
chat_completions: new.chat_completions,
multimodal: new.multimodal,
vector_store_setting: new.vector_store_setting,
foreign_keys: new.foreign_keys,
})
}
@@ -197,6 +204,7 @@ impl Aggregate for PatchExperimentalFeatureAnalytics {
chat_completions: Some(false),
multimodal: Some(false),
vector_store_setting: Some(false),
foreign_keys: Some(false),
})),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
{
@@ -244,6 +252,7 @@ async fn patch_features(
.0
.vector_store_setting
.unwrap_or(old_features.vector_store_setting),
foreign_keys: new_features.0.foreign_keys.unwrap_or(old_features.foreign_keys),
};
// explicitly destructure for analytics rather than using the `Serialize` implementation, because
@@ -260,6 +269,7 @@ async fn patch_features(
chat_completions,
multimodal,
vector_store_setting,
foreign_keys,
} = new_features;
analytics.publish(
@@ -274,6 +284,7 @@ async fn patch_features(
chat_completions,
multimodal,
vector_store_setting,
foreign_keys,
},
&req,
);
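
For reference, a sketch of the toggle request this handler accepts, assuming the route keeps its existing `PATCH /experimental-features` shape; omitted fields deserialize to `None` (the `#[deserr(default)]` attributes above), so the `unwrap_or(old_features.*)` fallbacks preserve their previous values:

```rust
use serde_json::json;

fn main() {
    // Only the flag being toggled needs to be sent; every omitted feature
    // keeps its previous value on the server side.
    let body = json!({ "foreignKeys": true });
    println!("{body}");
}
```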

View File

@@ -531,6 +531,17 @@ make_setting_routes!(
camelcase_attr: "vectorStore",
analytics: VectorStoreAnalytics
},
{
route: "/foreign-keys",
update_verb: put,
value_type: Vec<meilisearch_types::milli::ForeignKey>,
err_type: meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsForeignKeys,
>,
attr: foreign_keys,
camelcase_attr: "foreignKeys",
analytics: ForeignKeysAnalytics
},
);
#[utoipa::path(
@@ -595,6 +606,7 @@ pub async fn update_all(
filterable_attributes: FilterableAttributesAnalytics::new(
new_settings.filterable_attributes.as_ref().set(),
),
foreign_keys: ForeignKeysAnalytics::new(new_settings.foreign_keys.as_ref().set()),
distinct_attribute: DistinctAttributeAnalytics::new(
new_settings.distinct_attribute.as_ref().set(),
),
@@ -688,6 +700,10 @@ pub async fn get_all(
new_settings.vector_store = Setting::NotSet;
}
if features.check_foreign_keys_setting("showing index `foreignKeys` settings").is_err() {
new_settings.foreign_keys = Setting::NotSet;
}
debug!(returns = ?new_settings, "Get all settings");
Ok(HttpResponse::Ok().json(new_settings))
}
@@ -793,5 +809,9 @@ fn validate_settings(
features.check_vector_store_setting("setting `vectorStore` in the index settings")?;
}
if let Setting::Set(_) = &settings.foreign_keys {
features.check_foreign_keys_setting("setting `foreignKeys` in the index settings")?;
}
Ok(settings.validate()?)
}
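
And a sketch of the matching payload for the new route, reusing the `foreignIndexUid`/`fieldName` shape from the schema example earlier in the settings diff; the index and field names are placeholders:

```rust
use serde_json::json;

fn main() {
    // Hypothetical body for `PUT /indexes/{index_uid}/settings/foreign-keys`:
    // each entry maps a local field onto the documents of another index.
    let payload = json!([
        { "foreignIndexUid": "products", "fieldName": "productId" }
    ]);
    println!("{payload}");
}
```

Since `validate_settings` gates the field behind `check_foreign_keys_setting`, such a request is rejected unless the `foreignKeys` experimental feature is enabled.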

View File

@@ -9,7 +9,7 @@ use meilisearch_types::facet_values_sort::FacetValuesSort;
use meilisearch_types::locales::{Locale, LocalizedAttributesRuleView};
use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::vector::VectorStoreBackend;
use meilisearch_types::milli::FilterableAttributesRule;
use meilisearch_types::milli::{FilterableAttributesRule, ForeignKey};
use meilisearch_types::settings::{
ChatSettings, FacetingSettings, PaginationSettings, PrefixSearchSettings,
ProximityPrecisionView, RankingRuleView, SettingEmbeddingSettings, TypoSettings,
@@ -25,6 +25,7 @@ pub struct SettingsAnalytics {
pub displayed_attributes: DisplayedAttributesAnalytics,
pub sortable_attributes: SortableAttributesAnalytics,
pub filterable_attributes: FilterableAttributesAnalytics,
pub foreign_keys: ForeignKeysAnalytics,
pub distinct_attribute: DistinctAttributeAnalytics,
pub proximity_precision: ProximityPrecisionAnalytics,
pub typo_tolerance: TypoToleranceAnalytics,
@@ -98,6 +99,10 @@ impl Aggregate for SettingsAnalytics {
.has_patterns
.or(self.filterable_attributes.has_patterns),
},
foreign_keys: ForeignKeysAnalytics {
set: new.foreign_keys.set | self.foreign_keys.set,
total: new.foreign_keys.total.or(self.foreign_keys.total),
},
distinct_attribute: DistinctAttributeAnalytics {
set: self.distinct_attribute.set | new.distinct_attribute.set,
},
@@ -362,6 +367,22 @@ impl FilterableAttributesAnalytics {
}
}
#[derive(Serialize, Default)]
pub struct ForeignKeysAnalytics {
pub set: bool,
pub total: Option<usize>,
}
impl ForeignKeysAnalytics {
pub fn new(settings: Option<&Vec<ForeignKey>>) -> Self {
Self { set: settings.is_some(), total: settings.as_ref().map(|s| s.len()) }
}
pub fn into_settings(self) -> SettingsAnalytics {
SettingsAnalytics { foreign_keys: self, ..Default::default() }
}
}
#[derive(Serialize, Default)]
pub struct DistinctAttributeAnalytics {
pub set: bool,

View File

@@ -10,12 +10,17 @@ use actix_http::StatusCode;
use index_scheduler::{IndexScheduler, RoFeatures};
use itertools::Itertools;
use meilisearch_types::error::ResponseError;
use meilisearch_types::heed::RoTxn;
use meilisearch_types::milli::order_by_map::OrderByMap;
use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue};
use meilisearch_types::milli::vector::Embedding;
use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET};
use meilisearch_types::milli::{
self, DocumentId, ForeignKey, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET,
};
use meilisearch_types::network::{Network, Remote};
use permissive_json_pointer::map_leaf_values;
use roaring::RoaringBitmap;
use serde_json::Value;
use tokio::task::JoinHandle;
use uuid::Uuid;
@@ -1075,7 +1080,7 @@ impl SearchByIndex {
},
)
.collect();
let merged_result = merged_result?;
let mut merged_result = merged_result?;
let estimated_total_hits = candidates.len() as usize;
let facets = facets_by_index
.map(|facets_by_index| {
@@ -1100,6 +1105,9 @@ impl SearchByIndex {
);
error
})?;
let foreign_keys = index.foreign_keys(&rtxn)?;
hydrate_documents(&mut merged_result, &foreign_keys, &params.index_scheduler)?;
self.results_by_index.push(SearchResultByIndex {
index: index_uid,
primary_key,
@@ -1272,3 +1280,87 @@ impl FacetOrder {
(facet_distribution, facet_stats, facets_by_index)
}
}
fn hydrate_documents(
documents: &mut Vec<SearchHitByIndex>,
foreign_keys: &[ForeignKey],
index_scheduler: &IndexScheduler,
) -> Result<(), ResponseError> {
for foreign_key in foreign_keys {
let index = index_scheduler.index(&foreign_key.foreign_index_uid)?;
let rtxn = index.read_txn()?;
// TODO: replace with a simpler formatter
let attributes_format = AttributesFormat {
attributes_to_retrieve: None,
retrieve_vectors: RetrieveVectors::Hide,
attributes_to_highlight: None,
attributes_to_crop: None,
crop_length: 0,
crop_marker: "".to_string(),
highlight_pre_tag: "".to_string(),
highlight_post_tag: "".to_string(),
show_matches_position: false,
sort: None,
show_ranking_score: false,
show_ranking_score_details: false,
locales: None,
};
let tokenizer = HitMaker::tokenizer(None, None);
let formatter_builder =
HitMaker::formatter_builder(milli::MatchingWords::default(), tokenizer);
let hit_maker = HitMaker::new(&index, &rtxn, attributes_format, formatter_builder)?;
for document in documents.iter_mut() {
let mut res = Ok(());
map_leaf_values(
&mut document.hit.document,
[foreign_key.field_name.as_str()],
|_key, _array_indices, value| {
res = hydrate_document_value(
&rtxn,
&hit_maker,
&index.external_documents_ids(),
value,
);
},
);
// TODO: trick to avoid modifying the map_leaf_values signature; we could use a custom map_leaf_values that returns a Result instead
res?;
}
}
Ok(())
}
fn hydrate_document_value(
rtxn: &RoTxn,
hit_maker: &HitMaker,
external_documents_ids: &milli::ExternalDocumentsIds,
value: &mut Value,
) -> Result<(), ResponseError> {
match value {
Value::String(inner_value) => {
let Some(docid) = external_documents_ids.get(rtxn, inner_value)? else {
todo!("Document not found")
};
let SearchHit { document, .. } = hit_maker.make_hit(docid, &[])?;
*value = Value::Object(document);
}
Value::Number(inner_value) => {
let Some(docid) = external_documents_ids.get(rtxn, inner_value.to_string())? else {
todo!("Document not found")
};
let SearchHit { document, .. } = hit_maker.make_hit(docid, &[])?;
*value = Value::Object(document);
}
Value::Array(values) => {
for value in values {
hydrate_document_value(rtxn, hit_maker, external_documents_ids, value)?;
}
}
_ => unreachable!("Invalid foreign key value type: {value:?}"),
}
Ok(())
}
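
To illustrate the effect (document shapes invented for the example): hydration rewrites each foreign-key field of a hit from an external document id into the referenced document, recursing into arrays element by element:

```rust
use serde_json::json;

fn main() {
    // Before hydration: the hit stores only the external id of the foreign document.
    let before = json!({ "id": 1, "productId": "p-42" });
    // After hydration: the scalar is replaced by the document built through the
    // hit maker; its exact shape depends on the foreign index.
    let after = json!({ "id": 1, "productId": { "id": "p-42", "name": "Ball" } });
    assert_ne!(before, after);
}
```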

View File

@@ -789,11 +789,12 @@ impl TryFrom<Value> for ExternalDocumentId {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserr, ToSchema, Serialize)]
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Deserr, ToSchema, Serialize)]
#[deserr(rename_all = camelCase)]
#[serde(rename_all = "camelCase")]
pub enum MatchingStrategy {
/// Remove query words from last to first
#[default]
Last,
/// All query words are mandatory
All,
@@ -801,12 +802,6 @@ pub enum MatchingStrategy {
Frequency,
}
impl Default for MatchingStrategy {
fn default() -> Self {
Self::Last
}
}
impl From<MatchingStrategy> for TermsMatchingStrategy {
fn from(other: MatchingStrategy) -> Self {
match other {

View File

@@ -187,7 +187,7 @@ macro_rules! compute_forbidden_search {
#[actix_rt::test]
async fn search_authorized_simple_token() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -239,7 +239,7 @@ async fn search_authorized_simple_token() {
#[actix_rt::test]
async fn search_authorized_filter_token() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -292,7 +292,7 @@ async fn search_authorized_filter_token() {
#[actix_rt::test]
async fn filter_search_authorized_filter_token() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -353,7 +353,7 @@ async fn filter_search_authorized_filter_token() {
/// Tests that these tenant tokens are incompatible with the REFUSED_KEYS defined above.
#[actix_rt::test]
async fn error_search_token_forbidden_parent_key() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -389,7 +389,7 @@ async fn error_search_token_forbidden_parent_key() {
#[actix_rt::test]
async fn error_search_forbidden_token() {
let tenant_tokens = vec![
let tenant_tokens = [
// bad index
hashmap! {
"searchRules" => json!({"products": {}}),

View File

@@ -680,7 +680,7 @@ async fn multi_search_authorized_simple_token() {
#[actix_rt::test]
async fn single_search_authorized_filter_token() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -733,7 +733,7 @@ async fn single_search_authorized_filter_token() {
#[actix_rt::test]
async fn multi_search_authorized_filter_token() {
let both_tenant_tokens = vec![
let both_tenant_tokens = [
hashmap! {
"searchRules" => json!({"sales": {"filter": "color = blue"}, "products": {"filter": "doggos.age <= 5"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -842,7 +842,7 @@ async fn filter_single_search_authorized_filter_token() {
#[actix_rt::test]
async fn filter_multi_search_authorized_filter_token() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"sales": {"filter": "color = blue"}, "products": {"filter": "doggos.age <= 5"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -900,7 +900,7 @@ async fn filter_multi_search_authorized_filter_token() {
/// Tests that these tenant tokens are incompatible with the REFUSED_KEYS defined above.
#[actix_rt::test]
async fn error_single_search_token_forbidden_parent_key() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
@@ -941,7 +941,7 @@ async fn error_single_search_token_forbidden_parent_key() {
/// Tests that these tenant tokens are incompatible with the REFUSED_KEYS defined above.
#[actix_rt::test]
async fn error_multi_search_token_forbidden_parent_key() {
let tenant_tokens = vec![
let tenant_tokens = [
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())

View File

@@ -237,6 +237,7 @@ async fn import_dump_v1_movie_with_settings() {
"sortableAttributes": [
"genres"
],
"foreignKeys": [],
"rankingRules": [
"typo",
"words",
@@ -411,6 +412,7 @@ async fn import_dump_v1_rubygems_with_settings() {
"sortableAttributes": [
"version"
],
"foreignKeys": [],
"rankingRules": [
"typo",
"words",
@@ -740,6 +742,7 @@ async fn import_dump_v2_movie_with_settings() {
"genres"
],
"sortableAttributes": [],
"foreignKeys": [],
"rankingRules": [
"words",
"typo",
@@ -911,6 +914,7 @@ async fn import_dump_v2_rubygems_with_settings() {
"version"
],
"sortableAttributes": [],
"foreignKeys": [],
"rankingRules": [
"typo",
"words",
@@ -1240,6 +1244,7 @@ async fn import_dump_v3_movie_with_settings() {
"genres"
],
"sortableAttributes": [],
"foreignKeys": [],
"rankingRules": [
"words",
"typo",
@@ -1411,6 +1416,7 @@ async fn import_dump_v3_rubygems_with_settings() {
"version"
],
"sortableAttributes": [],
"foreignKeys": [],
"rankingRules": [
"typo",
"words",
@@ -1740,6 +1746,7 @@ async fn import_dump_v4_movie_with_settings() {
"genres"
],
"sortableAttributes": [],
"foreignKeys": [],
"rankingRules": [
"words",
"typo",
@@ -1911,6 +1918,7 @@ async fn import_dump_v4_rubygems_with_settings() {
"version"
],
"sortableAttributes": [],
"foreignKeys": [],
"rankingRules": [
"typo",
"words",
@@ -2190,7 +2198,8 @@ async fn import_dump_v6_containing_experimental_features() {
"compositeEmbedders": false,
"chatCompletions": false,
"multimodal": false,
"vectorStoreSetting": false
"vectorStoreSetting": false,
"foreignKeys": false
}
"###);

View File

@@ -27,7 +27,8 @@ async fn experimental_features() {
"compositeEmbedders": false,
"chatCompletions": false,
"multimodal": false,
"vectorStoreSetting": false
"vectorStoreSetting": false,
"foreignKeys": false
}
"###);
@@ -45,7 +46,8 @@ async fn experimental_features() {
"compositeEmbedders": false,
"chatCompletions": false,
"multimodal": false,
"vectorStoreSetting": false
"vectorStoreSetting": false,
"foreignKeys": false
}
"###);
@@ -63,7 +65,8 @@ async fn experimental_features() {
"compositeEmbedders": false,
"chatCompletions": false,
"multimodal": false,
"vectorStoreSetting": false
"vectorStoreSetting": false,
"foreignKeys": false
}
"###);
@@ -82,7 +85,8 @@ async fn experimental_features() {
"compositeEmbedders": false,
"chatCompletions": false,
"multimodal": false,
"vectorStoreSetting": false
"vectorStoreSetting": false,
"foreignKeys": false
}
"###);
@@ -101,7 +105,8 @@ async fn experimental_features() {
"compositeEmbedders": false,
"chatCompletions": false,
"multimodal": false,
"vectorStoreSetting": false
"vectorStoreSetting": false,
"foreignKeys": false
}
"###);
}
@@ -127,7 +132,8 @@ async fn experimental_feature_metrics() {
"compositeEmbedders": false,
"chatCompletions": false,
"multimodal": false,
"vectorStoreSetting": false
"vectorStoreSetting": false,
"foreignKeys": false
}
"###);
@@ -174,7 +180,7 @@ async fn errors() {
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
{
"message": "Unknown field `NotAFeature`: expected one of `metrics`, `logsRoute`, `editDocumentsByFunction`, `containsFilter`, `network`, `getTaskDocumentsRoute`, `compositeEmbedders`, `chatCompletions`, `multimodal`, `vectorStoreSetting`",
"message": "Unknown field `NotAFeature`: expected one of `metrics`, `logsRoute`, `editDocumentsByFunction`, `containsFilter`, `network`, `getTaskDocumentsRoute`, `compositeEmbedders`, `chatCompletions`, `multimodal`, `vectorStoreSetting`, `foreignKeys`",
"code": "bad_request",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#bad_request"

View File

@@ -197,7 +197,7 @@ test_setting_routes!(
{
setting: vector_store,
update_verb: patch,
default_value: null
default_value: "experimental"
},
);
@@ -318,6 +318,7 @@ async fn secrets_are_hidden_in_settings() {
],
"filterableAttributes": [],
"sortableAttributes": [],
"foreignKeys": [],
"rankingRules": [
"words",
"typo",

View File

@@ -42,8 +42,16 @@ async fn version_too_old() {
std::fs::create_dir_all(&db_path).unwrap();
std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.28.2");
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err().to_string();
let major = meilisearch_types::versioning::VERSION_MAJOR;
let minor = meilisearch_types::versioning::VERSION_MINOR;
let patch = meilisearch_types::versioning::VERSION_PATCH;
let current_version = format!("{major}.{minor}.{patch}");
let err = err.replace(&current_version, "[current version]");
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v[current version]");
}
#[actix_rt::test]
@@ -54,11 +62,21 @@ async fn version_requires_downgrade() {
std::fs::create_dir_all(&db_path).unwrap();
let major = meilisearch_types::versioning::VERSION_MAJOR;
let minor = meilisearch_types::versioning::VERSION_MINOR;
let patch = meilisearch_types::versioning::VERSION_PATCH + 1;
std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
let mut patch = meilisearch_types::versioning::VERSION_PATCH;
let current_version = format!("{major}.{minor}.{patch}");
patch += 1;
let future_version = format!("{major}.{minor}.{patch}");
std::fs::write(db_path.join("VERSION"), &future_version).unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.28.3 is higher than the Meilisearch version 1.28.2. Downgrade is not supported");
let err = err.to_string();
let err = err.replace(&current_version, "[current version]");
let err = err.replace(&future_version, "[future version]");
snapshot!(err, @"Database version [future version] is higher than the Meilisearch version [current version]. Downgrade is not supported");
}
#[actix_rt::test]

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"error": null,
"duration": "[duration]",

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.28.2"
"upgradeTo": "[current version]"
},
"error": null,
"duration": "[duration]",

View File

@@ -166,55 +166,55 @@ async fn check_the_index_scheduler(server: &Server) {
// We rewrite the first task for all calls because it may be the upgrade-database task, whose dates and duration are unknown.
// The other tasks should NOT change.
let (tasks, _) = server.tasks_filter("limit=1000").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "the_whole_task_queue_once_everything_has_been_processed");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "the_whole_task_queue_once_everything_has_been_processed");
let (batches, _) = server.batches_filter("limit=1000").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "the_whole_batch_queue_once_everything_has_been_processed");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "the_whole_batch_queue_once_everything_has_been_processed");
// Tests all the tasks query parameters
let (tasks, _) = server.tasks_filter("uids=10").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_uids_equal_10");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_uids_equal_10");
let (tasks, _) = server.tasks_filter("batchUids=10").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_batchUids_equal_10");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_batchUids_equal_10");
let (tasks, _) = server.tasks_filter("statuses=canceled").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_statuses_equal_canceled");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_statuses_equal_canceled");
// types has already been tested above to retrieve the upgrade database
let (tasks, _) = server.tasks_filter("canceledBy=19").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_canceledBy_equal_19");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_canceledBy_equal_19");
let (tasks, _) = server.tasks_filter("beforeEnqueuedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
let (tasks, _) = server.tasks_filter("afterEnqueuedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
let (tasks, _) = server.tasks_filter("beforeStartedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
let (tasks, _) = server.tasks_filter("afterStartedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterStartedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterStartedAt_equal_2025-01-16T16_47_41");
let (tasks, _) = server.tasks_filter("beforeFinishedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
let (tasks, _) = server.tasks_filter("afterFinishedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
// Tests all the batches query parameters
let (batches, _) = server.batches_filter("uids=10").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_uids_equal_10");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_uids_equal_10");
let (batches, _) = server.batches_filter("batchUids=10").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_batchUids_equal_10");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_batchUids_equal_10");
let (batches, _) = server.batches_filter("statuses=canceled").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_statuses_equal_canceled");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_statuses_equal_canceled");
// the `types` filter has already been tested above to retrieve the upgrade database task
let (batches, _) = server.batches_filter("canceledBy=19").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_canceledBy_equal_19");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_canceledBy_equal_19");
let (batches, _) = server.batches_filter("beforeEnqueuedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
let (batches, _) = server.batches_filter("afterEnqueuedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
let (batches, _) = server.batches_filter("beforeStartedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
let (batches, _) = server.batches_filter("afterStartedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterStartedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterStartedAt_equal_2025-01-16T16_47_41");
let (batches, _) = server.batches_filter("beforeFinishedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
let (batches, _) = server.batches_filter("afterFinishedAt=2025-01-16T16:47:41Z").await;
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
let (stats, _) = server.stats().await;
assert_json_snapshot!(stats, {

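Every snapshot above relies on the same redaction idea: volatile fields (durations, dates, and now details.upgradeTo) are swapped for stable placeholders before comparison, which is what makes these snapshots independent of the release version. A minimal sketch of that redaction step using plain serde_json; the redact helper is hypothetical and only mirrors what json_string!'s inline map does:

use serde_json::{json, Value};

/// Hypothetical helper mirroring `json_string!`'s redaction map:
/// replace a volatile field of every result with a stable placeholder.
fn redact(mut value: Value, field: &str, placeholder: &str) -> Value {
    if let Some(results) = value["results"].as_array_mut() {
        for result in results {
            if !result[field].is_null() {
                result[field] = json!(placeholder);
            }
        }
    }
    value
}

fn main() {
    let tasks = json!({ "results": [{ "uid": 10, "enqueuedAt": "2025-01-16T16:47:41Z" }] });
    let redacted = redact(tasks, "enqueuedAt", "[date]");
    assert_eq!(redacted["results"][0]["enqueuedAt"], "[date]");
}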
View File

@@ -104,8 +104,8 @@ async fn binary_quantize_before_sending_documents() {
"manual": {
"embeddings": [
[
-1.0,
-1.0,
0.0,
0.0,
1.0
]
],
@@ -122,7 +122,7 @@ async fn binary_quantize_before_sending_documents() {
[
1.0,
1.0,
-1.0
0.0
]
],
"regenerate": false
@@ -191,8 +191,8 @@ async fn binary_quantize_after_sending_documents() {
"manual": {
"embeddings": [
[
-1.0,
-1.0,
0.0,
0.0,
1.0
]
],
@@ -209,7 +209,7 @@ async fn binary_quantize_after_sending_documents() {
[
1.0,
1.0,
-1.0
0.0
]
],
"regenerate": false

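These snapshot updates suggest the hannoy bump changed how binary-quantized embeddings are rendered back to the user: components that used to come back as -1.0 now come back as 0.0, while positive components stay 1.0. A minimal sketch of sign-based binary quantization under the new convention; the function is illustrative, not Meilisearch's actual code:

/// Quantize each component to one bit and render it as 0.0 or 1.0:
/// strictly positive values map to 1.0, everything else to 0.0.
fn binary_quantize(embedding: &[f32]) -> Vec<f32> {
    embedding.iter().map(|&v| if v > 0.0 { 1.0 } else { 0.0 }).collect()
}

fn main() {
    // Mirrors the snapshot change above: [-1.2, -0.3, 0.4] used to render
    // as [-1.0, -1.0, 1.0] and now renders as [0.0, 0.0, 1.0].
    assert_eq!(binary_quantize(&[-1.2, -0.3, 0.4]), vec![0.0, 0.0, 1.0]);
}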
View File

@@ -0,0 +1,43 @@
use meili_snap::snapshot;
use crate::common::{GetAllDocumentsOptions, Server};
use crate::json;
#[actix_rt::test]
async fn hf_bge_m3_force_cls_settings() {
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
"embedders": {
"default": {
"source": "huggingFace",
"model": "baai/bge-m3",
"revision": "5617a9f61b028005a4858fdac845db406aefb181",
"pooling": "forceCls",
// minimal template to allow potential document embedding if used later
"documentTemplate": "{{doc.title}}"
}
}
}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await.succeeded();
// Try to embed one simple document
let (task, code) =
index.add_documents(json!([{ "id": 1, "title": "Hello world" }]), None).await;
snapshot!(code, @"202 Accepted");
server.wait_task(task.uid()).await.succeeded();
// Retrieve the document with vectors and assert embeddings were produced
let (documents, _code) = index
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
.await;
let has_vectors = documents["results"][0]["_vectors"]["default"]["embeddings"]
.as_array()
.map(|a| !a.is_empty())
.unwrap_or(false);
snapshot!(has_vectors, @"true");
}

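The test pins pooling to forceCls, i.e. it takes the embedding of the [CLS] token instead of whatever the model's own pooling config requests. To the best of our knowledge the huggingFace source also accepts useModel (the default) and forceMean; the same settings call with mean pooling would look like the sketch below, reusing the test's model assumption:

let (response, code) = index
    .update_settings(json!({
        "embedders": {
            "default": {
                "source": "huggingFace",
                "model": "baai/bge-m3",
                // average all token embeddings instead of taking [CLS]
                "pooling": "forceMean",
                "documentTemplate": "{{doc.title}}"
            }
        }
    }))
    .await;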
View File

@@ -1,5 +1,6 @@
mod binary_quantized;
mod fragments;
mod huggingface;
#[cfg(feature = "test-ollama")]
mod ollama;
mod openai;

View File

@@ -91,7 +91,7 @@ rhai = { version = "1.23.6", features = [
"sync",
] }
arroy = "0.6.4-nested-rtxns"
hannoy = { version = "0.0.9-nested-rtxns-2", features = ["arroy"] }
hannoy = { version = "0.1.0-nested-rtxns", features = ["arroy"] }
rand = "0.8.5"
tracing = "0.1.41"
ureq = { version = "2.12.1", features = ["json"] }

View File

@@ -53,6 +53,7 @@ pub mod main_key {
pub const HIDDEN_FACETED_FIELDS_KEY: &str = "hidden-faceted-fields";
pub const FILTERABLE_FIELDS_KEY: &str = "filterable-fields";
pub const SORTABLE_FIELDS_KEY: &str = "sortable-fields";
pub const FOREIGN_KEYS_KEY: &str = "foreign-keys";
pub const FIELD_DISTRIBUTION_KEY: &str = "fields-distribution";
pub const FIELDS_IDS_MAP_KEY: &str = "fields-ids-map";
pub const FIELDIDS_WEIGHTS_MAP_KEY: &str = "fieldids-weights-map";
@@ -281,6 +282,9 @@ impl Index {
&mut wtxn,
(constants::VERSION_MAJOR, constants::VERSION_MINOR, constants::VERSION_PATCH),
)?;
// Databases created before v1.29 defaulted to arroy, so we need to store the
// backend explicitly now that the default for new indexes is hannoy.
this.put_vector_store(&mut wtxn, VectorStoreBackend::Hannoy)?;
}
wtxn.commit()?;

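The update_foreign_keys step further down calls index.put_foreign_keys and index.delete_foreign_keys, which this diff does not show. A plausible sketch, assuming they follow the same pattern as the other main-database accessors and store the keys under the new FOREIGN_KEYS_KEY; the signatures are guesses, not the real implementation:

use heed::types::{SerdeJson, Str};
use heed::RwTxn;

impl Index {
    /// Hypothetical sketch: persist the foreign keys setting.
    pub fn put_foreign_keys(&self, wtxn: &mut RwTxn, keys: &Vec<ForeignKey>) -> Result<()> {
        self.main
            .remap_types::<Str, SerdeJson<Vec<ForeignKey>>>()
            .put(wtxn, main_key::FOREIGN_KEYS_KEY, keys)?;
        Ok(())
    }

    /// Hypothetical sketch: drop the setting entirely on reset.
    pub fn delete_foreign_keys(&self, wtxn: &mut RwTxn) -> Result<bool> {
        let db = self.main.remap_types::<Str, SerdeJson<Vec<ForeignKey>>>();
        Ok(db.delete(wtxn, main_key::FOREIGN_KEYS_KEY)?)
    }
}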
View File

@@ -19,6 +19,7 @@ mod external_documents_ids;
pub mod facet;
mod fields_ids_map;
mod filterable_attributes_rules;
mod foreign_key;
pub mod heed_codec;
pub mod index;
mod localized_attributes_rules;
@@ -71,6 +72,7 @@ pub use self::filterable_attributes_rules::{
FilterFeatures, FilterableAttributesFeatures, FilterableAttributesPatterns,
FilterableAttributesRule,
};
pub use self::foreign_key::ForeignKey;
pub use self::heed_codec::{
BEU16StrCodec, BEU32StrCodec, BoRoaringBitmapCodec, BoRoaringBitmapLenCodec,
CboRoaringBitmapCodec, CboRoaringBitmapLenCodec, FieldIdWordCountCodec, ObkvCodec,

View File

@@ -385,9 +385,10 @@ pub struct SearchResult {
pub query_vector: Option<Embedding>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum TermsMatchingStrategy {
// remove last word first
#[default]
Last,
// all words are mandatory
All,
@@ -395,12 +396,6 @@ pub enum TermsMatchingStrategy {
Frequency,
}
impl Default for TermsMatchingStrategy {
fn default() -> Self {
Self::Last
}
}
impl From<MatchingStrategy> for TermsMatchingStrategy {
fn from(other: MatchingStrategy) -> Self {
match other {

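This change, and the identical ones to IndexDocumentsMethod and Setting<T> further down, replaces a hand-written Default impl with derive(Default) plus the #[default] variant attribute, stable since Rust 1.62 and therefore well within the Rust 1.91.1 toolchain this branch moves to. The mechanics in isolation:

#[derive(Default, Debug, PartialEq)]
enum Strategy {
    // The derive expands to an `impl Default` returning the tagged variant.
    #[default]
    Last,
    All,
    Frequency,
}

fn main() {
    assert_eq!(Strategy::default(), Strategy::Last);
}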
View File

@@ -124,7 +124,7 @@ impl GrenadParameters {
/// This should be called from inside a rayon thread pool;
/// otherwise, it will use the global number of threads.
pub fn max_memory_by_thread(&self) -> Option<usize> {
self.max_memory.map(|max_memory| (max_memory / rayon::current_num_threads()))
self.max_memory.map(|max_memory| max_memory / rayon::current_num_threads())
}
}

View File

@@ -54,11 +54,12 @@ pub struct DocumentAdditionResult {
pub number_of_documents: u64,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[non_exhaustive]
pub enum IndexDocumentsMethod {
/// Replace the previous document with the new one,
/// removing all the already known attributes.
#[default]
ReplaceDocuments,
/// Merge the previous version of the document with the new version,
@@ -66,12 +67,6 @@ pub enum IndexDocumentsMethod {
UpdateDocuments,
}
impl Default for IndexDocumentsMethod {
fn default() -> Self {
Self::ReplaceDocuments
}
}
pub struct IndexDocuments<'t, 'i, 'a, FP, FA> {
wtxn: &'t mut heed::RwTxn<'i>,
index: &'i Index,
@@ -806,6 +801,10 @@ mod tests {
use crate::vector::db::IndexEmbeddingConfig;
use crate::{all_obkv_to_json, db_snap, Filter, FilterableAttributesRule, Search, UserError};
fn no_cancel() -> bool {
false
}
#[test]
fn simple_document_replacement() {
let index = TempIndex::new();
@@ -1985,7 +1984,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2038,7 +2037,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2057,7 +2056,7 @@ mod tests {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2127,7 +2126,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2146,7 +2145,7 @@ mod tests {
primary_key,
&document_changes,
RuntimeEmbedders::default(),
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2317,7 +2316,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2333,7 +2332,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2381,7 +2380,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2397,7 +2396,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2436,7 +2435,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2452,7 +2451,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2490,7 +2489,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2506,7 +2505,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2546,7 +2545,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2562,7 +2561,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2607,7 +2606,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2623,7 +2622,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2661,7 +2660,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2677,7 +2676,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2715,7 +2714,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2731,7 +2730,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2927,7 +2926,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -2943,7 +2942,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -2988,7 +2987,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -3004,7 +3003,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)
@@ -3046,7 +3045,7 @@ mod tests {
&rtxn,
None,
&mut new_fields_ids_map,
&|| false,
&no_cancel,
Progress::default(),
None,
)
@@ -3062,7 +3061,7 @@ mod tests {
primary_key,
&document_changes,
embedders,
&|| false,
&no_cancel,
&Progress::default(),
&Default::default(),
)

View File

@@ -45,13 +45,15 @@ use crate::vector::{
VectorStoreBackend,
};
use crate::{
ChannelCongestion, FieldId, FilterableAttributesRule, Index, LocalizedAttributesRule, Result,
ChannelCongestion, FieldId, FilterableAttributesRule, ForeignKey, Index,
LocalizedAttributesRule, Result,
};
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
pub enum Setting<T> {
Set(T),
Reset,
#[default]
NotSet,
}
@@ -71,12 +73,6 @@ where
}
}
impl<T> Default for Setting<T> {
fn default() -> Self {
Self::NotSet
}
}
impl<T> Setting<T> {
pub fn set(self) -> Option<T> {
match self {
@@ -176,6 +172,7 @@ pub struct Settings<'a, 't, 'i> {
displayed_fields: Setting<Vec<String>>,
filterable_fields: Setting<Vec<FilterableAttributesRule>>,
sortable_fields: Setting<HashSet<String>>,
foreign_keys: Setting<Vec<ForeignKey>>,
criteria: Setting<Vec<Criterion>>,
stop_words: Setting<BTreeSet<String>>,
non_separator_tokens: Setting<BTreeSet<String>>,
@@ -217,6 +214,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
displayed_fields: Setting::NotSet,
filterable_fields: Setting::NotSet,
sortable_fields: Setting::NotSet,
foreign_keys: Setting::NotSet,
criteria: Setting::NotSet,
stop_words: Setting::NotSet,
non_separator_tokens: Setting::NotSet,
@@ -278,6 +276,14 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
self.sortable_fields = Setting::Reset;
}
pub fn set_foreign_keys(&mut self, keys: Vec<ForeignKey>) {
self.foreign_keys = Setting::Set(keys);
}
pub fn reset_foreign_keys(&mut self) {
self.foreign_keys = Setting::Reset;
}
pub fn reset_criteria(&mut self) {
self.criteria = Setting::Reset;
}
@@ -822,6 +828,19 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
Ok(())
}
fn update_foreign_keys(&mut self) -> Result<()> {
match self.foreign_keys {
Setting::Set(ref keys) => {
self.index.put_foreign_keys(self.wtxn, keys)?;
}
Setting::Reset => {
self.index.delete_foreign_keys(self.wtxn)?;
}
Setting::NotSet => (),
}
Ok(())
}
fn update_criteria(&mut self) -> Result<()> {
match &self.criteria {
Setting::Set(criteria) => {
@@ -1455,6 +1474,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
self.update_sort_facet_values_by()?;
self.update_pagination_max_total_hits()?;
self.update_search_cutoff()?;
self.update_foreign_keys()?;
// could trigger re-indexing
self.update_filterable()?;
@@ -1593,6 +1613,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
displayed_fields: Setting::NotSet,
filterable_fields: Setting::NotSet,
sortable_fields: Setting::NotSet,
foreign_keys: Setting::NotSet,
criteria: Setting::NotSet,
stop_words: Setting::NotSet, // TODO (require force reindexing of searchables)
non_separator_tokens: Setting::NotSet, // TODO (require force reindexing of searchables)

View File

@@ -867,6 +867,7 @@ fn test_correct_settings_init() {
displayed_fields,
filterable_fields,
sortable_fields,
foreign_keys,
criteria,
stop_words,
non_separator_tokens,
@@ -897,6 +898,7 @@ fn test_correct_settings_init() {
assert!(matches!(displayed_fields, Setting::NotSet));
assert!(matches!(filterable_fields, Setting::NotSet));
assert!(matches!(sortable_fields, Setting::NotSet));
assert!(matches!(foreign_keys, Setting::NotSet));
assert!(matches!(criteria, Setting::NotSet));
assert!(matches!(stop_words, Setting::NotSet));
assert!(matches!(non_separator_tokens, Setting::NotSet));

View File

@@ -5,103 +5,36 @@ mod v1_15;
mod v1_16;
use heed::RwTxn;
use v1_12::{V1_12_3_To_V1_13_0, V1_12_To_V1_12_3};
use v1_13::{V1_13_0_To_V1_13_1, V1_13_1_To_Latest_V1_13};
use v1_14::Latest_V1_13_To_Latest_V1_14;
use v1_15::Latest_V1_14_To_Latest_V1_15;
use v1_16::Latest_V1_15_To_V1_16_0;
use v1_12::{FixFieldDistribution, RecomputeStats};
use v1_13::AddNewStats;
use v1_14::UpgradeArroyVersion;
use v1_15::RecomputeWordFst;
use v1_16::SwitchToMultimodal;
use crate::constants::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use crate::progress::{Progress, VariableNameStep};
use crate::{Index, InternalError, Result};
trait UpgradeIndex {
/// Returns `true` if `upgrade` should be called when the index started with version `initial_version`.
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;
/// Returns `true` if the index scheduler must regenerate its cached stats.
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
original: (u32, u32, u32),
progress: Progress,
) -> Result<bool>;
fn target_version(&self) -> (u32, u32, u32);
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool>;
/// Description of the upgrade for progress display purposes.
fn description(&self) -> &'static str;
}
const UPGRADE_FUNCTIONS: &[&dyn UpgradeIndex] = &[
&V1_12_To_V1_12_3 {},
&V1_12_3_To_V1_13_0 {},
&V1_13_0_To_V1_13_1 {},
&V1_13_1_To_Latest_V1_13 {},
&Latest_V1_13_To_Latest_V1_14 {},
&Latest_V1_14_To_Latest_V1_15 {},
&Latest_V1_15_To_V1_16_0 {},
&ToTargetNoOp { target: (1, 18, 0) },
&ToTargetNoOp { target: (1, 19, 0) },
&ToTargetNoOp { target: (1, 20, 0) },
&ToTargetNoOp { target: (1, 21, 0) },
&ToTargetNoOp { target: (1, 22, 0) },
&ToTargetNoOp { target: (1, 23, 0) },
&ToTargetNoOp { target: (1, 24, 0) },
&ToTargetNoOp { target: (1, 25, 0) },
&ToTargetNoOp { target: (1, 26, 0) },
&ToTargetNoOp { target: (1, 27, 0) },
&ToTargetNoOp { target: (1, 28, 0) },
// This is the last upgrade function; it will be called when the index is up to date.
// Any other upgrade function should be added before this one.
&ToCurrentNoOp {},
&FixFieldDistribution {},
&RecomputeStats {},
&AddNewStats {},
&UpgradeArroyVersion {},
&RecomputeWordFst {},
&SwitchToMultimodal {},
];
/// Causes a compile-time error if the argument is not in the range `0..UPGRADE_FUNCTIONS.len()`
macro_rules! function_index {
($start:expr) => {{
const _CHECK_INDEX: () = {
if $start >= $crate::update::upgrade::UPGRADE_FUNCTIONS.len() {
panic!("upgrade functions out of range")
}
};
$start
}};
}
const fn start(from: (u32, u32, u32)) -> Option<usize> {
let start = match from {
(1, 12, 0..=2) => function_index!(0),
(1, 12, 3..) => function_index!(1),
(1, 13, 0) => function_index!(2),
(1, 13, _) => function_index!(4),
(1, 14, _) => function_index!(5),
// We must handle the current version in the match because in case of a failure some indexes may have been upgraded but not others.
(1, 15, _) => function_index!(6),
(1, 16, _) | (1, 17, _) => function_index!(7),
(1, 18, _) => function_index!(8),
(1, 19, _) => function_index!(9),
(1, 20, _) => function_index!(10),
(1, 21, _) => function_index!(11),
(1, 22, _) => function_index!(12),
(1, 23, _) => function_index!(13),
(1, 24, _) => function_index!(14),
(1, 25, _) => function_index!(15),
(1, 26, _) => function_index!(16),
(1, 27, _) => function_index!(17),
(1, 28, _) => function_index!(18),
// We deliberately don't add a placeholder with (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) here, to force manual
// consideration of the dumpless upgrade.
(_major, _minor, _patch) => return None,
};
Some(start)
}
/// Causes a compile-time error if the latest package cannot be upgraded.
///
/// This serves as a reminder to consider the proper dumpless upgrade implementation when changing the package version.
const _CHECK_PACKAGE_CAN_UPGRADE: () = {
if start((VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)).is_none() {
panic!("cannot upgrade from latest package version")
}
};
/// Return true if the cached stats of the index must be regenerated
pub fn upgrade<MSP>(
wtxn: &mut RwTxn,
@@ -113,79 +46,34 @@ pub fn upgrade<MSP>(
where
MSP: Fn() -> bool + Sync,
{
let from = index.get_version(wtxn)?.unwrap_or(db_version);
let upgrade_functions = UPGRADE_FUNCTIONS;
let start =
start(from).ok_or_else(|| InternalError::CannotUpgradeToVersion(from.0, from.1, from.2))?;
let initial_version = index.get_version(wtxn)?.unwrap_or(db_version);
enum UpgradeVersion {}
let upgrade_path = &UPGRADE_FUNCTIONS[start..];
let mut current_version = from;
let mut regenerate_stats = false;
for (i, upgrade) in upgrade_path.iter().enumerate() {
for (i, upgrade) in upgrade_functions.iter().enumerate() {
if (must_stop_processing)() {
return Err(crate::Error::InternalError(InternalError::AbortedIndexation));
}
let target = upgrade.target_version();
progress.update_progress(VariableNameStep::<UpgradeVersion>::new(
format!(
"Upgrading from v{}.{}.{} to v{}.{}.{}",
current_version.0,
current_version.1,
current_version.2,
target.0,
target.1,
target.2
),
i as u32,
upgrade_path.len() as u32,
));
regenerate_stats |= upgrade.upgrade(wtxn, index, from, progress.clone())?;
index.put_version(wtxn, target)?;
current_version = target;
if upgrade.must_upgrade(initial_version) {
regenerate_stats |= upgrade.upgrade(wtxn, index, progress.clone())?;
progress.update_progress(VariableNameStep::<UpgradeVersion>::new(
upgrade.description(),
i as u32,
upgrade_functions.len() as u32,
));
} else {
progress.update_progress(VariableNameStep::<UpgradeVersion>::new(
"Skipping migration that must not be applied",
i as u32,
upgrade_functions.len() as u32,
));
}
}
index.put_version(wtxn, (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH))?;
Ok(regenerate_stats)
}
#[allow(non_camel_case_types)]
struct ToCurrentNoOp {}
impl UpgradeIndex for ToCurrentNoOp {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
Ok(false)
}
fn target_version(&self) -> (u32, u32, u32) {
(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
}
}
/// Perform no operation during the upgrade except changing to the specified target version.
#[allow(non_camel_case_types)]
struct ToTargetNoOp {
pub target: (u32, u32, u32),
}
impl UpgradeIndex for ToTargetNoOp {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
Ok(false)
}
fn target_version(&self) -> (u32, u32, u32) {
self.target
}
}

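Under the new migration-oriented scheme, adding an upgrade step means implementing the three trait methods and appending the value to UPGRADE_FUNCTIONS; slice order replaces the old start-index table, and must_upgrade decides from the initial version whether the step applies at all. A hedged sketch of what a future migration could look like; the struct, version bound, and body are invented for illustration:

struct PinVectorStoreBackend {}

impl UpgradeIndex for PinVectorStoreBackend {
    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
        // Only databases created before the backend switch need this.
        initial_version < (1, 29, 0)
    }

    fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, _progress: Progress) -> Result<bool> {
        // Hypothetical body: pre-v1.29 databases implicitly used arroy,
        // so record that explicitly now that the default is hannoy.
        index.put_vector_store(wtxn, VectorStoreBackend::Arroy)?;
        Ok(false) // cached stats are unaffected
    }

    fn description(&self) -> &'static str {
        "Pinning the vector store backend for databases created before v1.29"
    }
}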
View File

@@ -4,17 +4,10 @@ use super::UpgradeIndex;
use crate::progress::Progress;
use crate::{make_enum_progress, Index, Result};
#[allow(non_camel_case_types)]
pub(super) struct V1_12_To_V1_12_3 {}
pub(super) struct FixFieldDistribution {}
impl UpgradeIndex for V1_12_To_V1_12_3 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for FixFieldDistribution {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
make_enum_progress! {
enum FieldDistribution {
RebuildingFieldDistribution,
@@ -25,27 +18,28 @@ impl UpgradeIndex for V1_12_To_V1_12_3 {
Ok(true)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 12, 3)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 12, 3)
}
fn description(&self) -> &'static str {
"Recomputing field distribution which was wrong before v1.12.3"
}
}
#[allow(non_camel_case_types)]
pub(super) struct V1_12_3_To_V1_13_0 {}
pub(super) struct RecomputeStats {}
impl UpgradeIndex for V1_12_3_To_V1_13_0 {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for RecomputeStats {
fn upgrade(&self, _wtxn: &mut RwTxn, _index: &Index, _progress: Progress) -> Result<bool> {
// recompute the indexes stats
Ok(true)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 13, 0)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 13, 0)
}
fn description(&self) -> &'static str {
"Recomputing stats"
}
}

View File

@@ -5,17 +5,10 @@ use crate::database_stats::DatabaseStats;
use crate::progress::Progress;
use crate::{make_enum_progress, Index, Result};
#[allow(non_camel_case_types)]
pub(super) struct V1_13_0_To_V1_13_1();
pub(super) struct AddNewStats();
impl UpgradeIndex for V1_13_0_To_V1_13_1 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for AddNewStats {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
make_enum_progress! {
enum DocumentsStats {
CreatingDocumentsStats,
@@ -30,26 +23,11 @@ impl UpgradeIndex for V1_13_0_To_V1_13_1 {
Ok(true)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 13, 1)
}
}
#[allow(non_camel_case_types)]
pub(super) struct V1_13_1_To_Latest_V1_13();
impl UpgradeIndex for V1_13_1_To_Latest_V1_13 {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
Ok(false)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 13, 3)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 13, 1)
}
fn description(&self) -> &'static str {
"Computing newly introduced document stats"
}
}

View File

@@ -5,17 +5,10 @@ use super::UpgradeIndex;
use crate::progress::Progress;
use crate::{make_enum_progress, Index, Result};
#[allow(non_camel_case_types)]
pub(super) struct Latest_V1_13_To_Latest_V1_14();
pub(super) struct UpgradeArroyVersion();
impl UpgradeIndex for Latest_V1_13_To_Latest_V1_14 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for UpgradeArroyVersion {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
make_enum_progress! {
enum VectorStore {
UpdateInternalVersions,
@@ -35,7 +28,11 @@ impl UpgradeIndex for Latest_V1_13_To_Latest_V1_14 {
Ok(false)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 14, 0)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 14, 0)
}
fn description(&self) -> &'static str {
"Updating vector store with an internal version"
}
}

View File

@@ -7,25 +7,21 @@ use crate::progress::Progress;
use crate::update::new::indexer::recompute_word_fst_from_word_docids_database;
use crate::{Index, Result};
#[allow(non_camel_case_types)]
pub(super) struct Latest_V1_14_To_Latest_V1_15();
pub(super) struct RecomputeWordFst();
impl UpgradeIndex for Latest_V1_14_To_Latest_V1_15 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for RecomputeWordFst {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
// Recompute the word FST from the word docids database.
recompute_word_fst_from_word_docids_database(index, wtxn, &progress)?;
Ok(false)
}
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 15, 0)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 15, 0)
fn description(&self) -> &'static str {
"Recomputing word FST from word docids database as it was wrong before v1.15.0"
}
}

View File

@@ -6,17 +6,10 @@ use crate::progress::Progress;
use crate::vector::db::{EmbedderInfo, EmbeddingStatus};
use crate::{Index, InternalError, Result};
#[allow(non_camel_case_types)]
pub(super) struct Latest_V1_15_To_V1_16_0();
pub(super) struct SwitchToMultimodal();
impl UpgradeIndex for Latest_V1_15_To_V1_16_0 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for SwitchToMultimodal {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, _progress: Progress) -> Result<bool> {
let v1_15_indexing_configs = index
.main
.remap_types::<Str, SerdeJson<Vec<super::v1_15::IndexEmbeddingConfig>>>()
@@ -41,8 +34,11 @@ impl UpgradeIndex for Latest_V1_15_To_V1_16_0 {
Ok(false)
}
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 16, 0)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 16, 0)
fn description(&self) -> &'static str {
"Migrating the database for multimodal support"
}
}

View File

@@ -2,6 +2,7 @@ use candle_core::Tensor;
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config as BertConfig, DTYPE};
use candle_transformers::models::modernbert::{Config as ModernConfig, ModernBert};
use candle_transformers::models::xlm_roberta::{Config as XlmRobertaConfig, XLMRobertaModel};
// FIXME: currently we use the hub to retrieve the model; in the future we might want to embed it into Meilisearch itself
use hf_hub::api::sync::Api;
use hf_hub::{Repo, RepoType};
@@ -89,6 +90,7 @@ impl Default for EmbedderOptions {
enum ModelKind {
Bert(BertModel),
Modern(ModernBert),
XlmRoberta(XLMRobertaModel),
}
/// Perform embedding of documents and queries
@@ -304,7 +306,8 @@ impl Embedder {
};
let is_modern = has_arch("modernbert");
tracing::debug!(is_modern, model_type, "detected HF architecture");
let is_xlm_roberta = has_arch("xlm-roberta") || has_arch("xlm_roberta");
tracing::debug!(is_modern, is_xlm_roberta, model_type, "detected HF architecture");
let mut tokenizer = Tokenizer::from_file(&tokenizer_filename)
.map_err(|inner| NewEmbedderError::open_tokenizer(tokenizer_filename, inner))?;
@@ -340,6 +343,18 @@ impl Embedder {
)
})?;
ModelKind::Modern(ModernBert::load(vb, &config).map_err(NewEmbedderError::load_model)?)
} else if is_xlm_roberta {
let config: XlmRobertaConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
)
})?;
ModelKind::XlmRoberta(
XLMRobertaModel::new(&config, vb).map_err(NewEmbedderError::load_model)?,
)
} else {
let config: BertConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
@@ -451,6 +466,19 @@ impl Embedder {
let mask = Tensor::stack(&[mask], 0).map_err(EmbedError::tensor_shape)?;
model.forward(&token_ids, &mask).map_err(EmbedError::model_forward)?
}
ModelKind::XlmRoberta(model) => {
let mut mask_vec = tokens.get_attention_mask().to_vec();
if mask_vec.len() > self.max_len {
mask_vec.truncate(self.max_len);
}
let mask = Tensor::new(mask_vec.as_slice(), &self.device)
.map_err(EmbedError::tensor_shape)?;
let mask = Tensor::stack(&[mask], 0).map_err(EmbedError::tensor_shape)?;
let token_type_ids = token_ids.zeros_like().map_err(EmbedError::tensor_shape)?;
model
.forward(&token_ids, &mask, &token_type_ids, None, None, None)
.map_err(EmbedError::model_forward)?
}
};
let embedding = Self::pooling(embeddings, self.pooling)?;

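With this branch in place, any Hugging Face checkpoint whose config.json declares an xlm-roberta architecture loads through XLMRobertaModel; unlike the BERT path, its forward pass also expects token_type_ids, which the code above fills with zeros. Selecting such a model is the same settings call as for BERT-family embedders; a sketch in the style of the tests, where the model name is an assumption rather than a value from this diff:

let (response, code) = index
    .update_settings(json!({
        "embedders": {
            "multilingual": {
                "source": "huggingFace",
                // any XLM-RoBERTa-based checkpoint should hit the new path,
                // e.g. an E5 multilingual variant (assumed for illustration)
                "model": "intfloat/multilingual-e5-small",
                "documentTemplate": "{{doc.title}}"
            }
        }
    }))
    .await;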
View File

@@ -67,7 +67,7 @@ impl<F> Embeddings<F> {
///
/// If `embeddings.len() % self.dimension != 0`, then the append operation fails.
pub fn append(&mut self, mut embeddings: Vec<F>) -> Result<(), Vec<F>> {
if embeddings.len() % self.dimension != 0 {
if !embeddings.len().is_multiple_of(self.dimension) {
return Err(embeddings);
}
self.data.append(&mut embeddings);

View File

@@ -1,5 +1,5 @@
use hannoy::distances::{Cosine, Hamming};
use hannoy::ItemId;
use hannoy::{ItemId, Searched};
use heed::{RoTxn, RwTxn, Unspecified};
use ordered_float::OrderedFloat;
use rand::SeedableRng as _;
@@ -974,7 +974,7 @@ impl VectorStore {
}
if let Some(mut ret) = searcher.by_item(rtxn, item)? {
results.append(&mut ret);
results.append(&mut ret.nns);
}
}
results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance));
@@ -1028,10 +1028,9 @@ impl VectorStore {
searcher.candidates(filter);
}
let (res, _degraded) =
&mut searcher
.by_vector_with_cancellation(rtxn, vector, || time_budget.exceeded())?;
results.append(res);
let Searched { mut nns, did_cancel: _ } =
searcher.by_vector_with_cancellation(rtxn, vector, || time_budget.exceeded())?;
results.append(&mut nns);
}
results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance));

View File

@@ -22,6 +22,7 @@ reqwest = { version = "0.12.24", features = [
"json",
"rustls-tls",
], default-features = false }
semver = "1.0.27"
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.145"
sha2 = "0.10.9"
@@ -42,3 +43,4 @@ tracing = "0.1.41"
tracing-subscriber = "0.3.20"
tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
uuid = { version = "1.18.1", features = ["v7", "serde"] }
similar-asserts = "1.7.0"

View File

@@ -1,194 +0,0 @@
use std::collections::BTreeMap;
use std::fmt::Display;
use std::io::Read as _;
use anyhow::{bail, Context as _};
use serde::Deserialize;
use super::assets::{fetch_asset, Asset};
use super::client::{Client, Method};
#[derive(Clone, Deserialize)]
pub struct Command {
pub route: String,
pub method: Method,
#[serde(default)]
pub body: Body,
#[serde(default)]
pub synchronous: SyncMode,
}
#[derive(Default, Clone, Deserialize)]
#[serde(untagged)]
pub enum Body {
Inline {
inline: serde_json::Value,
},
Asset {
asset: String,
},
#[default]
Empty,
}
impl Body {
pub fn get(
self,
assets: &BTreeMap<String, Asset>,
asset_folder: &str,
) -> anyhow::Result<Option<(Vec<u8>, &'static str)>> {
Ok(match self {
Body::Inline { inline: body } => Some((
serde_json::to_vec(&body)
.context("serializing to bytes")
.context("while getting inline body")?,
"application/json",
)),
Body::Asset { asset: name } => Some({
let context = || format!("while getting body from asset '{name}'");
let (mut file, format) =
fetch_asset(&name, assets, asset_folder).with_context(context)?;
let mut buf = Vec::new();
file.read_to_end(&mut buf).with_context(context)?;
(buf, format.to_content_type(&name))
}),
Body::Empty => None,
})
}
}
impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?} {} ({:?})", self.method, self.route, self.synchronous)
}
}
#[derive(Default, Debug, Clone, Copy, Deserialize)]
pub enum SyncMode {
DontWait,
#[default]
WaitForResponse,
WaitForTask,
}
pub async fn run_batch(
client: &Client,
batch: &[Command],
assets: &BTreeMap<String, Asset>,
asset_folder: &str,
) -> anyhow::Result<()> {
let [.., last] = batch else { return Ok(()) };
let sync = last.synchronous;
let mut tasks = tokio::task::JoinSet::new();
for command in batch {
// FIXME: you probably don't want to copy the assets every time here
tasks.spawn({
let client = client.clone();
let command = command.clone();
let assets = assets.clone();
let asset_folder = asset_folder.to_owned();
async move { run(client, command, &assets, &asset_folder).await }
});
}
while let Some(result) = tasks.join_next().await {
result
.context("panicked while executing command")?
.context("error while executing command")?;
}
match sync {
SyncMode::DontWait => {}
SyncMode::WaitForResponse => {}
SyncMode::WaitForTask => wait_for_tasks(client).await?,
}
Ok(())
}
async fn wait_for_tasks(client: &Client) -> anyhow::Result<()> {
loop {
let response = client
.get("tasks?statuses=enqueued,processing")
.send()
.await
.context("could not wait for tasks")?;
let response: serde_json::Value = response
.json()
.await
.context("could not deserialize response to JSON")
.context("could not wait for tasks")?;
match response.get("total") {
Some(serde_json::Value::Number(number)) => {
let number = number.as_u64().with_context(|| {
format!("waiting for tasks: could not parse 'total' as integer, got {}", number)
})?;
if number == 0 {
break;
} else {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
continue;
}
}
Some(thing_else) => {
bail!(format!(
"waiting for tasks: could not parse 'total' as a number, got '{thing_else}'"
))
}
None => {
bail!(format!(
"waiting for tasks: expected response to contain 'total', got '{response}'"
))
}
}
}
Ok(())
}
#[tracing::instrument(skip(client, command, assets, asset_folder), fields(command = %command))]
pub async fn run(
client: Client,
mut command: Command,
assets: &BTreeMap<String, Asset>,
asset_folder: &str,
) -> anyhow::Result<()> {
// mem::take the body here to leave an empty body in its place, so that `command` is not partially moved out
let body = std::mem::take(&mut command.body)
.get(assets, asset_folder)
.with_context(|| format!("while getting body for command {command}"))?;
let request = client.request(command.method.into(), &command.route);
let request = if let Some((body, content_type)) = body {
request.body(body).header(reqwest::header::CONTENT_TYPE, content_type)
} else {
request
};
let response =
request.send().await.with_context(|| format!("error sending command: {}", command))?;
let code = response.status();
if code.is_client_error() {
tracing::error!(%command, %code, "error in workload file");
let response: serde_json::Value = response
.json()
.await
.context("could not deserialize response as JSON")
.context("parsing error in workload file when sending command")?;
bail!("error in workload file: server responded with error code {code} and '{response}'")
} else if code.is_server_error() {
tracing::error!(%command, %code, "server error");
let response: serde_json::Value = response
.json()
.await
.context("could not deserialize response as JSON")
.context("parsing server error when sending command")?;
bail!("server error: server responded with error code {code} and '{response}'")
}
Ok(())
}

View File

@@ -7,9 +7,9 @@ use tokio::task::AbortHandle;
use tracing_trace::processor::span_stats::CallStats;
use uuid::Uuid;
use super::client::Client;
use super::env_info;
use super::workload::Workload;
use super::workload::BenchWorkload;
use crate::common::client::Client;
#[derive(Debug, Clone)]
pub enum DashboardClient {
@@ -89,7 +89,7 @@ impl DashboardClient {
pub async fn create_workload(
&self,
invocation_uuid: Uuid,
workload: &Workload,
workload: &BenchWorkload,
) -> anyhow::Result<Uuid> {
let Self::Client(dashboard_client) = self else { return Ok(Uuid::now_v7()) };

View File

@@ -1,51 +1,36 @@
mod assets;
mod client;
mod command;
mod dashboard;
mod env_info;
mod meili_process;
mod workload;
use std::io::LineWriter;
use std::path::PathBuf;
use crate::common::args::CommonArgs;
use crate::common::logs::setup_logs;
use crate::common::workload::Workload;
use std::{path::PathBuf, sync::Arc};
use anyhow::Context;
use anyhow::{bail, Context};
use clap::Parser;
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::Layer;
use self::client::Client;
use self::workload::Workload;
use crate::common::client::Client;
pub use workload::BenchWorkload;
pub fn default_http_addr() -> String {
"127.0.0.1:7700".to_string()
}
pub fn default_report_folder() -> String {
"./bench/reports/".into()
}
pub fn default_asset_folder() -> String {
"./bench/assets/".into()
}
pub fn default_log_filter() -> String {
"info".into()
}
pub fn default_dashboard_url() -> String {
"http://localhost:9001".into()
}
/// Run benchmarks from a workload
#[derive(Parser, Debug)]
pub struct BenchDeriveArgs {
/// Filename of the workload file, pass multiple filenames
/// to run multiple workloads in the specified order.
///
/// Each workload run will get its own report file.
#[arg(value_name = "WORKLOAD_FILE", last = false)]
workload_file: Vec<PathBuf>,
pub struct BenchArgs {
/// Common arguments shared with other commands
#[command(flatten)]
common: CommonArgs,
/// Meilisearch master keys
#[arg(long)]
pub master_key: Option<String>,
/// URL of the dashboard.
#[arg(long, default_value_t = default_dashboard_url())]
@@ -59,34 +44,14 @@ pub struct BenchDeriveArgs {
#[arg(long, default_value_t = default_report_folder())]
report_folder: String,
/// Directory to store the remote assets.
#[arg(long, default_value_t = default_asset_folder())]
asset_folder: String,
/// Log directives
#[arg(short, long, default_value_t = default_log_filter())]
log_filter: String,
/// Benchmark dashboard API key
#[arg(long)]
api_key: Option<String>,
/// Meilisearch master keys
#[arg(long)]
master_key: Option<String>,
/// Authentication bearer for fetching assets
#[arg(long)]
assets_key: Option<String>,
/// Reason for the benchmark invocation
#[arg(short, long)]
reason: Option<String>,
/// The maximum time in seconds we allow for fetching the task queue before timing out.
#[arg(long, default_value_t = 60)]
tasks_queue_timeout_secs: u64,
/// The path to the binary to run.
///
/// If unspecified, runs `cargo run` after building Meilisearch with `cargo build`.
@@ -94,18 +59,8 @@ pub struct BenchDeriveArgs {
binary_path: Option<PathBuf>,
}
pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
// setup logs
let filter: tracing_subscriber::filter::Targets =
args.log_filter.parse().context("invalid --log-filter")?;
let subscriber = tracing_subscriber::registry().with(
tracing_subscriber::fmt::layer()
.with_writer(|| LineWriter::new(std::io::stderr()))
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(filter),
);
tracing::subscriber::set_global_default(subscriber).context("could not setup logging")?;
pub fn run(args: BenchArgs) -> anyhow::Result<()> {
setup_logs(&args.common.log_filter)?;
// fetch environment and build info
let env = env_info::Environment::generate_from_current_config();
@@ -116,8 +71,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
let _scope = rt.enter();
// setup clients
let assets_client =
Client::new(None, args.assets_key.as_deref(), Some(std::time::Duration::from_secs(3600)))?; // 1h
let assets_client = Client::new(
None,
args.common.assets_key.as_deref(),
Some(std::time::Duration::from_secs(3600)), // 1h
)?;
let dashboard_client = if args.no_dashboard {
dashboard::DashboardClient::new_dry()
@@ -134,11 +92,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
None,
)?;
let meili_client = Client::new(
let meili_client = Arc::new(Client::new(
Some("http://127.0.0.1:7700".into()),
args.master_key.as_deref(),
Some(std::time::Duration::from_secs(args.tasks_queue_timeout_secs)),
)?;
Some(std::time::Duration::from_secs(args.common.tasks_queue_timeout_secs)),
)?);
// enter runtime
@@ -146,11 +104,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
dashboard_client.send_machine_info(&env).await?;
let commit_message = build_info.commit_msg.unwrap_or_default().split('\n').next().unwrap();
let max_workloads = args.workload_file.len();
let max_workloads = args.common.workload_file.len();
let reason: Option<&str> = args.reason.as_deref();
let invocation_uuid = dashboard_client.create_invocation(build_info.clone(), commit_message, env, max_workloads, reason).await?;
tracing::info!(workload_count = args.workload_file.len(), "handling workload files");
tracing::info!(workload_count = args.common.workload_file.len(), "handling workload files");
// main task
let workload_runs = tokio::spawn(
@@ -158,13 +116,17 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
let dashboard_client = dashboard_client.clone();
let mut dashboard_urls = Vec::new();
async move {
for workload_file in args.workload_file.iter() {
for workload_file in args.common.workload_file.iter() {
let workload: Workload = serde_json::from_reader(
std::fs::File::open(workload_file)
.with_context(|| format!("error opening {}", workload_file.display()))?,
)
.with_context(|| format!("error parsing {} as JSON", workload_file.display()))?;
let Workload::Bench(workload) = workload else {
bail!("workload file {} is not a bench workload", workload_file.display());
};
let workload_name = workload.name.clone();
workload::execute(

View File

@@ -1,24 +1,28 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashMap};
use std::fs::File;
use std::io::{Seek as _, Write as _};
use std::path::Path;
use std::sync::Arc;
use anyhow::{bail, Context as _};
use futures_util::TryStreamExt as _;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tokio::task::JoinHandle;
use uuid::Uuid;
use super::assets::Asset;
use super::client::Client;
use super::command::SyncMode;
use super::dashboard::DashboardClient;
use super::BenchDeriveArgs;
use crate::bench::{assets, meili_process};
use super::BenchArgs;
use crate::common::assets::{self, Asset};
use crate::common::client::Client;
use crate::common::command::{run_commands, Command};
use crate::common::instance::Binary;
use crate::common::process::{self, delete_db, start_meili};
#[derive(Deserialize)]
pub struct Workload {
/// A bench workload.
/// Not to be confused with [a test workload](crate::test::workload::Workload).
#[derive(Serialize, Deserialize, Debug)]
pub struct BenchWorkload {
pub name: String,
pub run_count: u16,
pub extra_cli_args: Vec<String>,
@@ -26,30 +30,34 @@ pub struct Workload {
#[serde(default)]
pub target: String,
#[serde(default)]
pub precommands: Vec<super::command::Command>,
pub commands: Vec<super::command::Command>,
pub precommands: Vec<Command>,
pub commands: Vec<Command>,
}
async fn run_commands(
async fn run_workload_commands(
dashboard_client: &DashboardClient,
logs_client: &Client,
meili_client: &Client,
meili_client: &Arc<Client>,
workload_uuid: Uuid,
workload: &Workload,
args: &BenchDeriveArgs,
workload: &BenchWorkload,
args: &BenchArgs,
run_number: u16,
) -> anyhow::Result<JoinHandle<anyhow::Result<File>>> {
let report_folder = &args.report_folder;
let workload_name = &workload.name;
let assets = Arc::new(workload.assets.clone());
let asset_folder = args.common.asset_folder.clone().leak();
for batch in workload
.precommands
.as_slice()
.split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
{
super::command::run_batch(meili_client, batch, &workload.assets, &args.asset_folder)
.await?;
}
run_commands(
meili_client,
&workload.precommands,
0,
&assets,
asset_folder,
&mut HashMap::new(),
false,
)
.await?;
std::fs::create_dir_all(report_folder)
.with_context(|| format!("could not create report directory at {report_folder}"))?;
@@ -59,14 +67,16 @@ async fn run_commands(
let report_handle = start_report(logs_client, trace_filename, &workload.target).await?;
for batch in workload
.commands
.as_slice()
.split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
{
super::command::run_batch(meili_client, batch, &workload.assets, &args.asset_folder)
.await?;
}
run_commands(
meili_client,
&workload.commands,
0,
&assets,
asset_folder,
&mut HashMap::new(),
false,
)
.await?;
let processor =
stop_report(dashboard_client, logs_client, workload_uuid, report_filename, report_handle)
@@ -81,14 +91,14 @@ pub async fn execute(
assets_client: &Client,
dashboard_client: &DashboardClient,
logs_client: &Client,
meili_client: &Client,
meili_client: &Arc<Client>,
invocation_uuid: Uuid,
master_key: Option<&str>,
workload: Workload,
args: &BenchDeriveArgs,
workload: BenchWorkload,
args: &BenchArgs,
binary_path: Option<&Path>,
) -> anyhow::Result<()> {
assets::fetch_assets(assets_client, &workload.assets, &args.asset_folder).await?;
assets::fetch_assets(assets_client, &workload.assets, &args.common.asset_folder).await?;
let workload_uuid = dashboard_client.create_workload(invocation_uuid, &workload).await?;
@@ -129,38 +139,33 @@ pub async fn execute(
async fn execute_run(
dashboard_client: &DashboardClient,
logs_client: &Client,
meili_client: &Client,
meili_client: &Arc<Client>,
workload_uuid: Uuid,
master_key: Option<&str>,
workload: &Workload,
args: &BenchDeriveArgs,
workload: &BenchWorkload,
args: &BenchArgs,
binary_path: Option<&Path>,
run_number: u16,
) -> anyhow::Result<tokio::task::JoinHandle<anyhow::Result<std::fs::File>>> {
meili_process::delete_db();
delete_db().await;
let run_command = match binary_path {
Some(binary_path) => tokio::process::Command::new(binary_path),
None => {
meili_process::build().await?;
let mut command = tokio::process::Command::new("cargo");
command
.arg("run")
.arg("--release")
.arg("-p")
.arg("meilisearch")
.arg("--bin")
.arg("meilisearch")
.arg("--");
command
}
let binary = match binary_path {
Some(binary_path) => Binary {
source: crate::common::instance::BinarySource::Path(binary_path.to_owned()),
extra_cli_args: workload.extra_cli_args.clone(),
},
None => Binary {
source: crate::common::instance::BinarySource::Build {
edition: crate::common::instance::Edition::Community,
},
extra_cli_args: workload.extra_cli_args.clone(),
},
};
let meilisearch =
meili_process::start(meili_client, master_key, workload, &args.asset_folder, run_command)
.await?;
start_meili(meili_client, master_key, &binary, &args.common.asset_folder).await?;
let processor = run_commands(
let processor = run_workload_commands(
dashboard_client,
logs_client,
meili_client,
@@ -171,7 +176,7 @@ async fn execute_run(
)
.await?;
meili_process::kill(meilisearch).await;
process::kill_meili(meilisearch).await;
tracing::info!(run_number, "Successful run");

View File

@@ -0,0 +1,36 @@
use clap::Parser;
use std::path::PathBuf;
pub fn default_asset_folder() -> String {
"./bench/assets/".into()
}
pub fn default_log_filter() -> String {
"info".into()
}
#[derive(Parser, Debug, Clone)]
pub struct CommonArgs {
/// Filename of the workload file, pass multiple filenames
/// to run multiple workloads in the specified order.
///
/// For benches, each workload run will get its own report file.
#[arg(value_name = "WORKLOAD_FILE", last = false)]
pub workload_file: Vec<PathBuf>,
/// Directory to store the remote assets.
#[arg(long, default_value_t = default_asset_folder())]
pub asset_folder: String,
/// Log directives
#[arg(short, long, default_value_t = default_log_filter())]
pub log_filter: String,
/// Authentication bearer for fetching assets
#[arg(long)]
pub assets_key: Option<String>,
/// The maximum time in seconds we allow for fetching the task queue before timing out.
#[arg(long, default_value_t = 60)]
pub tasks_queue_timeout_secs: u64,
}

View File

@@ -3,21 +3,22 @@ use std::io::{Read as _, Seek as _, Write as _};
use anyhow::{bail, Context};
use futures_util::TryStreamExt as _;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use sha2::Digest;
use super::client::Client;
#[derive(Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Asset {
pub local_location: Option<String>,
pub remote_location: Option<String>,
#[serde(default)]
#[serde(default, skip_serializing_if = "AssetFormat::is_default")]
pub format: AssetFormat,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sha256: Option<String>,
}
#[derive(Deserialize, Default, Copy, Clone)]
#[derive(Serialize, Deserialize, Default, Copy, Clone, Debug)]
pub enum AssetFormat {
#[default]
Auto,
@@ -27,6 +28,10 @@ pub enum AssetFormat {
}
impl AssetFormat {
fn is_default(&self) -> bool {
matches!(self, AssetFormat::Auto)
}
pub fn to_content_type(self, filename: &str) -> &'static str {
match self {
AssetFormat::Auto => Self::auto_detect(filename).to_content_type(filename),
@@ -166,7 +171,14 @@ fn check_sha256(name: &str, asset: &Asset, mut file: std::fs::File) -> anyhow::R
}
}
None => {
tracing::warn!(sha256 = file_hash, "Skipping hash for asset {name} that doesn't have one. Please add it to workload file");
let msg = match name.starts_with("meilisearch-") {
true => "Please add it to crates/xtask/src/common/instance/release.rs",
false => "Please add it to workload file",
};
tracing::warn!(
sha256 = file_hash,
"Skipping hash for asset {name} that doesn't have one. {msg}"
);
true
}
})

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone)]
pub struct Client {
@@ -61,7 +61,7 @@ impl Client {
}
}
#[derive(Debug, Clone, Copy, Deserialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum Method {
Get,

Some files were not shown because too many files have changed in this diff.