mirror of https://github.com/meilisearch/meilisearch.git
synced 2025-12-06 12:45:42 +00:00

Compare commits: latest...update-ver (125 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | ff680d29a8 |  |
|  | 00420dfca0 |  |
|  | a3a86ac629 |  |
|  | f6210b8e5e |  |
|  | fe46af7ded |  |
|  | 57b94b411f |  |
|  | a7b6f65851 |  |
|  | 1ec6646d8c |  |
|  | 2dccacf273 |  |
|  | ce0f04e9ee |  |
|  | 9ba5c6d371 |  |
|  | 56673fee56 |  |
|  | b30bcbb931 |  |
|  | 5fbe4436c8 |  |
|  | 8fa253c293 |  |
|  | 4833da9edb |  |
|  | c0e31a4f01 |  |
|  | c06ffb31d1 |  |
|  | 3097314b9d |  |
|  | 786a978237 |  |
|  | 03e53aaf6d |  |
|  | 2206f045a4 |  |
|  | 246cf8b2d1 |  |
|  | 82adabc5a0 |  |
|  | c9a22247d2 |  |
|  | c535b8ddef |  |
|  | 8e89619aed |  |
|  | f617ca8e38 |  |
|  | 959175ad2a |  |
|  | 341ffbf5ef |  |
|  | 542f3073f4 |  |
|  | 0f134b079f |  |
|  | 9e7ae47355 |  |
|  | 1edf07df29 |  |
|  | 88aa3cddde |  |
|  | e6846cb55a |  |
|  | 29b715e2f9 |  |
|  | f28dc5bd2b |  |
|  | 56d0b8ea54 |  |
|  | 514edb1b79 |  |
|  | cfb609d41d |  |
|  | 11cb062067 |  |
|  | 2ca4926ac5 |  |
|  | 834bd9b879 |  |
|  | cac7e00983 |  |
|  | e9300bac64 |  |
|  | b0da7864a4 |  |
|  | 2b9d379feb |  |
|  | 8d585a04d4 |  |
|  | 0095a72fba |  |
|  | 651339648c |  |
|  | a489f4c172 |  |
|  | 3b875ea00e |  |
|  | 9d269c499c |  |
|  | da35ae0a6e |  |
|  | 61945b235d |  |
|  | e936ac172d |  |
|  | 162a84cdbf |  |
|  | 92c63cf351 |  |
|  | fca35b7476 |  |
|  | 4056657a55 |  |
|  | 685d227597 |  |
|  | 49b9f6ff38 |  |
|  | 79d0a3fb97 |  |
|  | 313ef7e79b |  |
|  | 256407be61 |  |
|  | 8b3943bd32 |  |
|  | 87b972d29a |  |
|  | 09ab61b360 |  |
|  | 2459f381b4 |  |
|  | 6442f02de4 |  |
|  | 91c4d9ea79 |  |
|  | 92a4091da3 |  |
|  | 29a337f0f9 |  |
|  | 8c3cebadaa |  |
|  | b566458aa2 |  |
|  | ae4344e359 |  |
|  | b6cb384650 |  |
|  | 2c3e3d856c |  |
|  | 93e97f814c |  |
|  | e9350f033d |  |
|  | 54c92fd6c0 |  |
|  | 4f4df83a51 |  |
|  | a51021cab7 |  |
|  | e33f4fdeae |  |
|  | e407bca196 |  |
|  | cd24ea11b4 |  |
|  | ba578e7ab5 |  |
|  | 05a74d1e68 |  |
|  | 41d61deb97 |  |
|  | bba292b01a |  |
|  | 96923dff33 |  |
|  | 8f9c9305da |  |
|  | a9f309e1d1 |  |
|  | e456a9acd8 |  |
|  | 9b7d29466c |  |
|  | b0ef14b6f0 |  |
|  | 36febe2068 |  |
|  | 6f14a6ec18 |  |
|  | fce046d84d |  |
|  | 3fc507bb44 |  |
|  | fdbcd033fb |  |
|  | aaab49baca |  |
|  | 0d0d6e8099 |  |
|  | c1e351c92b |  |
|  | 67cab4cc9d |  |
|  | f30a37b0fe |  |
|  | a78a9f80dd |  |
|  | 439fee5434 |  |
|  | 9e858590e0 |  |
|  | 29eebd5f93 |  |
|  | 07da6edbdf |  |
|  | 22b83042e6 |  |
|  | 52ab13906a |  |
|  | 29bec8efd4 |  |
|  | 6947a8990b |  |
|  | fbb2bb0c73 |  |
|  | 15918f53a9 |  |
|  | d7f5f3a0a3 |  |
|  | 1afbf35f27 |  |
|  | d7675233d5 |  |
|  | c63c1ac32b |  |
|  | 6171dcde0d |  |
|  | 04bc134324 |  |
|  | 8ff39d927d |  |
.github/ISSUE_TEMPLATE/new_feature_issue.md (vendored, 5 changes)

@@ -24,6 +24,11 @@ TBD
 - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
 - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
 
+### Reminders when adding features
+
+- [ ] Write unit tests using insta
+- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
+
 ### Reminders when modifying the API
 
 - [ ] Update the openAPI file with utoipa:
.github/workflows/bench-pr.yml (vendored, 2 changes)

@@ -67,8 +67,6 @@ jobs:
           ref: ${{ steps.comment-branch.outputs.head_ref }}
 
       - uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
 
       - name: Run benchmarks on PR ${{ github.event.issue.id }}
         run: |
.github/workflows/bench-push-indexing.yml (vendored, 2 changes)

@@ -13,8 +13,6 @@ jobs:
     steps:
       - uses: actions/checkout@v5
       - uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
 
       # Run benchmarks
       - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
.github/workflows/db-change-comments.yml (vendored, 6 changes)

@@ -6,7 +6,7 @@ on:
 
 env:
   MESSAGE: |
-    ### Hello, I'm a bot 🤖
+    ### Hello, I'm a bot 🤖
 
     You are receiving this message because you declared that this PR make changes to the Meilisearch database.
     Depending on the nature of the change, additional actions might be required on your part. The following sections detail the additional actions depending on the nature of the change, please copy the relevant section in the description of your PR, and make sure to perform the required actions.
@@ -19,6 +19,7 @@ env:
 
     - [ ] Detail the change to the DB format and why they are forward compatible
    - [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
+    - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
 
 
     ## This PR makes breaking changes
@@ -35,8 +36,7 @@ env:
     - [ ] Write the code to go from the old database to the new one
       - If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
       - If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
-    - [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
-
+    - [ ] Declarative test: add a [declarative test containing a dumpless upgrade](https://github.com/meilisearch/meilisearch/blob/main/TESTING.md#typical-usage)
 
 jobs:
   add-comment:
.github/workflows/flaky-tests.yml (vendored, 6 changes)

@@ -13,6 +13,12 @@ jobs:
       image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
.github/workflows/fuzzer-indexing.yml (vendored, 2 changes)

@@ -13,8 +13,6 @@ jobs:
     steps:
       - uses: actions/checkout@v5
       - uses: dtolnay/rust-toolchain@1.89
-        with:
-          profile: minimal
 
       # Run benchmarks
       - name: Run the fuzzer
.github/workflows/publish-apt-brew-pkg.yml (vendored, 6 changes)

@@ -25,6 +25,12 @@ jobs:
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-deb
         run: cargo install cargo-deb
.github/workflows/publish-docker-images.yml (vendored, 15 changes)

@@ -208,8 +208,8 @@ jobs:
           done
           cosign sign --yes ${images}
 
-      # /!\ Don't touch this without checking with Cloud team
-      - name: Send CI information to Cloud team
+      # /!\ Don't touch this without checking with engineers working on the Cloud code base on #discussion-engineering Slack channel
+      - name: Notify meilisearch-cloud
        # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
        if: ${{ (github.event_name == 'push') && (matrix.edition == 'enterprise') }}
        uses: peter-evans/repository-dispatch@v3
@@ -218,3 +218,14 @@ jobs:
          repository: meilisearch/meilisearch-cloud
          event-type: cloud-docker-build
          client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
+
+      # /!\ Don't touch this without checking with integration team members on #discussion-integrations Slack channel
+      - name: Notify meilisearch-kubernetes
+        # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event), or if not stable
+        if: ${{ github.event_name == 'push' && matrix.edition == 'community' && steps.check-tag-format.outputs.stable == 'true' }}
+        uses: peter-evans/repository-dispatch@v3
+        with:
+          token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          repository: meilisearch/meilisearch-kubernetes
+          event-type: meilisearch-release
+          client-payload: '{ "version": "${{ github.ref_name }}" }'
.github/workflows/test-suite.yml (vendored, 150 changes)

@@ -19,31 +19,36 @@ jobs:
     runs-on: ${{ matrix.runner }}
     strategy:
       matrix:
-        runner: [ubuntu-24.04, ubuntu-24.04-arm]
+        runner: [ubuntu-22.04, ubuntu-22.04-arm]
         features: ["", "--features enterprise"]
     container:
       # Use ubuntu-22.04 to compile with glibc 2.35
       image: ubuntu:22.04
     steps:
       - uses: actions/checkout@v5
-      - name: Install needed dependencies
+      - name: check free space before
+        run: df -h
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
         run: |
-          apt-get update && apt-get install -y curl
-          apt-get install build-essential -y
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
+      - name: check free space after
+        run: df -h
       - name: Setup test with Rust stable
         uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
-      - name: Run cargo check without any default features
+        with:
+          key: ${{ matrix.features }}
+      - name: Run cargo build without any default features
         uses: actions-rs/cargo@v1
         with:
           command: build
-          args: --locked --release --no-default-features --all
+          args: --locked --no-default-features --all
       - name: Run cargo test
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked --release --all ${{ matrix.features }}
+          args: --locked --all ${{ matrix.features }}
 
   test-others:
     name: Tests on ${{ matrix.os }}
@@ -53,54 +58,56 @@ jobs:
     strategy:
       matrix:
         os: [macos-14, windows-2022]
+        features: ["", "--features enterprise"]
-    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request'
+    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
       - uses: dtolnay/rust-toolchain@1.89
-      - name: Run cargo check without any default features
+      - name: Run cargo build without any default features
         uses: actions-rs/cargo@v1
         with:
           command: build
-          args: --locked --release --no-default-features --all
+          args: --locked --no-default-features --all
       - name: Run cargo test
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked --release --all ${{ matrix.features }}
+          args: --locked --all ${{ matrix.features }}
 
   test-all-features:
     name: Tests almost all features
-    runs-on: ubuntu-latest
-    container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+    runs-on: ubuntu-22.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
-      - name: Install needed dependencies
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
         run: |
-          apt-get update
-          apt-get install --assume-yes build-essential curl
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo build with almost all features
         run: |
-          cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
       - name: Run cargo test with almost all features
         run: |
-          cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo test --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
 
   ollama-ubuntu:
     name: Test with Ollama
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        features: ["", "--features enterprise"]
+    runs-on: ubuntu-22.04
     env:
       MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - name: Install Ollama
         run: |
           curl -fsSL https://ollama.com/install.sh | sudo -E sh
@@ -124,20 +131,20 @@ jobs:
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --locked --release --all --features test-ollama ollama ${{ matrix.features }}
+          args: --locked -p meilisearch --features test-ollama ollama
 
   test-disabled-tokenization:
     name: Test disabled tokenization
-    runs-on: ubuntu-latest
-    container:
-      image: ubuntu:22.04
+    runs-on: ubuntu-22.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v5
-      - name: Install needed dependencies
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
         run: |
-          apt-get update
-          apt-get install --assume-yes build-essential curl
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo tree without default features and check lindera is not present
         run: |
@@ -149,33 +156,39 @@ jobs:
         run: |
           cargo tree -f '{p} {f}' -e normal | grep lindera -qz
 
-  # We run tests in debug also, to make sure that the debug_assertions are hit
-  test-debug:
-    name: Run tests in debug
+  build:
+    name: Build in release
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
+      - uses: dtolnay/rust-toolchain@1.89
+      - name: Cache dependencies
+        uses: Swatinem/rust-cache@v2.8.0
+      - name: Build
+        run: cargo build --release --locked --target x86_64-unknown-linux-gnu
+
+  clippy:
+    name: Run Clippy
+    runs-on: ubuntu-22.04
     strategy:
       matrix:
         features: ["", "--features enterprise"]
     steps:
       - uses: actions/checkout@v5
       - uses: dtolnay/rust-toolchain@1.89
-      - name: Run tests in debug
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --locked --all ${{ matrix.features }}
-
-  clippy:
-    name: Run Clippy
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        features: ["", "--features enterprise"]
-    steps:
-      - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
           components: clippy
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -187,14 +200,17 @@ jobs:
 
   fmt:
     name: Run Rustfmt
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
           toolchain: nightly-2024-07-09
           override: true
           components: rustfmt
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
@@ -205,3 +221,23 @@ jobs:
         run: |
           echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
           cargo fmt --all -- --check
+
+  declarative-tests:
+    name: Run declarative tests
+    runs-on: ubuntu-22.04-arm
+    permissions:
+      contents: read
+    steps:
+      - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
+      - uses: dtolnay/rust-toolchain@1.89
+      - name: Cache dependencies
+        uses: Swatinem/rust-cache@v2.8.0
+      - name: Run declarative tests
+        run: |
+          cargo xtask test workloads/tests/*.json
@@ -18,9 +18,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
+      - name: Clean space as per https://github.com/actions/virtual-environments/issues/709
+        run: |
+          sudo rm -rf "/opt/ghc" || true
+          sudo rm -rf "/usr/share/dotnet" || true
+          sudo rm -rf "/usr/local/lib/android" || true
+          sudo rm -rf "/usr/local/share/boost" || true
       - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
       - name: Install sd
         run: cargo install sd
       - name: Update Cargo.toml file
@@ -124,6 +124,7 @@ They are JSON files with the following structure (comments are not actually supported):
 {
     // Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
     "name": "hackernews.ndjson_1M,no-threads",
+    "type": "bench",
     // Number of consecutive runs of the commands that should be performed.
     // Each run uses a fresh instance of Meilisearch and a fresh database.
     // Each run produces its own report file.
Cargo.lock (generated, 106 changes)

@@ -580,7 +580,7 @@ source = "git+https://github.com/meilisearch/bbqueue#e8af4a4bccc8eb36b2b0442c4a9
 
 [[package]]
 name = "benchmarks"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "anyhow",
  "bumpalo",
@@ -790,11 +790,11 @@ dependencies = [
 
 [[package]]
 name = "build-info"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "anyhow",
  "time",
- "vergen-git2",
+ "vergen-gitcl",
 ]
 
 [[package]]
@@ -1786,7 +1786,7 @@ dependencies = [
 
 [[package]]
 name = "dump"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "anyhow",
  "big_s",
@@ -2018,7 +2018,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
 
 [[package]]
 name = "file-store"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "tempfile",
  "thiserror 2.0.17",
@@ -2040,7 +2040,7 @@ dependencies = [
 
 [[package]]
 name = "filter-parser"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "insta",
  "levenshtein_automata",
@@ -2068,7 +2068,7 @@ dependencies = [
 
 [[package]]
 name = "flatten-serde-json"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2231,7 +2231,7 @@ dependencies = [
 
 [[package]]
 name = "fuzzers"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "arbitrary",
  "bumpalo",
@@ -2604,19 +2604,6 @@ version = "0.32.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7"
 
-[[package]]
-name = "git2"
-version = "0.20.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110"
-dependencies = [
- "bitflags 2.10.0",
- "libc",
- "libgit2-sys",
- "log",
- "url",
-]
-
 [[package]]
 name = "glob"
 version = "0.3.3"
@@ -2711,9 +2698,9 @@ dependencies = [
 
 [[package]]
 name = "hannoy"
-version = "0.0.9-nested-rtxns-2"
+version = "0.1.0-nested-rtxns"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06eda090938d9dcd568c8c2a5de383047ed9191578ebf4a342d2975d16e621f2"
+checksum = "be82bf3f2108ddc8885e3d306fcd7f4692066bfe26065ca8b42ba417f3c26dd1"
 dependencies = [
  "bytemuck",
  "byteorder",
@@ -3198,7 +3185,7 @@ dependencies = [
 
 [[package]]
 name = "index-scheduler"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "anyhow",
  "backoff",
@@ -3460,7 +3447,7 @@ dependencies = [
 
 [[package]]
 name = "json-depth-checker"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -3557,18 +3544,6 @@ dependencies = [
  "rle-decode-fast",
 ]
 
-[[package]]
-name = "libgit2-sys"
-version = "0.18.2+1.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222"
-dependencies = [
- "cc",
- "libc",
- "libz-sys",
- "pkg-config",
-]
-
 [[package]]
 name = "libloading"
 version = "0.8.9"
@@ -3626,18 +3601,6 @@ dependencies = [
  "zlib-rs",
 ]
 
-[[package]]
-name = "libz-sys"
-version = "1.1.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d"
-dependencies = [
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
 [[package]]
 name = "lindera"
 version = "0.43.3"
@@ -3974,7 +3937,7 @@ checksum = "ae960838283323069879657ca3de837e9f7bbb4c7bf6ea7f1b290d5e9476d2e0"
 
 [[package]]
 name = "meili-snap"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "insta",
  "md5 0.8.0",
@@ -3985,7 +3948,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -4083,7 +4046,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-auth"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "base64 0.22.1",
  "enum-iterator",
@@ -4102,7 +4065,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-types"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -4137,7 +4100,7 @@ dependencies = [
 
 [[package]]
 name = "meilitool"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "anyhow",
  "clap",
@@ -4171,7 +4134,7 @@ dependencies = [
 
 [[package]]
 name = "milli"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "arroy",
  "bbqueue",
@@ -4750,7 +4713,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
 
 [[package]]
 name = "permissive-json-pointer"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "big_s",
  "serde_json",
@@ -6072,6 +6035,20 @@ name = "similar"
 version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa"
+dependencies = [
+ "bstr",
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "similar-asserts"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b441962c817e33508847a22bd82f03a30cff43642dc2fae8b050566121eb9a"
+dependencies = [
+ "console",
+ "similar",
+]
 
 [[package]]
 name = "simple_asn1"
@@ -7105,12 +7082,6 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
 
-[[package]]
-name = "vcpkg"
-version = "0.2.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
-
 [[package]]
 name = "vergen"
 version = "9.0.6"
@@ -7124,14 +7095,13 @@ dependencies = [
 ]
 
 [[package]]
-name = "vergen-git2"
-version = "1.0.7"
+name = "vergen-gitcl"
+version = "1.0.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1"
+checksum = "b9dfc1de6eb2e08a4ddf152f1b179529638bedc0ea95e6d667c014506377aefe"
 dependencies = [
  "anyhow",
  "derive_builder",
- "git2",
  "rustversion",
  "time",
  "vergen",
@@ -7783,7 +7753,7 @@ dependencies = [
 
 [[package]]
 name = "xtask"
-version = "1.28.2"
+version = "1.29.0"
 dependencies = [
  "anyhow",
  "build-info",
@@ -7792,9 +7762,11 @@ dependencies = [
  "futures-core",
  "futures-util",
  "reqwest",
+ "semver",
  "serde",
  "serde_json",
  "sha2",
+ "similar-asserts",
  "sysinfo",
  "time",
  "tokio",

@@ -23,7 +23,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.28.2"
+version = "1.29.0"
 authors = [
   "Quentin de Quelen <quentin@dequelen.me>",
   "Clément Renault <clement@meilisearch.com>",
TESTING.md (new file, 326 lines)

@@ -0,0 +1,326 @@
# Declarative tests

Declarative tests ensure that Meilisearch features remain stable across versions.

While we already have unit tests, those are run against **temporary databases** that are created fresh each time and therefore never risk corruption.

Declarative tests instead **simulate the lifetime of a database**: they chain together commands (API requests and binary-change instructions), verifying that database state and API responses remain consistent throughout.

## Basic example

```jsonc
{
  "type": "test",
  "name": "api-keys",
  "binary": { // the first command will run on the binary following this specification.
    "source": "release", // get the binary as a release from GitHub
    "version": "1.19.0", // version to fetch
    "edition": "community" // edition to fetch
  },
  "commands": []
}
```

This example defines a no-op test (it does nothing).

If the file is saved at `workloads/tests/example.json`, you can run it with:

```bash
cargo xtask test workloads/tests/example.json
```
## Commands

Commands represent API requests sent to Meilisearch endpoints during a test.

They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.

```jsonc
{
  "route": "keys",
  "method": "POST",
  "body": {
    "inline": {
      "actions": [
        "search",
        "documents.add"
      ],
      "description": "Test API Key",
      "expiresAt": null,
      "indexes": [ "movies" ]
    }
  }
}
```

This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.

### Using assets in commands

To keep tests concise and reusable, you can define **assets** at the root of the workload file.

Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.

```jsonc
{
  "type": "test",
  "name": "movies",
  "binary": {
    "source": "release",
    "version": "1.19.0",
    "edition": "community"
  },
  "assets": {
    "movies.json": {
      "local_location": null,
      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
      "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
    }
  },
  "commands": [
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "asset": "movies.json"
      }
    }
  ]
}
```

In this example:
- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.

This makes the test much cleaner than inlining a large dataset directly into the command.

For asset handling, please refer to the [declarative benchmarks documentation](/BENCHMARKS.md#adding-new-assets).
### Asserting responses

Commands can specify both the **expected status code** and the **expected response body**.

```jsonc
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": {
    "asset": "movies.json"
  },
  "expectedStatus": 202,
  "expectedResponse": {
    "enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
    "indexUid": "movies",
    "status": "enqueued",
    "taskUid": 1,
    "type": "documentAdditionOrUpdate"
  },
  "synchronous": "WaitForTask"
}
```

Manually writing `expectedResponse` fields can be tedious.

Instead, you can let the test runner populate them automatically:

```bash
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
cargo xtask test workloads/tests/example.json --add-missing-responses

# OR

# Run the workload to populate expected fields. Updates all fields including existing ones
cargo xtask test workloads/tests/example.json --update-responses
```

This workflow is recommended:

1. Write the test without expected fields.
2. Run it with `--add-missing-responses` to capture the actual responses.
3. Review and commit the generated expectations.
## Changing binary

It is possible to insert an instruction to change the current Meilisearch instance from one binary specification to another during a test.

When executed, such an instruction will:
1. Stop the current Meilisearch instance.
2. Fetch the binary specified by the instruction.
3. Restart the server with the specified binary on the same database.

```jsonc
{
  "type": "test",
  "name": "movies",
  "binary": {
    "source": "release",
    "version": "1.19.0", // start with version v1.19.0
    "edition": "community"
  },
  "assets": {
    "movies.json": {
      "local_location": null,
      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
      "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
    }
  },
  "commands": [
    // setup some data
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "asset": "movies.json"
      }
    },
    // switch binary to v1.24.0
    {
      "binary": {
        "source": "release",
        "version": "1.24.0",
        "edition": "community"
      }
    }
  ]
}
```

### Typical Usage

In most cases, the binary-change instruction is used to upgrade a database.

- **Set up** some data using commands on an older version.
- **Upgrade** to the latest version.
- **Assert** that the data and API behavior remain correct after the upgrade.

To properly test the dumpless upgrade, one should typically:

1. Open the database without processing the update task: use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` and `--experimental-max-number-of-batched-tasks=0` as extra CLI arguments.
2. Check that the search, stats, and task queue still work.
3. Open the database and process the update task: use a `binary` instruction to switch to the desired version, passing `--experimental-dumpless-upgrade` as the extra CLI argument. Use a `health` command to wait for the upgrade task to finish (see the sketch after the example below).
4. Check that the indexing, search, stats, and task queue still work.
```jsonc
{
  "type": "test",
  "name": "movies",
  "binary": {
    "source": "release",
    "version": "1.12.0",
    "edition": "community"
  },
  "commands": [
    // 0. Run commands to populate the database
    {
      // ..
    },
    // 1. Open the database with new MS without processing the update task
    {
      "binary": {
        "source": "build", // build the binary from the sources in the current git repository
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade", // allows to open with a newer MS
          "--experimental-max-number-of-batched-tasks=0" // prevent processing of the update task
        ]
      }
    },
    // 2. Check the search etc.
    {
      // ..
    },
    // 3. Open the database with new MS and processing the update task
    {
      "binary": {
        "source": "build", // build the binary from the sources in the current git repository
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade" // allows to open with a newer MS
          // no `--experimental-max-number-of-batched-tasks=0`
        ]
      }
    },
    // 4. Check the indexing, search, etc.
    {
      // ..
    }
  ]
}
```
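
For step 3, the `health` command used to wait on the upgrade could look like the following. This is a minimal sketch: the `GET /health` route and its `{"status": "available"}` body are part of the public Meilisearch API, but the exact waiting semantics of the test runner are an assumption here, not something this document specifies.

```jsonc
// Hypothetical step-3 command: poll GET /health until the instance reports available.
{
  "route": "health",
  "method": "GET",
  "expectedStatus": 200,
  "expectedResponse": {
    "status": "available"
  },
  "synchronous": "WaitForTask" // wait for the enqueued upgrade task to finish
}
```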

This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.

## Variables

Sometimes a command needs to use a value returned by a **previous response**.
These values can be captured and reused using the `register` field.

```jsonc
{
  "route": "keys",
  "method": "POST",
  "body": {
    "inline": {
      "actions": [
        "search",
        "documents.add"
      ],
      "description": "Test API Key",
      "expiresAt": null,
      "indexes": [ "movies" ]
    }
  },
  "expectedResponse": {
    "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
    // ...
  },
  "register": {
    "key": "/key"
  },
  "synchronous": "WaitForResponse"
}
```

The `register` field captures the value at the JSON path `/key` from the response.
Paths follow the **JSON Pointer (RFC 6901)** format.
Registered variables are available for all subsequent commands.
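
For instance, given the response shown above, the pointer `/key` selects the top-level `key` field, and a pointer such as `/indexes/0` (hypothetical here) would select the first element of the `indexes` array:

```jsonc
// Response body (abridged):
{
  "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
  "indexes": [ "movies" ]
}
// "register": { "key": "/key" }         -> key   = "c6f64630...050eaf"
// "register": { "index": "/indexes/0" } -> index = "movies"
```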

Registered variables can be referenced by wrapping their name in double curly braces:

In the route/path:

```jsonc
{
  "route": "tasks/{{ task_id }}",
  "method": "GET"
}
```

In the request body:

```jsonc
{
  "route": "indexes/movies/documents",
  "method": "PATCH",
  "body": {
    "inline": {
      "id": "{{ document_id }}",
      "overview": "Shazam turns evil and the world is in danger."
    }
  }
}
```

Or they can be referenced by their name (**without curly braces**) as an API key:

```jsonc
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": { /* ... */ },
  "apiKeyVariable": "key" // The **content** of the key variable will be used as an API key
}
```
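
Putting registration and key usage together, a test can create an API key and then authenticate the next request with it. This is a minimal sketch using only the fields documented above; the document payload in the second command is illustrative:

```jsonc
"commands": [
  {
    "route": "keys",
    "method": "POST",
    "body": {
      "inline": {
        "actions": [ "documents.add" ],
        "description": "Test API Key",
        "expiresAt": null,
        "indexes": [ "movies" ]
      }
    },
    "register": { "key": "/key" }, // capture the generated key
    "synchronous": "WaitForResponse"
  },
  {
    "route": "indexes/movies/documents",
    "method": "POST",
    "body": { "inline": { "id": 1, "title": "Shazam!" } }, // illustrative document
    "apiKeyVariable": "key" // authenticate with the registered key
  }
]
```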

@@ -21,6 +21,10 @@ use roaring::RoaringBitmap;
 #[global_allocator]
 static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
 
+fn no_cancel() -> bool {
+    false
+}
+
 const BENCHMARK_ITERATION: usize = 10;
 
 fn setup_dir(path: impl AsRef<Path>) {
@@ -65,7 +69,7 @@ fn setup_settings<'t>(
     let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
     builder.set_sortable_fields(sortable_fields);
 
-    builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
+    builder.execute(&no_cancel, &Progress::default(), Default::default()).unwrap();
 }
 
 fn setup_index_with_settings(
@@ -152,7 +156,7 @@ fn indexing_songs_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -168,7 +172,7 @@ fn indexing_songs_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -220,7 +224,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -236,7 +240,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -266,7 +270,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -282,7 +286,7 @@ fn reindexing_songs_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -336,7 +340,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -352,7 +356,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -414,7 +418,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -430,7 +434,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -460,7 +464,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -476,7 +480,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -502,7 +506,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -518,7 +522,7 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -571,7 +575,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -587,7 +591,7 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -639,7 +643,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -655,7 +659,7 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -707,7 +711,7 @@ fn indexing_wiki(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -723,7 +727,7 @@ fn indexing_wiki(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -774,7 +778,7 @@ fn reindexing_wiki(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -790,7 +794,7 @@ fn reindexing_wiki(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -820,7 +824,7 @@ fn reindexing_wiki(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -836,7 +840,7 @@ fn reindexing_wiki(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -889,7 +893,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -905,7 +909,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -967,7 +971,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -983,7 +987,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1014,7 +1018,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1030,7 +1034,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1057,7 +1061,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1073,7 +1077,7 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1125,7 +1129,7 @@ fn indexing_movies_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1141,7 +1145,7 @@ fn indexing_movies_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1192,7 +1196,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1208,7 +1212,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1238,7 +1242,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1254,7 +1258,7 @@ fn reindexing_movies_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1307,7 +1311,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1323,7 +1327,7 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1372,7 +1376,7 @@ fn delete_documents_from_ids(index: Index, document_ids_to_delete: Vec<RoaringBi
             Some(primary_key),
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1422,7 +1426,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1438,7 +1442,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1468,7 +1472,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1484,7 +1488,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1510,7 +1514,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1526,7 +1530,7 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1601,7 +1605,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1617,7 +1621,7 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1693,7 +1697,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1709,7 +1713,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1777,7 +1781,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1793,7 +1797,7 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1845,7 +1849,7 @@ fn indexing_geo(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1861,7 +1865,7 @@ fn indexing_geo(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1912,7 +1916,7 @@ fn reindexing_geo(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1928,7 +1932,7 @@ fn reindexing_geo(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -1958,7 +1962,7 @@ fn reindexing_geo(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -1974,7 +1978,7 @@ fn reindexing_geo(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )
@@ -2027,7 +2031,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
             &rtxn,
             None,
             &mut new_fields_ids_map,
-            &|| false,
+            &no_cancel,
             Progress::default(),
             None,
         )
@@ -2043,7 +2047,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
             primary_key,
             &document_changes,
             RuntimeEmbedders::default(),
-            &|| false,
+            &no_cancel,
             &Progress::default(),
             &Default::default(),
         )

@@ -15,4 +15,4 @@ time = { version = "0.3.44", features = ["parsing"] }
 
 [build-dependencies]
 anyhow = "1.0.100"
-vergen-git2 = "1.0.7"
+vergen-gitcl = "1.0.8"

@@ -15,7 +15,7 @@ fn emit_git_variables() -> anyhow::Result<()> {
     // Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
     // in the corresponding GitHub workflow (publish_docker.yml).
     // This is due to the Dockerfile building the binary outside of the git directory.
-    let mut builder = vergen_git2::Git2Builder::default();
+    let mut builder = vergen_gitcl::GitclBuilder::default();
 
     builder.branch(true);
     builder.commit_timestamp(true);
@@ -25,5 +25,5 @@ fn emit_git_variables() -> anyhow::Result<()> {
 
     let git2 = builder.build()?;
 
-    vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
+    vergen_gitcl::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
 }

crates/build-info/src/main.rs (new file, 6 lines)

@@ -0,0 +1,6 @@
+use build_info::BuildInfo;
+
+fn main() {
+    let info = BuildInfo::from_build();
+    dbg!(info);
+}
@@ -6,7 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
 use meilisearch_types::heed::{Database, RoTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
 use meilisearch_types::tasks::{Details, Kind, Status, Task};
-use meilisearch_types::versioning;
+use meilisearch_types::versioning::{self, VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
 use roaring::RoaringBitmap;
 
 use crate::index_mapper::IndexMapper;
@@ -320,7 +320,11 @@ fn snapshot_details(d: &Details) -> String {
             format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
         }
         Details::UpgradeDatabase { from, to } => {
-            format!("{{ from: {from:?}, to: {to:?} }}")
+            if to == &(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) {
+                format!("{{ from: {from:?}, to: [current version] }}")
+            } else {
+                format!("{{ from: {from:?}, to: {to:?} }}")
+            }
         }
         Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
             format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
@@ -400,7 +404,21 @@ pub fn snapshot_batch(batch: &Batch) -> String {
 
     snap.push('{');
     snap.push_str(&format!("uid: {uid}, "));
-    snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
+    let details = if let Some(upgrade_to) = &details.upgrade_to {
+        if upgrade_to.as_str()
+            == format!("v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}").as_str()
+        {
+            let mut details = details.clone();
+
+            details.upgrade_to = Some("[current version]".into());
+            serde_json::to_string(&details).unwrap()
+        } else {
+            serde_json::to_string(details).unwrap()
+        }
+    } else {
+        serde_json::to_string(details).unwrap()
+    };
+    snap.push_str(&format!("details: {details}, "));
     snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
     if !embedder_stats.skip_serializing() {
         snap.push_str(&format!(
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [4,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
 2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
 3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 ----------------------------------------------------------------------
 ### Status:
 enqueued [0,]

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 ----------------------------------------------------------------------
 ### Status:

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 ----------------------------------------------------------------------
 ### Status:
@@ -37,7 +37,7 @@ catto [1,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 ----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
 []
 ----------------------------------------------------------------------
 ### All Tasks:
-0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 28, 2) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
+0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: [current version] }, kind: UpgradeDatabase { from: (1, 12, 0) }}
 1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
 2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
 3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.28.2"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
+0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"[current version]"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,7 +1,7 @@
 use anyhow::bail;
 use meilisearch_types::heed::{Env, RwTxn, WithoutTls};
 use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
-use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
+use meilisearch_types::versioning;
 use time::OffsetDateTime;
 use tracing::info;
 
@@ -9,83 +9,82 @@ use crate::queue::TaskQueue;
 use crate::versioning::Versioning;
 
 trait UpgradeIndexScheduler {
-    fn upgrade(
-        &self,
-        env: &Env<WithoutTls>,
-        wtxn: &mut RwTxn,
-        original: (u32, u32, u32),
-    ) -> anyhow::Result<()>;
-    fn target_version(&self) -> (u32, u32, u32);
+    fn upgrade(&self, env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()>;
+    /// Whether the migration should be applied, depending on the initial version of the index scheduler before
+    /// any migration was applied
+    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;
+    /// A progress-centric description of the migration
+    fn description(&self) -> &'static str;
 }
 
+/// Upgrade the index scheduler to the binary version.
+///
+/// # Warning
+///
+/// The current implementation uses a single wtxn to the index scheduler for the whole duration of the upgrade.
+/// If migrations start taking take a long time, it might prevent tasks from being registered.
+/// If this issue manifests, then it can be mitigated by adding a `fn target_version` to `UpgradeIndexScheduler`,
+/// to be able to write intermediate versions and drop the wtxn between applying migrations.
 pub fn upgrade_index_scheduler(
     env: &Env<WithoutTls>,
     versioning: &Versioning,
-    from: (u32, u32, u32),
-    to: (u32, u32, u32),
+    initial_version: (u32, u32, u32),
 ) -> anyhow::Result<()> {
-    let current_major = to.0;
-    let current_minor = to.1;
-    let current_patch = to.2;
+    let target_major: u32 = versioning::VERSION_MAJOR;
+    let target_minor: u32 = versioning::VERSION_MINOR;
+    let target_patch: u32 = versioning::VERSION_PATCH;
+    let target_version = (target_major, target_minor, target_patch);
 
-    let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
-        // This is the last upgrade function, it will be called when the index is up to date.
-        // any other upgrade function should be added before this one.
-        &ToCurrentNoOp {},
-    ];
+    if initial_version == target_version {
+        return Ok(());
+    }
 
-    let start = match from {
-        (1, 12, _) => 0,
-        (1, 13, _) => 0,
-        (1, 14, _) => 0,
-        (1, 15, _) => 0,
-        (1, 16, _) => 0,
-        (1, 17, _) => 0,
-        (1, 18, _) => 0,
-        (1, 19, _) => 0,
-        (1, 20, _) => 0,
-        (1, 21, _) => 0,
-        (1, 22, _) => 0,
-        (1, 23, _) => 0,
-        (1, 24, _) => 0,
-        (1, 25, _) => 0,
-        (1, 26, _) => 0,
-        (1, 27, _) => 0,
-        (1, 28, _) => 0,
-        (major, minor, patch) => {
-            if major > current_major
-                || (major == current_major && minor > current_minor)
-                || (major == current_major && minor == current_minor && patch > current_patch)
-            {
-                bail!(
-                    "Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
-                );
-            } else if major < 1 || (major == current_major && minor < 12) {
-                bail!(
-                    "Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
-                );
-            } else {
-                bail!("Unknown database version: v{major}.{minor}.{patch}");
-            }
-        }
-    };
+    let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[
+        // List all upgrade functions to apply in order here.
+    ];
 
-    info!("Upgrading the task queue");
-    let mut local_from = from;
-    for upgrade in upgrade_functions[start..].iter() {
-        let target = upgrade.target_version();
-        info!(
-            "Upgrading from v{}.{}.{} to v{}.{}.{}",
-            local_from.0, local_from.1, local_from.2, target.0, target.1, target.2
-        );
-        let mut wtxn = env.write_txn()?;
-        upgrade.upgrade(env, &mut wtxn, local_from)?;
-        versioning.set_version(&mut wtxn, target)?;
-        wtxn.commit()?;
-        local_from = target;
-    }
+    let (initial_major, initial_minor, initial_patch) = initial_version;
+
+    if initial_version > target_version {
+        bail!(
+            "Database version {initial_major}.{initial_minor}.{initial_patch} is higher than the Meilisearch version {target_major}.{target_minor}.{target_patch}. Downgrade is not supported",
+        );
+    }
+
+    if initial_version < (1, 12, 0) {
+        bail!(
+            "Database version {initial_major}.{initial_minor}.{initial_patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{initial_major}.{initial_minor}.{initial_patch} and import it in the v{target_major}.{target_minor}.{target_patch}",
+        );
+    }
+
+    info!("Upgrading the task queue");
+    let mut wtxn = env.write_txn()?;
+    let migration_count = upgrade_functions.len();
+    for (migration_index, upgrade) in upgrade_functions.iter().enumerate() {
+        if upgrade.must_upgrade(initial_version) {
+            info!(
+                "[{migration_index}/{migration_count}]Applying migration: {}",
+                upgrade.description()
+            );
+
+            upgrade.upgrade(env, &mut wtxn)?;
+
+            info!(
+                "[{}/{migration_count}]Migration applied: {}",
+                migration_index + 1,
+                upgrade.description()
+            )
+        } else {
+            info!(
+                "[{migration_index}/{migration_count}]Skipping unnecessary migration: {}",
+                upgrade.description()
+            )
+        }
+    }
+
+    versioning.set_version(&mut wtxn, target_version)?;
+    info!("Task queue upgraded, spawning the upgrade database task");
 
     let queue = TaskQueue::new(env, &mut wtxn)?;
     let uid = queue.next_task_id(&wtxn)?;
     queue.register(
@@ -98,9 +97,9 @@ pub fn upgrade_index_scheduler(
             finished_at: None,
             error: None,
             canceled_by: None,
-            details: Some(Details::UpgradeDatabase { from, to }),
+            details: Some(Details::UpgradeDatabase { from: initial_version, to: target_version }),
             status: Status::Enqueued,
-            kind: KindWithContent::UpgradeDatabase { from },
+            kind: KindWithContent::UpgradeDatabase { from: initial_version },
             network: None,
             custom_metadata: None,
         },
@@ -109,21 +108,3 @@ pub fn upgrade_index_scheduler(
 
     Ok(())
 }
-
-#[allow(non_camel_case_types)]
-struct ToCurrentNoOp {}
-
-impl UpgradeIndexScheduler for ToCurrentNoOp {
-    fn upgrade(
-        &self,
-        _env: &Env<WithoutTls>,
-        _wtxn: &mut RwTxn,
-        _original: (u32, u32, u32),
-    ) -> anyhow::Result<()> {
-        Ok(())
-    }
-
-    fn target_version(&self) -> (u32, u32, u32) {
-        (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
-    }
-}
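The reworked trait separates "should this migration run" from "what version to stamp": migrations are predicated on the version the database started from, and the final version is written once after the loop. A sketch of a concrete migration under the new shape; `FixSomething` and its `(1, 29, 0)` cutoff are invented for illustration, only the trait itself comes from the diff above:

    struct FixSomething;

    impl UpgradeIndexScheduler for FixSomething {
        fn upgrade(&self, _env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> anyhow::Result<()> {
            // Rewrite whatever this migration fixes, inside the single
            // shared wtxn mentioned in the # Warning above.
            let _ = wtxn;
            Ok(())
        }

        // Runs only for databases created before the fix shipped. Rust
        // compares (u32, u32, u32) tuples lexicographically, which is also
        // what makes the downgrade and too-old checks above correct.
        fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
            initial_version < (1, 29, 0)
        }

        fn description(&self) -> &'static str {
            "fixing something that was wrong before v1.29"
        }
    }

Such a migration would then be listed in `upgrade_functions` and applied or skipped per `must_upgrade`, with the `[i/n]` progress logs shown above.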
@@ -64,14 +64,7 @@ impl Versioning {
         };
         wtxn.commit()?;
 
-        let bin_major: u32 = versioning::VERSION_MAJOR;
-        let bin_minor: u32 = versioning::VERSION_MINOR;
-        let bin_patch: u32 = versioning::VERSION_PATCH;
-        let to = (bin_major, bin_minor, bin_patch);
-
-        if from != to {
-            upgrade_index_scheduler(env, &this, from, to)?;
-        }
+        upgrade_index_scheduler(env, &this, from)?;
 
         // Once we reach this point it means the upgrade process, if there was one is entirely finished
         // we can safely say we reached the latest version of the index scheduler
@@ -1,6 +1,7 @@
-use serde::{Deserialize, Serialize};
 use std::collections::BTreeMap;
 
+use serde::{Deserialize, Serialize};
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
 #[serde(rename_all = "camelCase")]
 pub struct Network {
@@ -197,7 +197,7 @@ test_setting_routes!(
     {
         setting: vector_store,
        update_verb: patch,
-        default_value: null
+        default_value: "experimental"
    },
 );
@@ -42,8 +42,16 @@ async fn version_too_old() {
     std::fs::create_dir_all(&db_path).unwrap();
     std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
     let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
-    let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
-    snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.28.2");
+    let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err().to_string();
+
+    let major = meilisearch_types::versioning::VERSION_MAJOR;
+    let minor = meilisearch_types::versioning::VERSION_MINOR;
+    let patch = meilisearch_types::versioning::VERSION_PATCH;
+
+    let current_version = format!("{major}.{minor}.{patch}");
+    let err = err.replace(&current_version, "[current version]");
+
+    snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v[current version]");
 }
 
 #[actix_rt::test]
@@ -54,11 +62,21 @@ async fn version_requires_downgrade() {
     std::fs::create_dir_all(&db_path).unwrap();
     let major = meilisearch_types::versioning::VERSION_MAJOR;
     let minor = meilisearch_types::versioning::VERSION_MINOR;
-    let patch = meilisearch_types::versioning::VERSION_PATCH + 1;
-    std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
+    let mut patch = meilisearch_types::versioning::VERSION_PATCH;
+
+    let current_version = format!("{major}.{minor}.{patch}");
+    patch += 1;
+    let future_version = format!("{major}.{minor}.{patch}");
+
+    std::fs::write(db_path.join("VERSION"), &future_version).unwrap();
     let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
     let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
-    snapshot!(err, @"Database version 1.28.3 is higher than the Meilisearch version 1.28.2. Downgrade is not supported");
+
+    let err = err.to_string();
+    let err = err.replace(&current_version, "[current version]");
+    let err = err.replace(&future_version, "[future version]");
+
+    snapshot!(err, @"Database version [future version] is higher than the Meilisearch version [current version]. Downgrade is not supported");
 }
 
 #[actix_rt::test]
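Both tests now normalize the compiled version out of the error string before snapshotting, so the inline snapshots survive releases. The pattern could be factored into a helper along these lines (`redact_current_version` is a hypothetical name, not a helper from this diff):

    fn redact_current_version(message: &str) -> String {
        let major = meilisearch_types::versioning::VERSION_MAJOR;
        let minor = meilisearch_types::versioning::VERSION_MINOR;
        let patch = meilisearch_types::versioning::VERSION_PATCH;
        // Same substitution the two tests above perform inline.
        message.replace(&format!("{major}.{minor}.{patch}"), "[current version]")
    }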
@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "stats": {
     "totalNbTasks": 1,

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "stats": {
     "totalNbTasks": 1,

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "stats": {
     "totalNbTasks": 1,

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "error": null,
   "duration": "[duration]",

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "error": null,
   "duration": "[duration]",

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "error": null,
   "duration": "[duration]",

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "progress": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "stats": {
     "totalNbTasks": 1,

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
   "canceledBy": null,
   "details": {
     "upgradeFrom": "v1.12.0",
-    "upgradeTo": "v1.28.2"
+    "upgradeTo": "[current version]"
   },
   "error": null,
   "duration": "[duration]",
@@ -166,55 +166,55 @@ async fn check_the_index_scheduler(server: &Server) {
     // We rewrite the first task for all calls because it may be the upgrade database with unknown dates and duration.
     // The other tasks should NOT change
     let (tasks, _) = server.tasks_filter("limit=1000").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "the_whole_task_queue_once_everything_has_been_processed");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "the_whole_task_queue_once_everything_has_been_processed");
     let (batches, _) = server.batches_filter("limit=1000").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "the_whole_batch_queue_once_everything_has_been_processed");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "the_whole_batch_queue_once_everything_has_been_processed");
 
     // Tests all the tasks query parameters
     let (tasks, _) = server.tasks_filter("uids=10").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_uids_equal_10");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_uids_equal_10");
     let (tasks, _) = server.tasks_filter("batchUids=10").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_batchUids_equal_10");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_batchUids_equal_10");
     let (tasks, _) = server.tasks_filter("statuses=canceled").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_statuses_equal_canceled");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_statuses_equal_canceled");
     // types has already been tested above to retrieve the upgrade database
     let (tasks, _) = server.tasks_filter("canceledBy=19").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_canceledBy_equal_19");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_canceledBy_equal_19");
     let (tasks, _) = server.tasks_filter("beforeEnqueuedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
     let (tasks, _) = server.tasks_filter("afterEnqueuedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
     let (tasks, _) = server.tasks_filter("beforeStartedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
     let (tasks, _) = server.tasks_filter("afterStartedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterStartedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterStartedAt_equal_2025-01-16T16_47_41");
     let (tasks, _) = server.tasks_filter("beforeFinishedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
     let (tasks, _) = server.tasks_filter("afterFinishedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(tasks, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(tasks, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]" }), name: "tasks_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
 
     // Tests all the batches query parameters
     let (batches, _) = server.batches_filter("uids=10").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_uids_equal_10");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_uids_equal_10");
     let (batches, _) = server.batches_filter("batchUids=10").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_batchUids_equal_10");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_batchUids_equal_10");
     let (batches, _) = server.batches_filter("statuses=canceled").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_statuses_equal_canceled");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_statuses_equal_canceled");
     // types has already been tested above to retrieve the upgrade database
     let (batches, _) = server.batches_filter("canceledBy=19").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_canceledBy_equal_19");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_canceledBy_equal_19");
     let (batches, _) = server.batches_filter("beforeEnqueuedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
     let (batches, _) = server.batches_filter("afterEnqueuedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
     let (batches, _) = server.batches_filter("beforeStartedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
     let (batches, _) = server.batches_filter("afterStartedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterStartedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterStartedAt_equal_2025-01-16T16_47_41");
     let (batches, _) = server.batches_filter("beforeFinishedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
     let (batches, _) = server.batches_filter("afterFinishedAt=2025-01-16T16:47:41Z").await;
-    snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
+    snapshot!(json_string!(batches, { ".results[0].details.upgradeTo" => "[current version]", ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
 
     let (stats, _) = server.stats().await;
     assert_json_snapshot!(stats, {
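Every pair above differs by a single added selector, `.results[0].details.upgradeTo => "[current version]"`, in the `json_string!` filter list. The mechanism, reduced to a minimal sketch (the JSON value below is invented; `json_string!` and its selector syntax are the ones used throughout this file):

    // inside a #[test] body:
    use meili_snap::json_string;

    // json_string! rewrites the selected paths before rendering, so
    // volatile fields appear as stable placeholders in the snapshot.
    let tasks = serde_json::json!({
        "results": [{ "details": { "upgradeTo": "v9.9.9" }, "duration": "PT0.5S" }]
    });
    let rendered = json_string!(tasks, {
        ".results[0].details.upgradeTo" => "[current version]",
        ".results[0].duration" => "[duration]",
    });
    assert!(rendered.contains("[current version]"));
    assert!(!rendered.contains("v9.9.9"));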
@@ -104,8 +104,8 @@ async fn binary_quantize_before_sending_documents() {
           "manual": {
             "embeddings": [
               [
-                -1.0,
-                -1.0,
+                0.0,
+                0.0,
                 1.0
               ]
             ],
@@ -122,7 +122,7 @@ async fn binary_quantize_before_sending_documents() {
               [
                 1.0,
                 1.0,
-                -1.0
+                0.0
               ]
             ],
             "regenerate": false
@@ -191,8 +191,8 @@ async fn binary_quantize_after_sending_documents() {
           "manual": {
             "embeddings": [
               [
-                -1.0,
-                -1.0,
+                0.0,
+                0.0,
                 1.0
               ]
             ],
@@ -209,7 +209,7 @@ async fn binary_quantize_after_sending_documents() {
               [
                 1.0,
                 1.0,
-                -1.0
+                0.0
              ]
             ],
             "regenerate": false
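In both tests the expected binary-quantized vectors change from a `{-1, 1}` rendering to `{0, 1}` for non-positive components. The cutoff at zero is inferred from the snapshot values above rather than stated in the diff, but the implied per-component mapping is:

    // Inferred rendering of one quantized bit per component: a set bit
    // reads back as 1.0 and a cleared bit now reads back as 0.0 instead
    // of -1.0.
    fn render_quantized(components: &[f32]) -> Vec<f32> {
        components.iter().map(|&x| if x > 0.0 { 1.0 } else { 0.0 }).collect()
    }

    fn main() {
        // Matches the updated snapshot: [1.0, 1.0, -1.0] now reads back
        // as [1.0, 1.0, 0.0].
        assert_eq!(render_quantized(&[1.0, 1.0, -1.0]), vec![1.0, 1.0, 0.0]);
    }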
43  crates/meilisearch/tests/vector/huggingface.rs  Normal file
@@ -0,0 +1,43 @@
+use meili_snap::snapshot;
+
+use crate::common::{GetAllDocumentsOptions, Server};
+use crate::json;
+
+#[actix_rt::test]
+async fn hf_bge_m3_force_cls_settings() {
+    let server = Server::new_shared();
+    let index = server.unique_index();
+
+    let (response, code) = index
+        .update_settings(json!({
+            "embedders": {
+                "default": {
+                    "source": "huggingFace",
+                    "model": "baai/bge-m3",
+                    "revision": "5617a9f61b028005a4858fdac845db406aefb181",
+                    "pooling": "forceCls",
+                    // minimal template to allow potential document embedding if used later
+                    "documentTemplate": "{{doc.title}}"
+                }
+            }
+        }))
+        .await;
+    snapshot!(code, @"202 Accepted");
+    server.wait_task(response.uid()).await.succeeded();
+
+    // Try to embed one simple document
+    let (task, code) =
+        index.add_documents(json!([{ "id": 1, "title": "Hello world" }]), None).await;
+    snapshot!(code, @"202 Accepted");
+    server.wait_task(task.uid()).await.succeeded();
+
+    // Retrieve the document with vectors and assert embeddings were produced
+    let (documents, _code) = index
+        .get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
+        .await;
+    let has_vectors = documents["results"][0]["_vectors"]["default"]["embeddings"]
+        .as_array()
+        .map(|a| !a.is_empty())
+        .unwrap_or(false);
+    snapshot!(has_vectors, @"true");
+}
@@ -1,5 +1,6 @@
 mod binary_quantized;
 mod fragments;
+mod huggingface;
 #[cfg(feature = "test-ollama")]
 mod ollama;
 mod openai;
@@ -500,13 +500,6 @@ async fn test_both_apis() {
     snapshot!(code, @"200 OK");
     snapshot!(json_string!(response["hits"]), @r###"
     [
-      {
-        "id": 0,
-        "name": "kefir",
-        "gender": "M",
-        "birthyear": 2023,
-        "breed": "Patou"
-      },
       {
         "id": 2,
         "name": "Vénus",
@@ -527,6 +520,13 @@ async fn test_both_apis() {
         "gender": "M",
         "birthyear": 1995,
         "breed": "Labrador Retriever"
-      }
+      },
+      {
+        "id": 0,
+        "name": "kefir",
+        "gender": "M",
+        "birthyear": 2023,
+        "breed": "Patou"
+      }
     ]
     "###);
@@ -540,13 +540,6 @@ async fn test_both_apis() {
     snapshot!(code, @"200 OK");
     snapshot!(json_string!(response["hits"]), @r###"
     [
-      {
-        "id": 0,
-        "name": "kefir",
-        "gender": "M",
-        "birthyear": 2023,
-        "breed": "Patou"
-      },
       {
         "id": 2,
         "name": "Vénus",
@@ -567,6 +560,13 @@ async fn test_both_apis() {
         "gender": "M",
         "birthyear": 1995,
         "breed": "Labrador Retriever"
-      }
+      },
+      {
+        "id": 0,
+        "name": "kefir",
+        "gender": "M",
+        "birthyear": 2023,
+        "breed": "Patou"
+      }
     ]
     "###);
@@ -581,18 +581,11 @@ async fn test_both_apis() {
     snapshot!(json_string!(response["hits"]), @r###"
     [
       {
-        "id": 0,
-        "name": "kefir",
+        "id": 1,
+        "name": "Intel",
         "gender": "M",
-        "birthyear": 2023,
-        "breed": "Patou"
-      },
-      {
-        "id": 3,
-        "name": "Max",
-        "gender": "M",
-        "birthyear": 1995,
-        "breed": "Labrador Retriever"
+        "birthyear": 2011,
+        "breed": "Beagle"
       },
       {
         "id": 2,
@@ -602,11 +595,18 @@ async fn test_both_apis() {
         "breed": "Jack Russel Terrier"
       },
       {
-        "id": 1,
-        "name": "Intel",
+        "id": 3,
+        "name": "Max",
         "gender": "M",
-        "birthyear": 2011,
-        "breed": "Beagle"
+        "birthyear": 1995,
+        "breed": "Labrador Retriever"
+      },
+      {
+        "id": 0,
+        "name": "kefir",
+        "gender": "M",
+        "birthyear": 2023,
+        "breed": "Patou"
       }
     ]
     "###);
@@ -621,18 +621,11 @@ async fn test_both_apis() {
     snapshot!(json_string!(response["hits"]), @r###"
     [
       {
-        "id": 0,
-        "name": "kefir",
+        "id": 1,
+        "name": "Intel",
         "gender": "M",
-        "birthyear": 2023,
-        "breed": "Patou"
-      },
-      {
-        "id": 3,
-        "name": "Max",
-        "gender": "M",
-        "birthyear": 1995,
-        "breed": "Labrador Retriever"
+        "birthyear": 2011,
+        "breed": "Beagle"
       },
       {
         "id": 2,
@@ -642,11 +635,18 @@ async fn test_both_apis() {
         "breed": "Jack Russel Terrier"
       },
      {
-        "id": 1,
-        "name": "Intel",
+        "id": 3,
+        "name": "Max",
         "gender": "M",
-        "birthyear": 2011,
-        "breed": "Beagle"
+        "birthyear": 1995,
+        "breed": "Labrador Retriever"
+      },
+      {
+        "id": 0,
+        "name": "kefir",
+        "gender": "M",
+        "birthyear": 2023,
+        "breed": "Patou"
       }
     ]
     "###);
@@ -661,18 +661,11 @@ async fn test_both_apis() {
     snapshot!(json_string!(response["hits"]), @r###"
     [
       {
-        "id": 0,
-        "name": "kefir",
+        "id": 1,
+        "name": "Intel",
         "gender": "M",
-        "birthyear": 2023,
-        "breed": "Patou"
-      },
-      {
-        "id": 3,
-        "name": "Max",
-        "gender": "M",
-        "birthyear": 1995,
-        "breed": "Labrador Retriever"
+        "birthyear": 2011,
+        "breed": "Beagle"
       },
       {
         "id": 2,
@@ -682,11 +675,18 @@ async fn test_both_apis() {
         "breed": "Jack Russel Terrier"
       },
       {
-        "id": 1,
-        "name": "Intel",
+        "id": 3,
+        "name": "Max",
         "gender": "M",
-        "birthyear": 2011,
-        "breed": "Beagle"
+        "birthyear": 1995,
+        "breed": "Labrador Retriever"
+      },
+      {
+        "id": 0,
+        "name": "kefir",
+        "gender": "M",
+        "birthyear": 2023,
+        "breed": "Patou"
       }
     ]
     "###);
@@ -701,18 +701,11 @@ async fn test_both_apis() {
     snapshot!(json_string!(response["hits"]), @r###"
     [
       {
-        "id": 0,
-        "name": "kefir",
+        "id": 1,
+        "name": "Intel",
         "gender": "M",
-        "birthyear": 2023,
-        "breed": "Patou"
-      },
-      {
-        "id": 3,
-        "name": "Max",
-        "gender": "M",
-        "birthyear": 1995,
-        "breed": "Labrador Retriever"
+        "birthyear": 2011,
+        "breed": "Beagle"
       },
       {
         "id": 2,
@@ -722,11 +715,18 @@ async fn test_both_apis() {
         "breed": "Jack Russel Terrier"
       },
       {
-        "id": 1,
-        "name": "Intel",
+        "id": 3,
+        "name": "Max",
         "gender": "M",
-        "birthyear": 2011,
-        "breed": "Beagle"
+        "birthyear": 1995,
+        "breed": "Labrador Retriever"
+      },
+      {
+        "id": 0,
+        "name": "kefir",
+        "gender": "M",
+        "birthyear": 2023,
+        "breed": "Patou"
       }
     ]
     "###);
@@ -91,7 +91,7 @@ rhai = { version = "1.23.6", features = [
     "sync",
 ] }
 arroy = "0.6.4-nested-rtxns"
-hannoy = { version = "0.0.9-nested-rtxns-2", features = ["arroy"] }
+hannoy = { version = "0.1.0-nested-rtxns", features = ["arroy"] }
 rand = "0.8.5"
 tracing = "0.1.41"
 ureq = { version = "2.12.1", features = ["json"] }
@@ -281,6 +281,9 @@ impl Index {
                 &mut wtxn,
                 (constants::VERSION_MAJOR, constants::VERSION_MINOR, constants::VERSION_PATCH),
             )?;
+            // The database before v1.29 defaulted to using arroy, so we
+            // need to set it explicitly because the new default is hannoy.
+            this.put_vector_store(&mut wtxn, VectorStoreBackend::Hannoy)?;
         }
         wtxn.commit()?;
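Only `put_vector_store` and the `VectorStoreBackend::Hannoy` variant appear in this hunk. Per the comment, an index that never wrote the key predates the change and should keep arroy, while indexes created from now on record hannoy explicitly. The read side this implies could look like the following sketch; the `vector_store` getter and the `Arroy` fallback direction are assumptions, not code from this diff:

    // Assumed defaulting when reading the backend back: a missing key
    // means the index was created before the key existed, i.e. arroy.
    fn effective_backend(index: &Index, rtxn: &RoTxn<'_>) -> crate::Result<VectorStoreBackend> {
        Ok(index.vector_store(rtxn)?.unwrap_or(VectorStoreBackend::Arroy))
    }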
@@ -806,6 +806,10 @@ mod tests {
     use crate::vector::db::IndexEmbeddingConfig;
     use crate::{all_obkv_to_json, db_snap, Filter, FilterableAttributesRule, Search, UserError};
 
+    fn no_cancel() -> bool {
+        false
+    }
+
     #[test]
     fn simple_document_replacement() {
         let index = TempIndex::new();
@@ -1985,7 +1989,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2038,7 +2042,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2057,7 +2061,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
RuntimeEmbedders::default(),
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2127,7 +2131,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2146,7 +2150,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
RuntimeEmbedders::default(),
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2317,7 +2321,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2333,7 +2337,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2381,7 +2385,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2397,7 +2401,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2436,7 +2440,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2452,7 +2456,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2490,7 +2494,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2506,7 +2510,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2546,7 +2550,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2562,7 +2566,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2607,7 +2611,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2623,7 +2627,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2661,7 +2665,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2677,7 +2681,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2715,7 +2719,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2731,7 +2735,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2927,7 +2931,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -2943,7 +2947,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -2988,7 +2992,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -3004,7 +3008,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
@@ -3046,7 +3050,7 @@ mod tests {
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
Progress::default(),
|
||||
None,
|
||||
)
|
||||
@@ -3062,7 +3066,7 @@ mod tests {
|
||||
primary_key,
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| false,
|
||||
&no_cancel,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
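
Note: the repetitive hunks above swap the inline `&|| false` closures for a reference to the shared `no_cancel` helper. Both coerce to the same cancellation-callback parameter; a minimal sketch of why, with a hypothetical trimmed-down signature (the real indexing entry points take many more arguments):

// Minimal sketch: the indexer's cancellation parameter is generic over
// `Fn() -> bool`, so a named fn item and an inline closure are
// interchangeable. The signature below is hypothetical.
fn index_documents<F: Fn() -> bool + Sync>(must_stop_processing: &F) {
    if must_stop_processing() {
        return; // abort early when cancellation is requested
    }
    // ... indexing work ...
}

fn no_cancel() -> bool {
    false
}

fn main() {
    index_documents(&|| false); // old call sites: a fresh closure each time
    index_documents(&no_cancel); // new call sites: one shared, named helper
}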

@@ -5,103 +5,36 @@ mod v1_15;
mod v1_16;

use heed::RwTxn;
use v1_12::{V1_12_3_To_V1_13_0, V1_12_To_V1_12_3};
use v1_13::{V1_13_0_To_V1_13_1, V1_13_1_To_Latest_V1_13};
use v1_14::Latest_V1_13_To_Latest_V1_14;
use v1_15::Latest_V1_14_To_Latest_V1_15;
use v1_16::Latest_V1_15_To_V1_16_0;
use v1_12::{FixFieldDistribution, RecomputeStats};
use v1_13::AddNewStats;
use v1_14::UpgradeArroyVersion;
use v1_15::RecomputeWordFst;
use v1_16::SwitchToMultimodal;

use crate::constants::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use crate::progress::{Progress, VariableNameStep};
use crate::{Index, InternalError, Result};

trait UpgradeIndex {
/// Returns `true` if `upgrade` should be called when the index started with version `initial_version`.
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool;

/// Returns `true` if the index scheduler must regenerate its cached stats.
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
original: (u32, u32, u32),
progress: Progress,
) -> Result<bool>;
fn target_version(&self) -> (u32, u32, u32);
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool>;

/// Description of the upgrade for progress display purposes.
fn description(&self) -> &'static str;
}

const UPGRADE_FUNCTIONS: &[&dyn UpgradeIndex] = &[
&V1_12_To_V1_12_3 {},
&V1_12_3_To_V1_13_0 {},
&V1_13_0_To_V1_13_1 {},
&V1_13_1_To_Latest_V1_13 {},
&Latest_V1_13_To_Latest_V1_14 {},
&Latest_V1_14_To_Latest_V1_15 {},
&Latest_V1_15_To_V1_16_0 {},
&ToTargetNoOp { target: (1, 18, 0) },
&ToTargetNoOp { target: (1, 19, 0) },
&ToTargetNoOp { target: (1, 20, 0) },
&ToTargetNoOp { target: (1, 21, 0) },
&ToTargetNoOp { target: (1, 22, 0) },
&ToTargetNoOp { target: (1, 23, 0) },
&ToTargetNoOp { target: (1, 24, 0) },
&ToTargetNoOp { target: (1, 25, 0) },
&ToTargetNoOp { target: (1, 26, 0) },
&ToTargetNoOp { target: (1, 27, 0) },
&ToTargetNoOp { target: (1, 28, 0) },
// This is the last upgrade function; it will be called when the index is up to date.
// Any other upgrade function should be added before this one.
&ToCurrentNoOp {},
&FixFieldDistribution {},
&RecomputeStats {},
&AddNewStats {},
&UpgradeArroyVersion {},
&RecomputeWordFst {},
&SwitchToMultimodal {},
];

/// Causes a compile-time error if the argument is not in range of `0..UPGRADE_FUNCTIONS.len()`
macro_rules! function_index {
($start:expr) => {{
const _CHECK_INDEX: () = {
if $start >= $crate::update::upgrade::UPGRADE_FUNCTIONS.len() {
panic!("upgrade functions out of range")
}
};

$start
}};
}

const fn start(from: (u32, u32, u32)) -> Option<usize> {
let start = match from {
(1, 12, 0..=2) => function_index!(0),
(1, 12, 3..) => function_index!(1),
(1, 13, 0) => function_index!(2),
(1, 13, _) => function_index!(4),
(1, 14, _) => function_index!(5),
// We must handle the current version in the match because in case of a failure some indexes may have been upgraded but not others.
(1, 15, _) => function_index!(6),
(1, 16, _) | (1, 17, _) => function_index!(7),
(1, 18, _) => function_index!(8),
(1, 19, _) => function_index!(9),
(1, 20, _) => function_index!(10),
(1, 21, _) => function_index!(11),
(1, 22, _) => function_index!(12),
(1, 23, _) => function_index!(13),
(1, 24, _) => function_index!(14),
(1, 25, _) => function_index!(15),
(1, 26, _) => function_index!(16),
(1, 27, _) => function_index!(17),
(1, 28, _) => function_index!(18),
// We deliberately don't add a placeholder with (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) here to force manually
// considering dumpless upgrade.
(_major, _minor, _patch) => return None,
};

Some(start)
}

/// Causes a compile-time error if the latest package cannot be upgraded.
///
/// This serves as a reminder to consider the proper dumpless upgrade implementation when changing the package version.
const _CHECK_PACKAGE_CAN_UPGRADE: () = {
if start((VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)).is_none() {
panic!("cannot upgrade from latest package version")
}
};

/// Return true if the cached stats of the index must be regenerated
pub fn upgrade<MSP>(
wtxn: &mut RwTxn,
@@ -113,79 +46,34 @@ pub fn upgrade<MSP>(
where
MSP: Fn() -> bool + Sync,
{
let from = index.get_version(wtxn)?.unwrap_or(db_version);
let upgrade_functions = UPGRADE_FUNCTIONS;

let start =
start(from).ok_or_else(|| InternalError::CannotUpgradeToVersion(from.0, from.1, from.2))?;
let initial_version = index.get_version(wtxn)?.unwrap_or(db_version);

enum UpgradeVersion {}
let upgrade_path = &UPGRADE_FUNCTIONS[start..];

let mut current_version = from;
let mut regenerate_stats = false;
for (i, upgrade) in upgrade_path.iter().enumerate() {
for (i, upgrade) in upgrade_functions.iter().enumerate() {
if (must_stop_processing)() {
return Err(crate::Error::InternalError(InternalError::AbortedIndexation));
}
let target = upgrade.target_version();
progress.update_progress(VariableNameStep::<UpgradeVersion>::new(
format!(
"Upgrading from v{}.{}.{} to v{}.{}.{}",
current_version.0,
current_version.1,
current_version.2,
target.0,
target.1,
target.2
),
i as u32,
upgrade_path.len() as u32,
));
regenerate_stats |= upgrade.upgrade(wtxn, index, from, progress.clone())?;
index.put_version(wtxn, target)?;
current_version = target;
if upgrade.must_upgrade(initial_version) {
regenerate_stats |= upgrade.upgrade(wtxn, index, progress.clone())?;
progress.update_progress(VariableNameStep::<UpgradeVersion>::new(
upgrade.description(),
i as u32,
upgrade_functions.len() as u32,
));
} else {
progress.update_progress(VariableNameStep::<UpgradeVersion>::new(
"Skipping migration that must not be applied",
i as u32,
upgrade_functions.len() as u32,
));
}
}

index.put_version(wtxn, (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH))?;

Ok(regenerate_stats)
}

#[allow(non_camel_case_types)]
struct ToCurrentNoOp {}

impl UpgradeIndex for ToCurrentNoOp {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
Ok(false)
}

fn target_version(&self) -> (u32, u32, u32) {
(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
}
}

/// Perform no operation during the upgrade except changing to the specified target version.
#[allow(non_camel_case_types)]
struct ToTargetNoOp {
pub target: (u32, u32, u32),
}

impl UpgradeIndex for ToTargetNoOp {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
Ok(false)
}

fn target_version(&self) -> (u32, u32, u32) {
self.target
}
}
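
Note: with the trait reshaped this way, a migration declares which initial versions it applies to through `must_upgrade` instead of advertising a `target_version`, and the driver stamps the current version once at the end. A minimal sketch of what a future entry could look like, assuming the trait shown above (the struct name and the version bound are hypothetical):

#[allow(non_camel_case_types)]
struct RebuildSomething {}

impl UpgradeIndex for RebuildSomething {
    fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
        // Only indexes created before the (hypothetical) fix need it.
        initial_version < (1, 30, 0)
    }

    fn upgrade(&self, _wtxn: &mut RwTxn, _index: &Index, _progress: Progress) -> Result<bool> {
        // ... perform the actual migration here ...
        Ok(false) // `true` would ask the scheduler to regenerate cached stats
    }

    fn description(&self) -> &'static str {
        "Rebuilding something that was wrong before v1.30.0"
    }
}

The entry would then be appended to `UPGRADE_FUNCTIONS`; the loop in `upgrade` applies or skips it based on `must_upgrade` before stamping the index with the current version.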

@@ -4,17 +4,10 @@ use super::UpgradeIndex;
use crate::progress::Progress;
use crate::{make_enum_progress, Index, Result};

#[allow(non_camel_case_types)]
pub(super) struct V1_12_To_V1_12_3 {}
pub(super) struct FixFieldDistribution {}

impl UpgradeIndex for V1_12_To_V1_12_3 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for FixFieldDistribution {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
make_enum_progress! {
enum FieldDistribution {
RebuildingFieldDistribution,
@@ -25,27 +18,28 @@ impl UpgradeIndex for V1_12_To_V1_12_3 {
Ok(true)
}

fn target_version(&self) -> (u32, u32, u32) {
(1, 12, 3)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 12, 3)
}

fn description(&self) -> &'static str {
"Recomputing field distribution which was wrong before v1.12.3"
}
}

#[allow(non_camel_case_types)]
pub(super) struct V1_12_3_To_V1_13_0 {}
pub(super) struct RecomputeStats {}

impl UpgradeIndex for V1_12_3_To_V1_13_0 {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for RecomputeStats {
fn upgrade(&self, _wtxn: &mut RwTxn, _index: &Index, _progress: Progress) -> Result<bool> {
// recompute the indexes stats
Ok(true)
}

fn target_version(&self) -> (u32, u32, u32) {
(1, 13, 0)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 13, 0)
}

fn description(&self) -> &'static str {
"Recomputing stats"
}
}

@@ -5,17 +5,10 @@ use crate::database_stats::DatabaseStats;
use crate::progress::Progress;
use crate::{make_enum_progress, Index, Result};

#[allow(non_camel_case_types)]
pub(super) struct V1_13_0_To_V1_13_1();
pub(super) struct AddNewStats();

impl UpgradeIndex for V1_13_0_To_V1_13_1 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for AddNewStats {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
make_enum_progress! {
enum DocumentsStats {
CreatingDocumentsStats,
@@ -30,26 +23,11 @@ impl UpgradeIndex for V1_13_0_To_V1_13_1 {
Ok(true)
}

fn target_version(&self) -> (u32, u32, u32) {
(1, 13, 1)
}
}

#[allow(non_camel_case_types)]
pub(super) struct V1_13_1_To_Latest_V1_13();

impl UpgradeIndex for V1_13_1_To_Latest_V1_13 {
fn upgrade(
&self,
_wtxn: &mut RwTxn,
_index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
Ok(false)
}

fn target_version(&self) -> (u32, u32, u32) {
(1, 13, 3)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 13, 1)
}

fn description(&self) -> &'static str {
"Computing newly introduced document stats"
}
}

@@ -5,17 +5,10 @@ use super::UpgradeIndex;
use crate::progress::Progress;
use crate::{make_enum_progress, Index, Result};

#[allow(non_camel_case_types)]
pub(super) struct Latest_V1_13_To_Latest_V1_14();
pub(super) struct UpgradeArroyVersion();

impl UpgradeIndex for Latest_V1_13_To_Latest_V1_14 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for UpgradeArroyVersion {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
make_enum_progress! {
enum VectorStore {
UpdateInternalVersions,
@@ -35,7 +28,11 @@ impl UpgradeIndex for Latest_V1_13_To_Latest_V1_14 {
Ok(false)
}

fn target_version(&self) -> (u32, u32, u32) {
(1, 14, 0)
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 14, 0)
}

fn description(&self) -> &'static str {
"Updating vector store with an internal version"
}
}

@@ -7,25 +7,21 @@ use crate::progress::Progress;
use crate::update::new::indexer::recompute_word_fst_from_word_docids_database;
use crate::{Index, Result};

#[allow(non_camel_case_types)]
pub(super) struct Latest_V1_14_To_Latest_V1_15();
pub(super) struct RecomputeWordFst();

impl UpgradeIndex for Latest_V1_14_To_Latest_V1_15 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for RecomputeWordFst {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, progress: Progress) -> Result<bool> {
// Recompute the word FST from the word docids database.
recompute_word_fst_from_word_docids_database(index, wtxn, &progress)?;

Ok(false)
}
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 15, 0)
}

fn target_version(&self) -> (u32, u32, u32) {
(1, 15, 0)
fn description(&self) -> &'static str {
"Recomputing word FST from word docids database as it was wrong before v1.15.0"
}
}

@@ -6,17 +6,10 @@ use crate::progress::Progress;
use crate::vector::db::{EmbedderInfo, EmbeddingStatus};
use crate::{Index, InternalError, Result};

#[allow(non_camel_case_types)]
pub(super) struct Latest_V1_15_To_V1_16_0();
pub(super) struct SwitchToMultimodal();

impl UpgradeIndex for Latest_V1_15_To_V1_16_0 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
impl UpgradeIndex for SwitchToMultimodal {
fn upgrade(&self, wtxn: &mut RwTxn, index: &Index, _progress: Progress) -> Result<bool> {
let v1_15_indexing_configs = index
.main
.remap_types::<Str, SerdeJson<Vec<super::v1_15::IndexEmbeddingConfig>>>()
@@ -41,8 +34,11 @@ impl UpgradeIndex for Latest_V1_15_To_V1_16_0 {

Ok(false)
}
fn must_upgrade(&self, initial_version: (u32, u32, u32)) -> bool {
initial_version < (1, 16, 0)
}

fn target_version(&self) -> (u32, u32, u32) {
(1, 16, 0)
fn description(&self) -> &'static str {
"Migrating the database for multimodal support"
}
}

@@ -2,6 +2,7 @@ use candle_core::Tensor;
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config as BertConfig, DTYPE};
use candle_transformers::models::modernbert::{Config as ModernConfig, ModernBert};
use candle_transformers::models::xlm_roberta::{Config as XlmRobertaConfig, XLMRobertaModel};
// FIXME: currently we'll be using the hub to retrieve the model; in the future we might want to embed it into Meilisearch itself
use hf_hub::api::sync::Api;
use hf_hub::{Repo, RepoType};
@@ -89,6 +90,7 @@ impl Default for EmbedderOptions {
enum ModelKind {
Bert(BertModel),
Modern(ModernBert),
XlmRoberta(XLMRobertaModel),
}

/// Perform embedding of documents and queries
@@ -304,7 +306,8 @@ impl Embedder {
};

let is_modern = has_arch("modernbert");
tracing::debug!(is_modern, model_type, "detected HF architecture");
let is_xlm_roberta = has_arch("xlm-roberta") || has_arch("xlm_roberta");
tracing::debug!(is_modern, is_xlm_roberta, model_type, "detected HF architecture");

let mut tokenizer = Tokenizer::from_file(&tokenizer_filename)
.map_err(|inner| NewEmbedderError::open_tokenizer(tokenizer_filename, inner))?;
@@ -340,6 +343,18 @@ impl Embedder {
)
})?;
ModelKind::Modern(ModernBert::load(vb, &config).map_err(NewEmbedderError::load_model)?)
} else if is_xlm_roberta {
let config: XlmRobertaConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
)
})?;
ModelKind::XlmRoberta(
XLMRobertaModel::new(&config, vb).map_err(NewEmbedderError::load_model)?,
)
} else {
let config: BertConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
@@ -451,6 +466,19 @@ impl Embedder {
let mask = Tensor::stack(&[mask], 0).map_err(EmbedError::tensor_shape)?;
model.forward(&token_ids, &mask).map_err(EmbedError::model_forward)?
}
ModelKind::XlmRoberta(model) => {
let mut mask_vec = tokens.get_attention_mask().to_vec();
if mask_vec.len() > self.max_len {
mask_vec.truncate(self.max_len);
}
let mask = Tensor::new(mask_vec.as_slice(), &self.device)
.map_err(EmbedError::tensor_shape)?;
let mask = Tensor::stack(&[mask], 0).map_err(EmbedError::tensor_shape)?;
let token_type_ids = token_ids.zeros_like().map_err(EmbedError::tensor_shape)?;
model
.forward(&token_ids, &mask, &token_type_ids, None, None, None)
.map_err(EmbedError::model_forward)?
}
};

let embedding = Self::pooling(embeddings, self.pooling)?;

@@ -1,5 +1,5 @@
use hannoy::distances::{Cosine, Hamming};
use hannoy::ItemId;
use hannoy::{ItemId, Searched};
use heed::{RoTxn, RwTxn, Unspecified};
use ordered_float::OrderedFloat;
use rand::SeedableRng as _;
@@ -974,7 +974,7 @@ impl VectorStore {
}

if let Some(mut ret) = searcher.by_item(rtxn, item)? {
results.append(&mut ret);
results.append(&mut ret.nns);
}
}
results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance));
@@ -1028,10 +1028,9 @@ impl VectorStore {
searcher.candidates(filter);
}

let (res, _degraded) =
&mut searcher
.by_vector_with_cancellation(rtxn, vector, || time_budget.exceeded())?;
results.append(res);
let Searched { mut nns, did_cancel: _ } =
searcher.by_vector_with_cancellation(rtxn, vector, || time_budget.exceeded())?;
results.append(&mut nns);
}

results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance));
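
Note: both call sites above adapt to the hannoy `0.1.0-nested-rtxns` API, which appears to wrap search results in a `Searched` struct rather than returning them bare: `by_item` results now live in its `nns` field, and `by_vector_with_cancellation` no longer returns a `(result, degraded)` tuple. A sketch of the shape the code assumes (field types inferred from the call sites, not copied from hannoy's documentation):

// Assumed shape, inferred from the diff above.
struct Searched {
    nns: Vec<(u32, f32)>, // (item id, distance) pairs
    did_cancel: bool,     // whether the cancellation callback fired
}

fn merge(results: &mut Vec<(u32, f32)>, searched: Searched) {
    let Searched { mut nns, did_cancel: _ } = searched;
    results.append(&mut nns);
}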

@@ -22,6 +22,7 @@ reqwest = { version = "0.12.24", features = [
"json",
"rustls-tls",
], default-features = false }
semver = "1.0.27"
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.145"
sha2 = "0.10.9"
@@ -42,3 +43,4 @@ tracing = "0.1.41"
tracing-subscriber = "0.3.20"
tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
uuid = { version = "1.18.1", features = ["v7", "serde"] }
similar-asserts = "1.7.0"

@@ -1,194 +0,0 @@
use std::collections::BTreeMap;
use std::fmt::Display;
use std::io::Read as _;

use anyhow::{bail, Context as _};
use serde::Deserialize;

use super::assets::{fetch_asset, Asset};
use super::client::{Client, Method};

#[derive(Clone, Deserialize)]
pub struct Command {
pub route: String,
pub method: Method,
#[serde(default)]
pub body: Body,
#[serde(default)]
pub synchronous: SyncMode,
}

#[derive(Default, Clone, Deserialize)]
#[serde(untagged)]
pub enum Body {
Inline {
inline: serde_json::Value,
},
Asset {
asset: String,
},
#[default]
Empty,
}

impl Body {
pub fn get(
self,
assets: &BTreeMap<String, Asset>,
asset_folder: &str,
) -> anyhow::Result<Option<(Vec<u8>, &'static str)>> {
Ok(match self {
Body::Inline { inline: body } => Some((
serde_json::to_vec(&body)
.context("serializing to bytes")
.context("while getting inline body")?,
"application/json",
)),
Body::Asset { asset: name } => Some({
let context = || format!("while getting body from asset '{name}'");
let (mut file, format) =
fetch_asset(&name, assets, asset_folder).with_context(context)?;
let mut buf = Vec::new();
file.read_to_end(&mut buf).with_context(context)?;
(buf, format.to_content_type(&name))
}),
Body::Empty => None,
})
}
}

impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?} {} ({:?})", self.method, self.route, self.synchronous)
}
}

#[derive(Default, Debug, Clone, Copy, Deserialize)]
pub enum SyncMode {
DontWait,
#[default]
WaitForResponse,
WaitForTask,
}

pub async fn run_batch(
client: &Client,
batch: &[Command],
assets: &BTreeMap<String, Asset>,
asset_folder: &str,
) -> anyhow::Result<()> {
let [.., last] = batch else { return Ok(()) };
let sync = last.synchronous;

let mut tasks = tokio::task::JoinSet::new();

for command in batch {
// FIXME: you probably don't want to copy assets every time here
tasks.spawn({
let client = client.clone();
let command = command.clone();
let assets = assets.clone();
let asset_folder = asset_folder.to_owned();

async move { run(client, command, &assets, &asset_folder).await }
});
}

while let Some(result) = tasks.join_next().await {
result
.context("panicked while executing command")?
.context("error while executing command")?;
}

match sync {
SyncMode::DontWait => {}
SyncMode::WaitForResponse => {}
SyncMode::WaitForTask => wait_for_tasks(client).await?,
}

Ok(())
}

async fn wait_for_tasks(client: &Client) -> anyhow::Result<()> {
loop {
let response = client
.get("tasks?statuses=enqueued,processing")
.send()
.await
.context("could not wait for tasks")?;
let response: serde_json::Value = response
.json()
.await
.context("could not deserialize response to JSON")
.context("could not wait for tasks")?;
match response.get("total") {
Some(serde_json::Value::Number(number)) => {
let number = number.as_u64().with_context(|| {
format!("waiting for tasks: could not parse 'total' as integer, got {}", number)
})?;
if number == 0 {
break;
} else {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
continue;
}
}
Some(thing_else) => {
bail!(format!(
"waiting for tasks: could not parse 'total' as a number, got '{thing_else}'"
))
}
None => {
bail!(format!(
"waiting for tasks: expected response to contain 'total', got '{response}'"
))
}
}
}
Ok(())
}

#[tracing::instrument(skip(client, command, assets, asset_folder), fields(command = %command))]
pub async fn run(
client: Client,
mut command: Command,
assets: &BTreeMap<String, Asset>,
asset_folder: &str,
) -> anyhow::Result<()> {
// mem::take the body here to leave an empty body in its place, so that command is not partially moved out
let body = std::mem::take(&mut command.body)
.get(assets, asset_folder)
.with_context(|| format!("while getting body for command {command}"))?;

let request = client.request(command.method.into(), &command.route);

let request = if let Some((body, content_type)) = body {
request.body(body).header(reqwest::header::CONTENT_TYPE, content_type)
} else {
request
};

let response =
request.send().await.with_context(|| format!("error sending command: {}", command))?;

let code = response.status();
if code.is_client_error() {
tracing::error!(%command, %code, "error in workload file");
let response: serde_json::Value = response
.json()
.await
.context("could not deserialize response as JSON")
.context("parsing error in workload file when sending command")?;
bail!("error in workload file: server responded with error code {code} and '{response}'")
} else if code.is_server_error() {
tracing::error!(%command, %code, "server error");
let response: serde_json::Value = response
.json()
.await
.context("could not deserialize response as JSON")
.context("parsing server error when sending command")?;
bail!("server error: server responded with error code {code} and '{response}'")
}

Ok(())
}

@@ -7,9 +7,9 @@ use tokio::task::AbortHandle;
use tracing_trace::processor::span_stats::CallStats;
use uuid::Uuid;

use super::client::Client;
use super::env_info;
use super::workload::Workload;
use super::workload::BenchWorkload;
use crate::common::client::Client;

#[derive(Debug, Clone)]
pub enum DashboardClient {
@@ -89,7 +89,7 @@ impl DashboardClient {
pub async fn create_workload(
&self,
invocation_uuid: Uuid,
workload: &Workload,
workload: &BenchWorkload,
) -> anyhow::Result<Uuid> {
let Self::Client(dashboard_client) = self else { return Ok(Uuid::now_v7()) };

@@ -1,51 +1,36 @@
mod assets;
mod client;
mod command;
mod dashboard;
mod env_info;
mod meili_process;
mod workload;

use std::io::LineWriter;
use std::path::PathBuf;
use crate::common::args::CommonArgs;
use crate::common::logs::setup_logs;
use crate::common::workload::Workload;
use std::{path::PathBuf, sync::Arc};

use anyhow::Context;
use anyhow::{bail, Context};
use clap::Parser;
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::Layer;

use self::client::Client;
use self::workload::Workload;
use crate::common::client::Client;
pub use workload::BenchWorkload;

pub fn default_http_addr() -> String {
"127.0.0.1:7700".to_string()
}
pub fn default_report_folder() -> String {
"./bench/reports/".into()
}

pub fn default_asset_folder() -> String {
"./bench/assets/".into()
}

pub fn default_log_filter() -> String {
"info".into()
}

pub fn default_dashboard_url() -> String {
"http://localhost:9001".into()
}

/// Run benchmarks from a workload
#[derive(Parser, Debug)]
pub struct BenchDeriveArgs {
/// Filename of the workload file, pass multiple filenames
/// to run multiple workloads in the specified order.
///
/// Each workload run will get its own report file.
#[arg(value_name = "WORKLOAD_FILE", last = false)]
workload_file: Vec<PathBuf>,
pub struct BenchArgs {
/// Common arguments shared with other commands
#[command(flatten)]
common: CommonArgs,

/// Meilisearch master keys
#[arg(long)]
pub master_key: Option<String>,

/// URL of the dashboard.
#[arg(long, default_value_t = default_dashboard_url())]
@@ -59,34 +44,14 @@ pub struct BenchDeriveArgs {
#[arg(long, default_value_t = default_report_folder())]
report_folder: String,

/// Directory to store the remote assets.
#[arg(long, default_value_t = default_asset_folder())]
asset_folder: String,

/// Log directives
#[arg(short, long, default_value_t = default_log_filter())]
log_filter: String,

/// Benchmark dashboard API key
#[arg(long)]
api_key: Option<String>,

/// Meilisearch master keys
#[arg(long)]
master_key: Option<String>,

/// Authentication bearer for fetching assets
#[arg(long)]
assets_key: Option<String>,

/// Reason for the benchmark invocation
#[arg(short, long)]
reason: Option<String>,

/// The maximum time in seconds we allow for fetching the task queue before timing out.
#[arg(long, default_value_t = 60)]
tasks_queue_timeout_secs: u64,

/// The path to the binary to run.
///
/// If unspecified, runs `cargo run` after building Meilisearch with `cargo build`.
@@ -94,18 +59,8 @@ pub struct BenchDeriveArgs {
binary_path: Option<PathBuf>,
}

pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
// setup logs
let filter: tracing_subscriber::filter::Targets =
args.log_filter.parse().context("invalid --log-filter")?;

let subscriber = tracing_subscriber::registry().with(
tracing_subscriber::fmt::layer()
.with_writer(|| LineWriter::new(std::io::stderr()))
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(filter),
);
tracing::subscriber::set_global_default(subscriber).context("could not setup logging")?;
pub fn run(args: BenchArgs) -> anyhow::Result<()> {
setup_logs(&args.common.log_filter)?;

// fetch environment and build info
let env = env_info::Environment::generate_from_current_config();
@@ -116,8 +71,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
let _scope = rt.enter();

// setup clients
let assets_client =
Client::new(None, args.assets_key.as_deref(), Some(std::time::Duration::from_secs(3600)))?; // 1h
let assets_client = Client::new(
None,
args.common.assets_key.as_deref(),
Some(std::time::Duration::from_secs(3600)), // 1h
)?;

let dashboard_client = if args.no_dashboard {
dashboard::DashboardClient::new_dry()
@@ -134,11 +92,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
None,
)?;

let meili_client = Client::new(
let meili_client = Arc::new(Client::new(
Some("http://127.0.0.1:7700".into()),
args.master_key.as_deref(),
Some(std::time::Duration::from_secs(args.tasks_queue_timeout_secs)),
)?;
Some(std::time::Duration::from_secs(args.common.tasks_queue_timeout_secs)),
)?);

// enter runtime

@@ -146,11 +104,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
dashboard_client.send_machine_info(&env).await?;

let commit_message = build_info.commit_msg.unwrap_or_default().split('\n').next().unwrap();
let max_workloads = args.workload_file.len();
let max_workloads = args.common.workload_file.len();
let reason: Option<&str> = args.reason.as_deref();
let invocation_uuid = dashboard_client.create_invocation(build_info.clone(), commit_message, env, max_workloads, reason).await?;

tracing::info!(workload_count = args.workload_file.len(), "handling workload files");
tracing::info!(workload_count = args.common.workload_file.len(), "handling workload files");

// main task
let workload_runs = tokio::spawn(
@@ -158,13 +116,17 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
let dashboard_client = dashboard_client.clone();
let mut dashboard_urls = Vec::new();
async move {
for workload_file in args.workload_file.iter() {
for workload_file in args.common.workload_file.iter() {
let workload: Workload = serde_json::from_reader(
std::fs::File::open(workload_file)
.with_context(|| format!("error opening {}", workload_file.display()))?,
)
.with_context(|| format!("error parsing {} as JSON", workload_file.display()))?;

let Workload::Bench(workload) = workload else {
bail!("workload file {} is not a bench workload", workload_file.display());
};

let workload_name = workload.name.clone();

workload::execute(

@@ -1,24 +1,28 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashMap};
use std::fs::File;
use std::io::{Seek as _, Write as _};
use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, Context as _};
use futures_util::TryStreamExt as _;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tokio::task::JoinHandle;
use uuid::Uuid;

use super::assets::Asset;
use super::client::Client;
use super::command::SyncMode;
use super::dashboard::DashboardClient;
use super::BenchDeriveArgs;
use crate::bench::{assets, meili_process};
use super::BenchArgs;
use crate::common::assets::{self, Asset};
use crate::common::client::Client;
use crate::common::command::{run_commands, Command};
use crate::common::instance::Binary;
use crate::common::process::{self, delete_db, start_meili};

#[derive(Deserialize)]
pub struct Workload {
/// A bench workload.
/// Not to be confused with [a test workload](crate::test::workload::Workload).
#[derive(Serialize, Deserialize, Debug)]
pub struct BenchWorkload {
pub name: String,
pub run_count: u16,
pub extra_cli_args: Vec<String>,
@@ -26,30 +30,34 @@ pub struct Workload {
#[serde(default)]
pub target: String,
#[serde(default)]
pub precommands: Vec<super::command::Command>,
pub commands: Vec<super::command::Command>,
pub precommands: Vec<Command>,
pub commands: Vec<Command>,
}

async fn run_commands(
async fn run_workload_commands(
dashboard_client: &DashboardClient,
logs_client: &Client,
meili_client: &Client,
meili_client: &Arc<Client>,
workload_uuid: Uuid,
workload: &Workload,
args: &BenchDeriveArgs,
workload: &BenchWorkload,
args: &BenchArgs,
run_number: u16,
) -> anyhow::Result<JoinHandle<anyhow::Result<File>>> {
let report_folder = &args.report_folder;
let workload_name = &workload.name;
let assets = Arc::new(workload.assets.clone());
let asset_folder = args.common.asset_folder.clone().leak();

for batch in workload
.precommands
.as_slice()
.split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
{
super::command::run_batch(meili_client, batch, &workload.assets, &args.asset_folder)
.await?;
}
run_commands(
meili_client,
&workload.precommands,
0,
&assets,
asset_folder,
&mut HashMap::new(),
false,
)
.await?;

std::fs::create_dir_all(report_folder)
.with_context(|| format!("could not create report directory at {report_folder}"))?;
@@ -59,14 +67,16 @@ async fn run_commands(

let report_handle = start_report(logs_client, trace_filename, &workload.target).await?;

for batch in workload
.commands
.as_slice()
.split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
{
super::command::run_batch(meili_client, batch, &workload.assets, &args.asset_folder)
.await?;
}
run_commands(
meili_client,
&workload.commands,
0,
&assets,
asset_folder,
&mut HashMap::new(),
false,
)
.await?;

let processor =
stop_report(dashboard_client, logs_client, workload_uuid, report_filename, report_handle)
@@ -81,14 +91,14 @@ pub async fn execute(
assets_client: &Client,
dashboard_client: &DashboardClient,
logs_client: &Client,
meili_client: &Client,
meili_client: &Arc<Client>,
invocation_uuid: Uuid,
master_key: Option<&str>,
workload: Workload,
args: &BenchDeriveArgs,
workload: BenchWorkload,
args: &BenchArgs,
binary_path: Option<&Path>,
) -> anyhow::Result<()> {
assets::fetch_assets(assets_client, &workload.assets, &args.asset_folder).await?;
assets::fetch_assets(assets_client, &workload.assets, &args.common.asset_folder).await?;

let workload_uuid = dashboard_client.create_workload(invocation_uuid, &workload).await?;

@@ -129,38 +139,33 @@ pub async fn execute(
async fn execute_run(
dashboard_client: &DashboardClient,
logs_client: &Client,
meili_client: &Client,
meili_client: &Arc<Client>,
workload_uuid: Uuid,
master_key: Option<&str>,
workload: &Workload,
args: &BenchDeriveArgs,
workload: &BenchWorkload,
args: &BenchArgs,
binary_path: Option<&Path>,
run_number: u16,
) -> anyhow::Result<tokio::task::JoinHandle<anyhow::Result<std::fs::File>>> {
meili_process::delete_db();
delete_db().await;

let run_command = match binary_path {
Some(binary_path) => tokio::process::Command::new(binary_path),
None => {
meili_process::build().await?;
let mut command = tokio::process::Command::new("cargo");
command
.arg("run")
.arg("--release")
.arg("-p")
.arg("meilisearch")
.arg("--bin")
.arg("meilisearch")
.arg("--");
command
}
let binary = match binary_path {
Some(binary_path) => Binary {
source: crate::common::instance::BinarySource::Path(binary_path.to_owned()),
extra_cli_args: workload.extra_cli_args.clone(),
},
None => Binary {
source: crate::common::instance::BinarySource::Build {
edition: crate::common::instance::Edition::Community,
},
extra_cli_args: workload.extra_cli_args.clone(),
},
};

let meilisearch =
meili_process::start(meili_client, master_key, workload, &args.asset_folder, run_command)
.await?;
start_meili(meili_client, master_key, &binary, &args.common.asset_folder).await?;

let processor = run_commands(
let processor = run_workload_commands(
dashboard_client,
logs_client,
meili_client,
@@ -171,7 +176,7 @@ async fn execute_run(
)
.await?;

meili_process::kill(meilisearch).await;
process::kill_meili(meilisearch).await;

tracing::info!(run_number, "Successful run");

crates/xtask/src/common/args.rs (Normal file, 36 lines)
@@ -0,0 +1,36 @@
use clap::Parser;
use std::path::PathBuf;

pub fn default_asset_folder() -> String {
"./bench/assets/".into()
}

pub fn default_log_filter() -> String {
"info".into()
}

#[derive(Parser, Debug, Clone)]
pub struct CommonArgs {
/// Filename of the workload file, pass multiple filenames
/// to run multiple workloads in the specified order.
///
/// For benches, each workload run will get its own report file.
#[arg(value_name = "WORKLOAD_FILE", last = false)]
pub workload_file: Vec<PathBuf>,

/// Directory to store the remote assets.
#[arg(long, default_value_t = default_asset_folder())]
pub asset_folder: String,

/// Log directives
#[arg(short, long, default_value_t = default_log_filter())]
pub log_filter: String,

/// Authentication bearer for fetching assets
#[arg(long)]
pub assets_key: Option<String>,

/// The maximum time in seconds we allow for fetching the task queue before timing out.
#[arg(long, default_value_t = 60)]
pub tasks_queue_timeout_secs: u64,
}

@@ -3,21 +3,22 @@ use std::io::{Read as _, Seek as _, Write as _};

use anyhow::{bail, Context};
use futures_util::TryStreamExt as _;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use sha2::Digest;

use super::client::Client;

#[derive(Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Asset {
pub local_location: Option<String>,
pub remote_location: Option<String>,
#[serde(default)]
#[serde(default, skip_serializing_if = "AssetFormat::is_default")]
pub format: AssetFormat,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sha256: Option<String>,
}

#[derive(Deserialize, Default, Copy, Clone)]
#[derive(Serialize, Deserialize, Default, Copy, Clone, Debug)]
pub enum AssetFormat {
#[default]
Auto,
@@ -27,6 +28,10 @@ pub enum AssetFormat {
}

impl AssetFormat {
fn is_default(&self) -> bool {
matches!(self, AssetFormat::Auto)
}

pub fn to_content_type(self, filename: &str) -> &'static str {
match self {
AssetFormat::Auto => Self::auto_detect(filename).to_content_type(filename),
@@ -166,7 +171,14 @@ fn check_sha256(name: &str, asset: &Asset, mut file: std::fs::File) -> anyhow::R
}
}
None => {
tracing::warn!(sha256 = file_hash, "Skipping hash for asset {name} that doesn't have one. Please add it to workload file");
let msg = match name.starts_with("meilisearch-") {
true => "Please add it to crates/xtask/src/common/instance/release.rs",
false => "Please add it to workload file",
};
tracing::warn!(
sha256 = file_hash,
"Skipping hash for asset {name} that doesn't have one. {msg}"
);
true
}
})

@@ -1,5 +1,5 @@
use anyhow::Context;
use serde::Deserialize;
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone)]
pub struct Client {
@@ -61,7 +61,7 @@ impl Client {
}
}

#[derive(Debug, Clone, Copy, Deserialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum Method {
Get,

crates/xtask/src/common/command.rs (Normal file, 430 lines)
@@ -0,0 +1,430 @@
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
use std::io::Read as _;
use std::sync::Arc;

use anyhow::{bail, Context as _};
use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use similar_asserts::SimpleDiff;

use crate::common::assets::{fetch_asset, Asset};
use crate::common::client::{Client, Method};

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Command {
pub route: String,
pub method: Method,
#[serde(default)]
pub body: Body,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub expected_status: Option<u16>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub expected_response: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub register: HashMap<String, String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub api_key_variable: Option<String>,
#[serde(default)]
pub synchronous: SyncMode,
}

#[derive(Default, Clone, Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum Body {
Inline {
inline: serde_json::Value,
},
Asset {
asset: String,
},
#[default]
Empty,
}

impl Body {
pub fn get(
self,
assets: &BTreeMap<String, Asset>,
registered: &HashMap<String, Value>,
asset_folder: &str,
) -> anyhow::Result<Option<(Vec<u8>, &'static str)>> {
Ok(match self {
Body::Inline { inline: mut body } => {
if !registered.is_empty() {
insert_variables(&mut body, registered);
}

Some((
serde_json::to_vec(&body)
.context("serializing to bytes")
.context("while getting inline body")?,
"application/json",
))
}
Body::Asset { asset: name } => Some({
let context = || format!("while getting body from asset '{name}'");
let (mut file, format) =
fetch_asset(&name, assets, asset_folder).with_context(context)?;
let mut buf = Vec::new();
file.read_to_end(&mut buf).with_context(context)?;
(buf, format.to_content_type(&name))
}),
Body::Empty => None,
})
}
}

impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?} {} ({:?})", self.method, self.route, self.synchronous)
}
}

#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum SyncMode {
DontWait,
#[default]
WaitForResponse,
WaitForTask,
}

async fn run_batch(
client: &Arc<Client>,
batch: &[Command],
first_command_index: usize,
assets: &Arc<BTreeMap<String, Asset>>,
asset_folder: &'static str,
registered: &mut HashMap<String, Value>,
return_response: bool,
) -> anyhow::Result<Vec<(Value, StatusCode)>> {
let [.., last] = batch else { return Ok(Vec::new()) };
let sync = last.synchronous;
let batch_len = batch.len();

let mut tasks = Vec::with_capacity(batch.len());
for (index, command) in batch.iter().cloned().enumerate() {
let client2 = Arc::clone(client);
let assets2 = Arc::clone(assets);
let needs_response = return_response || !command.register.is_empty();
let registered2 = registered.clone(); // FIXME: cloning the whole map for each command is inefficient
tasks.push(tokio::spawn(async move {
run(
&client2,
&command,
first_command_index + index,
&assets2,
registered2,
asset_folder,
needs_response,
)
.await
}));
}

let mut outputs = Vec::with_capacity(if return_response { batch_len } else { 0 });
for (task, command) in tasks.into_iter().zip(batch.iter()) {
let output = task.await.context("task panicked")??;
if let Some(output) = output {
for (name, path) in &command.register {
let value = output
.0
.pointer(path)
.with_context(|| format!("could not find path '{path}' in response (required to register '{name}')"))?
.clone();
registered.insert(name.clone(), value);
}

if return_response {
outputs.push(output);
}
}
}

match sync {
SyncMode::DontWait => {}
SyncMode::WaitForResponse => {}
SyncMode::WaitForTask => wait_for_tasks(client).await?,
}

Ok(outputs)
}

async fn wait_for_tasks(client: &Client) -> anyhow::Result<()> {
loop {
let response = client
.get("tasks?statuses=enqueued,processing")
.send()
.await
.context("could not wait for tasks")?;
let response: serde_json::Value = response
.json()
.await
.context("could not deserialize response to JSON")
.context("could not wait for tasks")?;
match response.get("total") {
Some(serde_json::Value::Number(number)) => {
let number = number.as_u64().with_context(|| {
format!("waiting for tasks: could not parse 'total' as integer, got {}", number)
})?;
if number == 0 {
break;
} else {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
continue;
}
}
Some(thing_else) => {
bail!(format!(
"waiting for tasks: could not parse 'total' as a number, got '{thing_else}'"
))
}
None => {
bail!(format!(
"waiting for tasks: expected response to contain 'total', got '{response}'"
))
}
}
}
Ok(())
}

fn json_eq_ignore(reference: &Value, value: &Value) -> bool {
match reference {
Value::Null | Value::Bool(_) | Value::Number(_) => reference == value,
Value::String(s) => (s.starts_with('[') && s.ends_with(']')) || reference == value,
Value::Array(values) => match value {
Value::Array(other_values) => {
if values.len() != other_values.len() {
return false;
}
for (value, other_value) in values.iter().zip(other_values.iter()) {
if !json_eq_ignore(value, other_value) {
return false;
}
}
true
}
_ => false,
},
Value::Object(map) => match value {
Value::Object(other_map) => {
if map.len() != other_map.len() {
return false;
}
for (key, value) in map.iter() {
match other_map.get(key) {
Some(other_value) => {
if !json_eq_ignore(value, other_value) {
return false;
}
}
None => return false,
}
}
true
}
_ => false,
},
}
}
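
Note: `json_eq_ignore` compares the expected response to the actual one, except that any expected string wrapped in square brackets (for example "[uuid]" or "[timestamp]") acts as a wildcard that matches whatever the server returned. A small illustration (the field names and values are arbitrary):

use serde_json::json;

fn demo() {
    let expected = json!({ "uid": "[uuid]", "status": "succeeded" });
    let got = json!({ "uid": "0198b1a2-5f4c-7aaa-8000-0123456789ab", "status": "succeeded" });
    assert!(json_eq_ignore(&expected, &got)); // "[uuid]" matches any value

    let other = json!({ "uid": "0198b1a2-5f4c-7aaa-8000-0123456789ab", "status": "failed" });
    assert!(!json_eq_ignore(&expected, &other)); // "succeeded" != "failed"
}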
#[tracing::instrument(skip(client, command, assets, registered, asset_folder), fields(command = %command))]
pub async fn run(
    client: &Client,
    command: &Command,
    command_index: usize,
    assets: &BTreeMap<String, Asset>,
    registered: HashMap<String, Value>,
    asset_folder: &str,
    return_value: bool,
) -> anyhow::Result<Option<(Value, StatusCode)>> {
    // Try to replace variables in the route
    let mut route = &command.route;
    let mut owned_route;
    if !registered.is_empty() {
        while let (Some(pos1), Some(pos2)) = (route.find("{{"), route.rfind("}}")) {
            if pos2 > pos1 {
                let name = route[pos1 + 2..pos2].trim();
                if let Some(replacement) = registered.get(name).and_then(|r| r.as_str()) {
                    let mut new_route = String::new();
                    new_route.push_str(&route[..pos1]);
                    new_route.push_str(replacement);
                    new_route.push_str(&route[pos2 + 2..]);
                    owned_route = new_route;
                    route = &owned_route;
                    continue;
                }
            }
            break;
        }
    }

    // Clone the body here so that `command` is not partially moved out
    let body = command
        .body
        .clone()
        .get(assets, &registered, asset_folder)
        .with_context(|| format!("while getting body for command {command}"))?;

    let mut request = client.request(command.method.into(), route);

    // Replace the API key
    if let Some(var_name) = &command.api_key_variable {
        if let Some(api_key) = registered.get(var_name).and_then(|v| v.as_str()) {
            request = request.header("Authorization", format!("Bearer {api_key}"));
        } else {
            bail!("could not find API key variable '{var_name}' in registered values");
        }
    }

    let request = if let Some((body, content_type)) = body {
        request.body(body).header(reqwest::header::CONTENT_TYPE, content_type)
    } else {
        request
    };

    let response =
        request.send().await.with_context(|| format!("error sending command: {}", command))?;

    let code = response.status();

    if !return_value {
        if let Some(expected_status) = command.expected_status {
            if code.as_u16() != expected_status {
                let response = response
                    .text()
                    .await
                    .context("could not read response body as text")
                    .context("reading response body when checking expected status")?;
                bail!("unexpected status code: got {}, expected {expected_status}, response body: '{response}'", code.as_u16());
            }
        } else if code.is_client_error() {
            tracing::error!(%command, %code, "error in workload file");
            let response: serde_json::Value = response
                .json()
                .await
                .context("could not deserialize response as JSON")
                .context("parsing error in workload file when sending command")?;
            bail!(
                "error in workload file: server responded with error code {code} and '{response}'"
            )
        } else if code.is_server_error() {
            tracing::error!(%command, %code, "server error");
            let response: serde_json::Value = response
                .json()
                .await
                .context("could not deserialize response as JSON")
                .context("parsing server error when sending command")?;
            bail!("server error: server responded with error code {code} and '{response}'")
        }
    }

    if let Some(expected_response) = &command.expected_response {
        let mut evaluated_expected_response;

        let expected_response = if !registered.is_empty() {
            evaluated_expected_response = expected_response.clone();
            insert_variables(&mut evaluated_expected_response, &registered);
            &evaluated_expected_response
        } else {
            expected_response
        };

        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response as JSON")
            .context("parsing response when checking expected response")?;
        if return_value {
            return Ok(Some((response, code)));
        }
        if !json_eq_ignore(expected_response, &response) {
            let expected_pretty = serde_json::to_string_pretty(expected_response)
                .context("serializing expected response as pretty JSON")?;
            let response_pretty = serde_json::to_string_pretty(&response)
                .context("serializing response as pretty JSON")?;
            let diff = SimpleDiff::from_str(&expected_pretty, &response_pretty, "expected", "got");
            bail!("command #{command_index} unexpected response:\n{diff}");
        }
    } else if return_value {
        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response as JSON")
            .context("parsing response when recording expected response")?;
        return Ok(Some((response, code)));
    }

    Ok(None)
}
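Route templating only accepts string registrations: each `{{ name }}` span in the route is spliced out and replaced with the registered string, which is what the `keys/{{ key }}` command in the workload below relies on. A minimal sketch of one substitution step, with values taken from that workload:

```rust
use std::collections::HashMap;
use serde_json::Value;

let mut registered: HashMap<String, Value> = HashMap::new();
// Registered earlier via `"register": { "key": "/key" }` on the POST /keys response.
registered.insert(
    "key".into(),
    Value::String("b387a8dabdc80d4d2069718ca43bad8bcb1ce5d8bb85b31af17a5ea6348317dc".into()),
);

let route = "keys/{{ key }}";
let (pos1, pos2) = (route.find("{{").unwrap(), route.rfind("}}").unwrap());
let name = route[pos1 + 2..pos2].trim();
let resolved =
    format!("{}{}{}", &route[..pos1], registered[name].as_str().unwrap(), &route[pos2 + 2..]);
assert_eq!(resolved, "keys/b387a8dabdc80d4d2069718ca43bad8bcb1ce5d8bb85b31af17a5ea6348317dc");
```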
pub async fn run_commands(
    client: &Arc<Client>,
    commands: &[Command],
    mut first_command_index: usize,
    assets: &Arc<BTreeMap<String, Asset>>,
    asset_folder: &'static str,
    registered: &mut HashMap<String, Value>,
    return_response: bool,
) -> anyhow::Result<Vec<(Value, StatusCode)>> {
    let mut responses = Vec::new();
    for batch in
        commands.split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
    {
        let mut new_responses = run_batch(
            client,
            batch,
            first_command_index,
            assets,
            asset_folder,
            registered,
            return_response,
        )
        .await?;
        responses.append(&mut new_responses);

        first_command_index += batch.len();
    }

    Ok(responses)
}
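`split_inclusive` is what implements the batching semantics: every run of `DontWait` commands is grouped together with the first blocking command that follows it, so each batch ends on a command whose completion is awaited before the next batch starts. The grouping in isolation:

```rust
let modes = ["DontWait", "DontWait", "WaitForTask", "DontWait", "WaitForResponse"];
let batches: Vec<&[&str]> = modes.split_inclusive(|m| *m != "DontWait").collect();
assert_eq!(
    batches,
    [&["DontWait", "DontWait", "WaitForTask"][..], &["DontWait", "WaitForResponse"][..]]
);
```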
pub fn health_command() -> Command {
    Command {
        route: "/health".into(),
        method: crate::common::client::Method::Get,
        body: Default::default(),
        register: HashMap::new(),
        synchronous: SyncMode::WaitForResponse,
        expected_status: None,
        expected_response: None,
        api_key_variable: None,
    }
}

pub fn insert_variables(value: &mut Value, registered: &HashMap<String, Value>) {
    match value {
        Value::Null | Value::Bool(_) | Value::Number(_) => (),
        Value::String(s) => {
            if s.starts_with("{{") && s.ends_with("}}") {
                let name = s[2..s.len() - 2].trim();
                if let Some(replacement) = registered.get(name) {
                    *value = replacement.clone();
                }
            }
        }
        Value::Array(values) => {
            for value in values {
                insert_variables(value, registered);
            }
        }
        Value::Object(map) => {
            for (_key, value) in map.iter_mut() {
                insert_variables(value, registered);
            }
        }
    }
}
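Unlike the route substitution, `insert_variables` replaces a whole `"{{ name }}"` string with the registered JSON value, whatever its type, which lets an expected response reference a number or an object captured by an earlier command. A small sketch:

```rust
use std::collections::HashMap;
use serde_json::json;

let registered = HashMap::from([("uid".to_string(), json!(42))]);
let mut expected = json!({ "taskUid": "{{ uid }}", "status": "enqueued" });
insert_variables(&mut expected, &registered);
// The string placeholder became a number, not a string.
assert_eq!(expected, json!({ "taskUid": 42, "status": "enqueued" }));
```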
113  crates/xtask/src/common/instance/mod.rs  Normal file
@@ -0,0 +1,113 @@
use std::fmt::Display;
use std::path::PathBuf;

use serde::{Deserialize, Serialize};

mod release;

pub use release::{add_releases_to_assets, Release};

/// A binary to execute on a temporary DB.
///
/// - The URL of the binary will be in the form <http://localhost:PORT>, where `PORT`
///   is selected by the runner.
/// - The database will be temporary, cleaned before use, and will be selected by the runner.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Binary {
    /// Describes how this binary should be instantiated
    #[serde(flatten)]
    pub source: BinarySource,
    /// Extra CLI arguments to pass to the binary.
    ///
    /// Should be Meilisearch CLI options.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub extra_cli_args: Vec<String>,
}

impl Display for Binary {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.source)?;
        if !self.extra_cli_args.is_empty() {
            write!(f, " with arguments: {:?}", self.extra_cli_args)?;
        }
        Ok(())
    }
}

impl Binary {
    pub fn as_release(&self) -> Option<&Release> {
        if let BinarySource::Release(release) = &self.source {
            Some(release)
        } else {
            None
        }
    }

    pub fn binary_path(&self, asset_folder: &str) -> anyhow::Result<Option<PathBuf>> {
        self.source.binary_path(asset_folder)
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields, tag = "source")]
/// Description of how to get a binary to instantiate.
pub enum BinarySource {
    /// Compile and run the binary from the current repository.
    Build {
        #[serde(default)]
        edition: Edition,
    },
    /// Get a release from GitHub.
    Release(Release),
    /// Run the binary from the specified local path.
    Path(PathBuf),
}

impl Display for BinarySource {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            BinarySource::Build { edition: Edition::Community } => {
                f.write_str("git with community edition")
            }
            BinarySource::Build { edition: Edition::Enterprise } => {
                f.write_str("git with enterprise edition")
            }
            BinarySource::Release(release) => write!(f, "{release}"),
            BinarySource::Path(path) => write!(f, "binary at `{}`", path.display()),
        }
    }
}

impl Default for BinarySource {
    fn default() -> Self {
        Self::Build { edition: Default::default() }
    }
}

impl BinarySource {
    fn binary_path(&self, asset_folder: &str) -> anyhow::Result<Option<PathBuf>> {
        Ok(match self {
            Self::Release(release) => Some(release.binary_path(asset_folder)?),
            Self::Build { .. } => None,
            Self::Path(path) => Some(path.clone()),
        })
    }
}

#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub enum Edition {
    #[default]
    Community,
    Enterprise,
}

impl Edition {
    fn binary_base(&self) -> &'static str {
        match self {
            Edition::Community => "meilisearch",
            Edition::Enterprise => "meilisearch-enterprise",
        }
    }
}
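Because `BinarySource` is internally tagged on `source` and flattened into `Binary`, a binary deserializes from a flat JSON object, which is the shape the workload files below use. A round-trip sketch:

```rust
let binary: Binary = serde_json::from_value(serde_json::json!({
    "source": "release",
    "edition": "community",
    "version": "1.12.0"
}))
.unwrap();
assert!(matches!(binary.source, BinarySource::Release(_)));
assert!(binary.extra_cli_args.is_empty());
```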
193  crates/xtask/src/common/instance/release.rs  Normal file
@@ -0,0 +1,193 @@
use std::collections::BTreeMap;
use std::fmt::Display;
use std::path::PathBuf;

use anyhow::Context;
use cargo_metadata::semver::Version;
use serde::{Deserialize, Serialize};

use super::Edition;
use crate::common::assets::{Asset, AssetFormat};

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Release {
    #[serde(default)]
    pub edition: Edition,
    pub version: Version,
}

impl Display for Release {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "v{}", self.version)?;
        match self.edition {
            Edition::Community => f.write_str(" Community Edition"),
            Edition::Enterprise => f.write_str(" Enterprise Edition"),
        }
    }
}

impl Release {
    pub fn binary_path(&self, asset_folder: &str) -> anyhow::Result<PathBuf> {
        let mut asset_folder: PathBuf = asset_folder
            .parse()
            .with_context(|| format!("parsing asset folder `{asset_folder}` as a path"))?;
        asset_folder.push(self.local_filename()?);
        Ok(asset_folder)
    }

    fn local_filename(&self) -> anyhow::Result<String> {
        let version = &self.version;
        let arch = get_arch()?;
        let base = self.edition.binary_base();

        Ok(format!("{base}-{version}-{arch}"))
    }

    fn remote_filename(&self) -> anyhow::Result<String> {
        let arch = get_arch()?;
        let base = self.edition.binary_base();

        Ok(format!("{base}-{arch}"))
    }

    async fn fetch_sha256(&self) -> anyhow::Result<String> {
        let version = &self.version;
        let asset_name = self.remote_filename()?;

        // Versions lower than 1.15 predate GitHub's digest support, so there is no sha256 to fetch
        if *version < Version::parse("1.15.0")? {
            anyhow::bail!("version is lower than 1.15, sha256 not available");
        }

        #[derive(Deserialize)]
        struct GithubReleaseAsset {
            name: String,
            digest: Option<String>,
        }

        #[derive(Deserialize)]
        struct GithubRelease {
            assets: Vec<GithubReleaseAsset>,
        }

        let url = format!(
            "https://api.github.com/repos/meilisearch/meilisearch/releases/tags/v{version}"
        );

        let client = reqwest::Client::builder()
            .user_agent("Meilisearch bench xtask")
            .build()
            .context("failed to build reqwest client")?;
        let body = client.get(url).send().await?.text().await?;
        let data: GithubRelease = serde_json::from_str(&body)?;

        let digest = data
            .assets
            .into_iter()
            .find(|asset| asset.name.as_str() == asset_name.as_str())
            .with_context(|| format!("asset {asset_name} not found in release {self}"))?
            .digest
            .with_context(|| format!("asset {asset_name} has no digest"))?;

        let sha256 = digest
            .strip_prefix("sha256:")
            .map(|s| s.to_string())
            .context("invalid sha256 format")?;

        Ok(sha256)
    }

    async fn add_asset(&self, assets: &mut BTreeMap<String, Asset>) -> anyhow::Result<()> {
        let local_filename = self.local_filename()?;
        let version = &self.version;
        if assets.contains_key(&local_filename) {
            return Ok(());
        }

        let remote_filename = self.remote_filename()?;

        // Try to get the sha256, but the request may fail if GitHub is rate limiting us.
        // We hardcode some values to speed up tests and avoid hitting GitHub.
        // Also, versions prior to 1.15 don't have a sha256 available anyway.
        let sha256 = match local_filename.as_str() {
            "meilisearch-1.12.0-macos-apple-silicon" => {
                Some("3b384707a5df9edf66f9157f0ddb70dcd3ac84d4887149169cf93067d06717b7".into())
            }
            "meilisearch-1.12.0-linux-amd64" => {
                Some("865a3fc222e3b3bd1f4b64346cb114b9669af691aae28d71fa68dbf39427abcf".into())
            }
            _ => match self.fetch_sha256().await {
                Ok(sha256) => Some(sha256),
                Err(err) => {
                    tracing::warn!("failed to get sha256 for release {self}: {err}");
                    None
                }
            },
        };

        let url = format!(
            "https://github.com/meilisearch/meilisearch/releases/download/v{version}/{remote_filename}"
        );

        let asset = Asset {
            local_location: Some(local_filename.clone()),
            remote_location: Some(url),
            format: AssetFormat::Raw,
            sha256,
        };

        assets.insert(local_filename, asset);

        Ok(())
    }
}

pub fn get_arch() -> anyhow::Result<&'static str> {
    // linux-aarch64
    #[cfg(all(target_os = "linux", target_arch = "aarch64"))]
    {
        Ok("linux-aarch64")
    }

    // linux-amd64
    #[cfg(all(target_os = "linux", target_arch = "x86_64"))]
    {
        Ok("linux-amd64")
    }

    // macos-amd64
    #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
    {
        Ok("macos-amd64")
    }

    // macos-apple-silicon
    #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
    {
        Ok("macos-apple-silicon")
    }

    // windows-amd64
    #[cfg(all(target_os = "windows", target_arch = "x86_64"))]
    {
        Ok("windows-amd64")
    }

    #[cfg(not(all(target_os = "windows", target_arch = "x86_64")))]
    #[cfg(not(all(target_os = "linux", target_arch = "aarch64")))]
    #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))]
    #[cfg(not(all(target_os = "macos", target_arch = "x86_64")))]
    #[cfg(not(all(target_os = "macos", target_arch = "aarch64")))]
    anyhow::bail!("unsupported platform")
}

pub async fn add_releases_to_assets(
    assets: &mut BTreeMap<String, Asset>,
    releases: impl IntoIterator<Item = &Release>,
) -> anyhow::Result<()> {
    for release in releases {
        release.add_asset(assets).await?;
    }

    Ok(())
}
18  crates/xtask/src/common/logs.rs  Normal file
@@ -0,0 +1,18 @@
use anyhow::Context;
use std::io::LineWriter;
use tracing_subscriber::{fmt::format::FmtSpan, layer::SubscriberExt, Layer};

pub fn setup_logs(log_filter: &str) -> anyhow::Result<()> {
    let filter: tracing_subscriber::filter::Targets =
        log_filter.parse().context("invalid --log-filter")?;

    let subscriber = tracing_subscriber::registry().with(
        tracing_subscriber::fmt::layer()
            .with_writer(|| LineWriter::new(std::io::stderr()))
            .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
            .with_filter(filter),
    );
    tracing::subscriber::set_global_default(subscriber).context("could not setup logging")?;

    Ok(())
}
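A one-liner at startup is enough; the filter string uses `tracing_subscriber::filter::Targets` syntax. The target/level pairs here are illustrative:

```rust
// Called from a function returning anyhow::Result<()>.
setup_logs("xtask=info,meilisearch=warn")?;
```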
8  crates/xtask/src/common/mod.rs  Normal file
@@ -0,0 +1,8 @@
pub mod args;
pub mod assets;
pub mod client;
pub mod command;
pub mod instance;
pub mod logs;
pub mod process;
pub mod workload;
@@ -1,18 +1,18 @@
-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashMap};
 use std::time::Duration;
 
 use anyhow::{bail, Context as _};
-use tokio::process::Command;
+use tokio::process::Command as TokioCommand;
 use tokio::time;
 
-use super::assets::Asset;
-use super::client::Client;
-use super::workload::Workload;
+use crate::common::client::Client;
+use crate::common::command::{health_command, run as run_command};
+use crate::common::instance::{Binary, BinarySource, Edition};
 
-pub async fn kill(mut meilisearch: tokio::process::Child) {
+pub async fn kill_meili(mut meilisearch: tokio::process::Child) {
     let Some(id) = meilisearch.id() else { return };
 
-    match Command::new("kill").args(["--signal=TERM", &id.to_string()]).spawn() {
+    match TokioCommand::new("kill").args(["--signal=TERM", &id.to_string()]).spawn() {
         Ok(mut cmd) => {
             let Err(error) = cmd.wait().await else { return };
             tracing::warn!(
@@ -49,9 +49,12 @@ pub async fn kill(mut meilisearch: tokio::process::Child) {
 }
 
 #[tracing::instrument]
-pub async fn build() -> anyhow::Result<()> {
-    let mut command = Command::new("cargo");
+async fn build(edition: Edition) -> anyhow::Result<()> {
+    let mut command = TokioCommand::new("cargo");
     command.arg("build").arg("--release").arg("-p").arg("meilisearch");
+    if let Edition::Enterprise = edition {
+        command.arg("--features=enterprise");
+    }
 
     command.kill_on_drop(true);
 
@@ -64,29 +67,68 @@ pub async fn build() -> anyhow::Result<()> {
     Ok(())
 }
 
-#[tracing::instrument(skip(client, master_key, workload), fields(workload = workload.name))]
-pub async fn start(
+#[tracing::instrument(skip(client, master_key))]
+pub async fn start_meili(
     client: &Client,
     master_key: Option<&str>,
-    workload: &Workload,
+    binary: &Binary,
     asset_folder: &str,
-    mut command: Command,
 ) -> anyhow::Result<tokio::process::Child> {
+    let mut command = match &binary.source {
+        BinarySource::Build { edition } => {
+            build(*edition).await?;
+            let mut command = tokio::process::Command::new("cargo");
+
+            command
+                .arg("run")
+                .arg("--release")
+                .arg("-p")
+                .arg("meilisearch")
+                .arg("--bin")
+                .arg("meilisearch");
+            if let Edition::Enterprise = *edition {
+                command.arg("--features=enterprise");
+            }
+            command.arg("--");
+            command
+        }
+        BinarySource::Release(release) => {
+            let binary_path = release.binary_path(asset_folder)?;
+            tokio::process::Command::new(binary_path)
+        }
+        BinarySource::Path(binary_path) => tokio::process::Command::new(binary_path),
+    };
+
     command.arg("--db-path").arg("./_xtask_benchmark.ms");
     if let Some(master_key) = master_key {
         command.arg("--master-key").arg(master_key);
     }
     command.arg("--experimental-enable-logs-route");
 
-    for extra_arg in workload.extra_cli_args.iter() {
+    for extra_arg in binary.extra_cli_args.iter() {
         command.arg(extra_arg);
     }
 
     command.kill_on_drop(true);
 
+    #[cfg(unix)]
+    {
+        use std::os::unix::fs::PermissionsExt;
+        if let Some(binary_path) = binary.binary_path(asset_folder)? {
+            let mut perms = tokio::fs::metadata(&binary_path)
+                .await
+                .with_context(|| format!("could not get metadata for {binary_path:?}"))?
+                .permissions();
+            perms.set_mode(perms.mode() | 0o111);
+            tokio::fs::set_permissions(&binary_path, perms)
+                .await
+                .with_context(|| format!("could not set permissions for {binary_path:?}"))?;
+        }
+    }
+
    let mut meilisearch = command.spawn().context("Error starting Meilisearch")?;
 
-    wait_for_health(client, &mut meilisearch, &workload.assets, asset_folder).await?;
+    wait_for_health(client, &mut meilisearch).await?;
 
     Ok(meilisearch)
 }
@@ -94,11 +136,11 @@ pub async fn start(
 async fn wait_for_health(
     client: &Client,
     meilisearch: &mut tokio::process::Child,
-    assets: &BTreeMap<String, Asset>,
-    asset_folder: &str,
 ) -> anyhow::Result<()> {
     for i in 0..100 {
-        let res = super::command::run(client.clone(), health_command(), assets, asset_folder).await;
+        let res =
+            run_command(client, &health_command(), 0, &BTreeMap::new(), HashMap::new(), "", false)
+                .await;
         if res.is_ok() {
             // check that this is actually the current Meilisearch instance that answered us
             if let Some(exit_code) =
@@ -122,15 +164,6 @@ async fn wait_for_health(
     bail!("meilisearch is not responding")
 }
 
-fn health_command() -> super::command::Command {
-    super::command::Command {
-        route: "/health".into(),
-        method: super::client::Method::Get,
-        body: Default::default(),
-        synchronous: super::command::SyncMode::WaitForResponse,
-    }
-}
-
-pub fn delete_db() {
-    let _ = std::fs::remove_dir_all("./_xtask_benchmark.ms");
+pub async fn delete_db() {
+    let _ = tokio::fs::remove_dir_all("./_xtask_benchmark.ms").await;
 }
11  crates/xtask/src/common/workload.rs  Normal file
@@ -0,0 +1,11 @@
use serde::{Deserialize, Serialize};

use crate::{bench::BenchWorkload, test::TestWorkload};

#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "type")]
#[serde(rename_all = "camelCase")]
pub enum Workload {
    Bench(BenchWorkload),
    Test(TestWorkload),
}
@@ -1 +1,3 @@
 pub mod bench;
+pub mod common;
+pub mod test;
@@ -1,16 +1,34 @@
-use std::collections::HashSet;
+use std::{collections::HashSet, process::Stdio};
 
+use anyhow::Context;
 use clap::Parser;
-use xtask::bench::BenchDeriveArgs;
+use semver::{Prerelease, Version};
+use xtask::{bench::BenchArgs, test::TestArgs};
+
+/// This is the version of the crate but also the current Meilisearch version
+pub const VERSION: &str = env!("CARGO_PKG_VERSION");
 
 /// List features available in the workspace
 #[derive(Parser, Debug)]
-struct ListFeaturesDeriveArgs {
+struct ListFeaturesArgs {
     /// Feature to exclude from the list. Use a comma to separate multiple features.
     #[arg(short, long, value_delimiter = ',')]
     exclude_feature: Vec<String>,
 }
 
+/// Create a git tag for the current version
+///
+/// The tag will be of the form prototype-v<version>-<name>.<increment>
+#[derive(Parser, Debug)]
+struct PrototypeArgs {
+    /// If set, generates a new tag and refuses to do so if one already exists for this name;
+    /// otherwise, increments the existing tag and refuses if none exists.
+    name: String,
+    #[arg(long)]
+    generate_new: bool,
+}
+
 /// Utilitary commands
 #[derive(Parser, Debug)]
 #[command(author, version, about, long_about)]
@@ -18,8 +36,10 @@ struct ListFeaturesDeriveArgs {
 #[command(bin_name = "cargo xtask")]
 #[allow(clippy::large_enum_variant)] // please, that's enough...
 enum Command {
-    ListFeatures(ListFeaturesDeriveArgs),
-    Bench(BenchDeriveArgs),
+    ListFeatures(ListFeaturesArgs),
+    Bench(BenchArgs),
+    GeneratePrototype(PrototypeArgs),
+    Test(TestArgs),
 }
 
 fn main() -> anyhow::Result<()> {
@@ -27,11 +47,13 @@ fn main() -> anyhow::Result<()> {
     match args {
         Command::ListFeatures(args) => list_features(args),
         Command::Bench(args) => xtask::bench::run(args)?,
+        Command::GeneratePrototype(args) => generate_prototype(args)?,
+        Command::Test(args) => xtask::test::run(args)?,
     }
     Ok(())
 }
 
-fn list_features(args: ListFeaturesDeriveArgs) {
+fn list_features(args: ListFeaturesArgs) {
     let exclude_features: HashSet<_> = args.exclude_feature.into_iter().collect();
     let metadata = cargo_metadata::MetadataCommand::new().no_deps().exec().unwrap();
     let features: Vec<String> = metadata
@@ -44,3 +66,106 @@ fn list_features(args: ListFeaturesDeriveArgs) {
     let features = features.join(" ");
     println!("{features}")
 }
+
+fn generate_prototype(args: PrototypeArgs) -> anyhow::Result<()> {
+    let PrototypeArgs { name, generate_new: create_new } = args;
+
+    if name.rsplit_once(['.', '-']).filter(|(_, t)| t.chars().all(char::is_numeric)).is_some() {
+        anyhow::bail!(
+            "The increment must not be part of the name: it is appended and incremented by this command."
+        );
+    }
+
+    // 1. Fetch the crate version
+    let version = Version::parse(VERSION).context("while semver-parsing the crate version")?;
+
+    // 2. Pull tags from the remote and retrieve the last prototype tag
+    std::process::Command::new("git")
+        .arg("fetch")
+        .arg("--tags")
+        .stderr(Stdio::null())
+        .stdout(Stdio::null())
+        .status()?;
+
+    let output = std::process::Command::new("git")
+        .arg("tag")
+        .args(["--list", "prototype-v*"])
+        .stderr(Stdio::inherit())
+        .output()?;
+    let output =
+        String::from_utf8(output.stdout).context("while converting the tag list into a string")?;
+
+    let mut highest_increment = None;
+    for tag in output.lines() {
+        let Some(version) = tag.strip_prefix("prototype-v") else {
+            continue;
+        };
+        let Ok(version) = Version::parse(version) else {
+            continue;
+        };
+        let Ok(proto) = PrototypePrerelease::from_str(version.pre.as_str()) else {
+            continue;
+        };
+        if proto.name() == name {
+            highest_increment = match highest_increment {
+                Some(last) if last < proto.increment() => Some(proto.increment()),
+                Some(last) => Some(last),
+                None => Some(proto.increment()),
+            };
+        }
+    }
+
+    // 3. Generate the new tag name (without git, just a string)
+    let increment = match (create_new, highest_increment) {
+        (true, None) => 0,
+        (true, Some(increment)) => anyhow::bail!(
+            "A prototype with the name `{name}` already exists with increment `{increment}`"
+        ),
+        (false, None) => anyhow::bail!(
+            "Prototype `{name}` is missing and must exist to be incremented.\n\
+            Use the --generate-new flag to create a new prototype with an increment at 0."
+        ),
+        (false, Some(increment)) => {
+            increment.checked_add(1).context("while incrementing the increment by one")?
+        }
+    };
+
+    // Note that we cannot have leading zeros in the increment
+    let pre = format!("{name}.{increment}").parse().context("while parsing pre-release name")?;
+    let tag_name = Version { pre, ..version };
+    println!("prototype-v{tag_name}");
+
+    Ok(())
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct PrototypePrerelease {
+    pre: Prerelease,
+}
+
+impl PrototypePrerelease {
+    fn from_str(s: &str) -> anyhow::Result<Self> {
+        Prerelease::new(s)
+            .map_err(Into::into)
+            .and_then(|pre| {
+                if pre.rsplit_once('.').is_some() {
+                    Ok(pre)
+                } else {
+                    Err(anyhow::anyhow!("Invalid prototype name, missing name or increment"))
+                }
+            })
+            .map(|pre| PrototypePrerelease { pre })
+    }
+
+    fn name(&self) -> &str {
+        self.pre.rsplit_once('.').expect("Missing prototype name").0
+    }
+
+    fn increment(&self) -> u32 {
+        self.pre
+            .as_str()
+            .rsplit_once('.')
+            .map(|(_, tail)| tail.parse().expect("Invalid increment"))
+            .expect("Missing increment")
+    }
+}
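Read together, these two pieces mean a prototype tag is just a semver pre-release whose final dot separates the feature name from the increment. A worked example, matching the convention documented later in this page:

```rust
// `prototype-v1.23.0-search-personalization.1` strips to the semver
// `1.23.0-search-personalization.1`; its pre-release part parses as:
let proto = PrototypePrerelease::from_str("search-personalization.1").unwrap();
assert_eq!(proto.name(), "search-personalization");
assert_eq!(proto.increment(), 1);
// Without --generate-new, the command would then print the next tag:
// `prototype-v1.23.0-search-personalization.2`.
```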
95  crates/xtask/src/test/mod.rs  Normal file
@@ -0,0 +1,95 @@
use std::sync::Arc;
use std::time::Duration;

use anyhow::{bail, Context};
use clap::Parser;

use crate::common::args::CommonArgs;
use crate::common::client::Client;
use crate::common::command::SyncMode;
use crate::common::logs::setup_logs;
use crate::common::workload::Workload;
use crate::test::workload::CommandOrBinary;

mod workload;

pub use workload::TestWorkload;

/// Run tests from a workload
#[derive(Parser, Debug)]
pub struct TestArgs {
    /// Common arguments shared with other commands
    #[command(flatten)]
    common: CommonArgs,

    /// Enables workloads to be rewritten in place to update expected responses.
    #[arg(short, long, default_value_t = false)]
    pub update_responses: bool,

    /// Enables workloads to be rewritten in place to add missing expected responses.
    #[arg(short, long, default_value_t = false)]
    pub add_missing_responses: bool,
}

pub fn run(args: TestArgs) -> anyhow::Result<()> {
    let rt = tokio::runtime::Builder::new_current_thread().enable_io().enable_time().build()?;
    let _scope = rt.enter();

    rt.block_on(async { run_inner(args).await })?;

    Ok(())
}

async fn run_inner(args: TestArgs) -> anyhow::Result<()> {
    setup_logs(&args.common.log_filter)?;

    // setup clients
    let assets_client = Arc::new(Client::new(
        None,
        args.common.assets_key.as_deref(),
        Some(Duration::from_secs(3600)), // 1h
    )?);

    let meili_client = Arc::new(Client::new(
        Some("http://127.0.0.1:7700".into()),
        Some("masterKey"),
        Some(Duration::from_secs(args.common.tasks_queue_timeout_secs)),
    )?);

    let asset_folder = args.common.asset_folder.clone().leak();
    for workload_file in &args.common.workload_file {
        let string = tokio::fs::read_to_string(workload_file)
            .await
            .with_context(|| format!("error reading {}", workload_file.display()))?;
        let workload: Workload = serde_json::from_str(string.trim())
            .with_context(|| format!("error parsing {} as JSON", workload_file.display()))?;

        let Workload::Test(workload) = workload else {
            bail!("workload file {} is not a test workload", workload_file.display());
        };

        let has_faulty_register = workload.commands.iter().any(|c| {
            matches!(c, CommandOrBinary::Command(cmd) if cmd.synchronous == SyncMode::DontWait && !cmd.register.is_empty())
        });
        if has_faulty_register {
            bail!("workload {} contains commands that register values but are marked as `DontWait`. This is not supported because we cannot guarantee the value will be registered before the next command runs.", workload.name);
        }

        let name = workload.name.clone();
        match workload.run(&args, &assets_client, &meili_client, asset_folder).await {
            Ok(_) => match args.update_responses || args.add_missing_responses {
                true => println!(
                    "🛠️ Workload {name} was updated, please check the output and restart the test"
                ),
                false => println!("✅ Workload {name} passed"),
            },
            Err(error) => {
                println!("❌ Workload {name} failed: {error}");
                println!("💡 Is this intentional? If so, rerun with --update-responses to update the workload files.");
                return Err(error);
            }
        }
    }

    Ok(())
}
201  crates/xtask/src/test/workload.rs  Normal file
@@ -0,0 +1,201 @@
use std::collections::{BTreeMap, HashMap};
use std::io::Write;
use std::sync::Arc;

use anyhow::Context;
use serde::{Deserialize, Serialize};
use serde_json::Value;

use crate::common::assets::{fetch_assets, Asset};
use crate::common::client::Client;
use crate::common::command::{run_commands, Command};
use crate::common::instance::Binary;
use crate::common::process::{self, delete_db, kill_meili};
use crate::common::workload::Workload;
use crate::test::TestArgs;

#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
pub enum CommandOrBinary {
    Command(Command),
    Binary { binary: Binary },
}

enum CommandOrBinaryVec<'a> {
    Commands(Vec<&'a mut Command>),
    Binary(Binary),
}

fn produce_reference_value(value: &mut Value) {
    match value {
        Value::Null | Value::Bool(_) | Value::Number(_) => (),
        Value::String(string) => {
            if time::OffsetDateTime::parse(
                string.as_str(),
                &time::format_description::well_known::Rfc3339,
            )
            .is_ok()
            {
                *string = String::from("[timestamp]");
            } else if uuid::Uuid::parse_str(string).is_ok() {
                *string = String::from("[uuid]");
            }
        }
        Value::Array(values) => {
            for value in values {
                produce_reference_value(value);
            }
        }
        Value::Object(map) => {
            for (key, value) in map.iter_mut() {
                match key.as_str() {
                    "duration" | "processingTimeMs" => {
                        *value = Value::String(String::from("[duration]"));
                    }
                    _ => produce_reference_value(value),
                }
            }
        }
    }
}
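`produce_reference_value` is what turns a live response into a stable expectation: RFC 3339 strings, UUIDs, and duration fields are replaced with placeholders before the response is written back into the workload file. A small sketch:

```rust
use serde_json::json;

let mut response = json!({
    "uid": "9e053497-b180-4b9f-bf10-a4a6fc4ca1b2",
    "enqueuedAt": "2025-01-01T00:00:00Z",
    "processingTimeMs": 12
});
produce_reference_value(&mut response);
assert_eq!(
    response,
    json!({ "uid": "[uuid]", "enqueuedAt": "[timestamp]", "processingTimeMs": "[duration]" })
);
```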
/// A test workload.
/// Not to be confused with [a bench workload](crate::bench::workload::Workload).
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct TestWorkload {
    pub name: String,
    pub binary: Binary,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub master_key: Option<String>,
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub assets: BTreeMap<String, Asset>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub commands: Vec<CommandOrBinary>,
}

impl TestWorkload {
    pub async fn run(
        mut self,
        args: &TestArgs,
        assets_client: &Client,
        meili_client: &Arc<Client>,
        asset_folder: &'static str,
    ) -> anyhow::Result<()> {
        // Group commands between upgrades
        let mut commands_or_instance = Vec::new();
        let mut current_commands = Vec::new();
        let mut all_releases = Vec::new();

        if let Some(release) = self.binary.as_release() {
            all_releases.push(release);
        }
        for command_or_upgrade in &mut self.commands {
            match command_or_upgrade {
                CommandOrBinary::Command(command) => current_commands.push(command),
                CommandOrBinary::Binary { binary: instance } => {
                    if !current_commands.is_empty() {
                        commands_or_instance.push(CommandOrBinaryVec::Commands(current_commands));
                        current_commands = Vec::new();
                    }
                    commands_or_instance.push(CommandOrBinaryVec::Binary(instance.clone()));
                    if let Some(release) = instance.as_release() {
                        all_releases.push(release);
                    }
                }
            }
        }
        if !current_commands.is_empty() {
            commands_or_instance.push(CommandOrBinaryVec::Commands(current_commands));
        }

        // Fetch assets
        crate::common::instance::add_releases_to_assets(&mut self.assets, all_releases).await?;
        fetch_assets(assets_client, &self.assets, &args.common.asset_folder).await?;

        // Run the server
        delete_db().await;
        let mut process = process::start_meili(
            meili_client,
            Some("masterKey"),
            &self.binary,
            &args.common.asset_folder,
        )
        .await?;

        let assets = Arc::new(self.assets.clone());
        let return_responses = args.add_missing_responses || args.update_responses;
        let mut registered = HashMap::new();
        let mut first_command_index = 0;
        for command_or_upgrade in commands_or_instance {
            match command_or_upgrade {
                CommandOrBinaryVec::Commands(commands) => {
                    let cloned: Vec<_> = commands.iter().map(|c| (*c).clone()).collect();
                    let responses = run_commands(
                        meili_client,
                        &cloned,
                        first_command_index,
                        &assets,
                        asset_folder,
                        &mut registered,
                        return_responses,
                    )
                    .await?;
                    first_command_index += cloned.len();
                    if return_responses {
                        assert_eq!(responses.len(), cloned.len());
                        for (command, (mut response, status)) in commands.into_iter().zip(responses)
                        {
                            if args.update_responses
                                || (args.add_missing_responses
                                    && command.expected_response.is_none())
                            {
                                produce_reference_value(&mut response);
                                command.expected_response = Some(response);
                                command.expected_status = Some(status.as_u16());
                            }
                        }
                    }
                }
                CommandOrBinaryVec::Binary(binary) => {
                    kill_meili(process).await;
                    process = process::start_meili(
                        meili_client,
                        Some("masterKey"),
                        &binary,
                        &args.common.asset_folder,
                    )
                    .await?;
                    tracing::info!("Restarted instance with {binary}");
                }
            }
        }

        // Write back the workload if needed
        if return_responses {
            // Filter out the assets we added for the versions
            self.assets.retain(|_, asset| {
                asset.local_location.as_ref().is_none_or(|a| !a.starts_with("meilisearch-"))
            });

            let workload = Workload::Test(self);
            let mut file =
                std::fs::File::create(&args.common.workload_file[0]).with_context(|| {
                    format!("could not open {}", args.common.workload_file[0].display())
                })?;
            serde_json::to_writer_pretty(&file, &workload).with_context(|| {
                format!("could not write to {}", args.common.workload_file[0].display())
            })?;
            file.write_all(b"\n").with_context(|| {
                format!("could not write to {}", args.common.workload_file[0].display())
            })?;
            tracing::info!("Updated workload file {}", args.common.workload_file[0].display());
        }

        Ok(())
    }
}
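The grouping loop at the top of `run` turns the flat `commands` array into alternating runs of commands and binary swaps, so an upgrade test executes as: run commands against the first binary, kill it, restart with the next binary, continue. A schematic of the partitioning, with strings standing in for commands and binaries:

```rust
// A minimal sketch of the invariant `run` establishes before executing anything.
let items = ["c0", "c1", "BIN", "c2"];
let mut groups: Vec<Vec<&str>> = vec![Vec::new()];
for item in items {
    if item == "BIN" {
        groups.push(vec![item]);
        groups.push(Vec::new());
    } else {
        groups.last_mut().unwrap().push(item);
    }
}
groups.retain(|group| !group.is_empty());
assert_eq!(groups, [vec!["c0", "c1"], vec!["BIN"], vec!["c2"]]);
```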
@@ -20,29 +20,33 @@ These make us iterate fast before stabilizing it for the current release.
 
 ### Release steps
 
-The prototype name must follow this convention: `prototype-v<version>.<name>-<number>` where
+The prototype name must [follow this convention](https://semver.org/#spec-item-11): `prototype-v<version>-<name>.<iteration>` where
 - `version` is the version of Meilisearch on which the prototype is based.
-- `name` is the feature name formatted in `kebab-case`. It should not end with a single number.
-- `Y` is the version of the prototype, starting from `0`.
+- `name` is the feature name formatted in `kebab-case`.
+- `iteration` is the iteration of the prototype, starting from `0`.
 
-✅ Example: `prototype-v1.23.0.search-personalization-0`. </br>
+✅ Example: `prototype-v1.23.0-search-personalization.1`. </br>
+❌ Bad example: `prototype-v1.23.0-search-personalization-0`: a dash separates the name and the iteration. </br>
+❌ Bad example: `prototype-v1.23.0.search-personalization.0`: a dot separates the version and the name. </br>
 ❌ Bad example: `prototype-search-personalization-0`: version is missing.</br>
-❌ Bad example: `v1.23.0.auto-resize-0`: lacks the `prototype` prefix. </br>
-❌ Bad example: `prototype-v1.23.0.auto-resize`: lacks the version suffix. </br>
-❌ Bad example: `prototype-v1.23.0.auto-resize-0-0`: feature name ends with a single number.
+❌ Bad example: `v1.23.0-auto-resize-0`: lacks the `prototype-` prefix. </br>
+❌ Bad example: `prototype-v1.23.0-auto-resize`: lacks the version suffix. </br>
+❌ Bad example: `prototype-v1.23.0-auto-resize.0-0`: the iteration is not a number.
 
 Steps to create a prototype:
 
 1. In your terminal, go to the last commit of your branch (the one you want to provide as a prototype).
-2. Create a tag following the convention: `git tag prototype-X-Y`
-3. Run Meilisearch and check that its launch summary features a line: `Prototype: prototype-X-Y` (you may need to switch branches and back after tagging for this to work).
-3. Push the tag: `git push origin prototype-X-Y`
-4. Check the [Docker CI](https://github.com/meilisearch/meilisearch/actions/workflows/publish-docker-images.yml) is now running.
+2. Use the `cargo xtask generate-prototype` command to generate the prototype name.
+3. Create the tag using the `git tag` command.
+4. Check out the tag, run Meilisearch, and check that its launch summary features a line: `Prototype: prototype-v<version>-<name>.<iteration>`.
+5. Check out your branch again: `git checkout -`.
+6. Push the tag: `git push origin prototype-v<version>-<name>.<iteration>`
+7. Check that the [Docker CI](https://github.com/meilisearch/meilisearch/actions/workflows/publish-docker-images.yml) is now running.
 
-🐳 Once the CI has finished to run (~1h30), a Docker image named `prototype-X-Y` will be available on [DockerHub](https://hub.docker.com/repository/docker/getmeili/meilisearch/general). People can use it with the following command: `docker run -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-X-Y`. <br>
+🐳 Once the CI has finished running, a Docker image named `prototype-v<version>-<name>.<iteration>` will be available on [DockerHub](https://hub.docker.com/repository/docker/getmeili/meilisearch/general). People can use it with the following command: `docker run -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-v<version>-<name>.<iteration>`. <br>
 More information about [how to run Meilisearch with Docker](https://docs.meilisearch.com/learn/cookbooks/docker.html#download-meilisearch-with-docker).
 
-⚠️ However, no binaries will be created. If the users do not use Docker, they can go to the `prototype-X-Y` tag in the Meilisearch repository and compile it from the source code.
+⚠️ However, no binaries will be created. If users do not use Docker, they can go to the `prototype-v<version>-<name>.<iteration>` tag in the Meilisearch repository and compile it from the source code.
 
 ### Communication
 
@@ -63,7 +67,7 @@ Here is an example of messages to share on GitHub:
 > How to run the prototype?
 > You need to start from a fresh new database (remove the previously used `data.ms`) and use the following Docker image:
 > ```bash
-> docker run -it --rm -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-X-Y
+> docker run -it --rm -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-v<version>-<name>.<iteration>
 > ```
 >
 > You can use the feature this way:
@@ -1,5 +1,6 @@
 {
   "name": "movies-subset-hf-embeddings",
+  "type": "bench",
   "run_count": 5,
   "extra_cli_args": [
     "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
 {
   "name": "settings-add-embeddings-hf",
+  "type": "bench",
   "run_count": 5,
   "extra_cli_args": [
     "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
 {
   "name": "hackernews.add_new_documents",
+  "type": "bench",
   "run_count": 3,
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "hackernews.ndjson_1M_ignore_first_100k",
+  "type": "bench",
   "run_count": 3,
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "hackernews.modify_facet_numbers",
+  "type": "bench",
   "run_count": 3,
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "hackernews.modify_facet_strings",
+  "type": "bench",
   "run_count": 3,
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "hackernews.modify_searchables",
+  "type": "bench",
   "run_count": 3,
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "hackernews.ndjson_1M",
+  "type": "bench",
   "run_count": 3,
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "movies.json,no-threads",
+  "type": "bench",
   "run_count": 2,
   "extra_cli_args": [
     "--max-indexing-threads=1"

@@ -1,5 +1,6 @@
 {
   "name": "movies.json",
+  "type": "bench",
   "run_count": 10,
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "search-movies-subset-hf-embeddings",
+  "type": "bench",
   "run_count": 2,
   "target": "search::=trace",
   "extra_cli_args": [

@@ -1,5 +1,6 @@
 {
   "name": "search-filterable-movies.json",
+  "type": "bench",
   "run_count": 10,
   "target": "search::=trace",
   "extra_cli_args": [],

@@ -1,6 +1,7 @@
 {
   "name": "search-geosort.jsonl_1M",
-  "run_count": 3,
+  "type": "bench",
+  "run_count": 3,
   "target": "search::=trace",
   "extra_cli_args": [],
   "assets": {

@@ -1,5 +1,6 @@
 {
   "name": "search-hackernews.ndjson_1M",
+  "type": "bench",
   "run_count": 3,
   "target": "search::=trace",
   "extra_cli_args": [],

@@ -1,5 +1,6 @@
 {
   "name": "search-movies.json",
+  "type": "bench",
   "run_count": 10,
   "target": "search::=trace",
   "extra_cli_args": [],

@@ -1,5 +1,6 @@
 {
   "name": "search-sortable-movies.json",
+  "type": "bench",
   "run_count": 10,
   "target": "search::=trace",
   "extra_cli_args": [],

@@ -1,5 +1,6 @@
 {
   "name": "settings-add-remove-filters.json",
+  "type": "bench",
   "run_count": 5,
   "extra_cli_args": [
     "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
 {
   "name": "settings-proximity-precision.json",
+  "type": "bench",
   "run_count": 5,
   "extra_cli_args": [
     "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
 {
   "name": "settings-remove-add-swap-searchable.json",
+  "type": "bench",
   "run_count": 5,
   "extra_cli_args": [
     "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
 {
   "name": "settings-typo.json",
+  "type": "bench",
   "run_count": 5,
   "extra_cli_args": [
     "--max-indexing-threads=4"
369  workloads/tests/api-keys.json  Normal file
@@ -0,0 +1,369 @@
{
  "type": "test",
  "name": "api-keys",
  "binary": {
    "source": "release",
    "edition": "community",
    "version": "1.12.0"
  },
  "commands": [
    {
      "route": "keys",
      "method": "POST",
      "body": {
        "inline": {
          "actions": [
            "search",
            "documents.add"
          ],
          "description": "Test API Key",
          "expiresAt": null,
          "indexes": [
            "movies"
          ],
          "uid": "9e053497-b180-4b9f-bf10-a4a6fc4ca1b2"
        }
      },
      "expectedStatus": 201,
      "expectedResponse": {
        "actions": [
          "search",
          "documents.add"
        ],
        "createdAt": "[timestamp]",
        "description": "Test API Key",
        "expiresAt": null,
        "indexes": [
          "movies"
        ],
        "key": "b387a8dabdc80d4d2069718ca43bad8bcb1ce5d8bb85b31af17a5ea6348317dc",
        "name": null,
        "uid": "9e053497-b180-4b9f-bf10-a4a6fc4ca1b2",
        "updatedAt": "[timestamp]"
      },
      "register": {
        "key": "/key"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "keys/{{ key }}",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "actions": [
          "search",
          "documents.add"
        ],
        "createdAt": "[timestamp]",
        "description": "Test API Key",
        "expiresAt": null,
        "indexes": [
          "movies"
        ],
        "key": "b387a8dabdc80d4d2069718ca43bad8bcb1ce5d8bb85b31af17a5ea6348317dc",
        "name": null,
        "uid": "[uuid]",
        "updatedAt": "[timestamp]"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "/indexes",
      "method": "POST",
      "body": {
        "inline": {
          "primaryKey": "id",
          "uid": "movies"
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 0,
        "type": "indexCreation"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "inline": {
          "id": 287947,
          "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
          "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
          "release_date": "2019-03-23",
          "title": "Shazam"
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 1,
        "type": "documentAdditionOrUpdate"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/search?q=shazam",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 1,
        "hits": [
          {
            "id": 287947,
            "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2019-03-23",
            "title": "Shazam"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "shazam"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "binary": {
        "source": "build",
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade",
          "--experimental-max-number-of-batched-tasks=0"
        ]
      }
    },
    {
      "route": "indexes/movies/search?q=shazam",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 1,
        "hits": [
          {
            "id": 287947,
            "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2019-03-23",
            "title": "Shazam"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "shazam",
        "requestUid": "[uid]"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "binary": {
        "source": "build",
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade"
        ]
      }
    },
    {
      "route": "health",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "status": "available"
      },
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/search?q=shazam",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 1,
        "hits": [
          {
            "id": 287947,
            "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2019-03-23",
            "title": "Shazam"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "shazam",
        "requestUid": "[uid]"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents/287947",
      "method": "DELETE",
      "body": null,
      "expectedStatus": 403,
      "expectedResponse": {
        "code": "invalid_api_key",
        "link": "https://docs.meilisearch.com/errors#invalid_api_key",
        "message": "The provided API key is invalid.",
        "type": "auth"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "inline": {
          "id": 287948,
          "overview": "Shazam turns evil and the world is in danger.",
          "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
          "release_date": "2032-03-23",
          "title": "Shazam 2"
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 3,
        "type": "documentAdditionOrUpdate"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/search?q=shaza",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 2,
        "hits": [
          {
            "id": 287947,
            "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2019-03-23",
            "title": "Shazam"
          },
          {
            "id": 287948,
            "overview": "Shazam turns evil and the world is in danger.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2032-03-23",
            "title": "Shazam 2"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "shaza",
        "requestUid": "[uid]"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "route": "tasks",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "from": 3,
        "limit": 20,
        "next": null,
        "results": [
          {
            "batchUid": 3,
            "canceledBy": null,
            "details": {
              "indexedDocuments": 1,
              "receivedDocuments": 1
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "documentAdditionOrUpdate",
            "uid": 3
          },
          {
            "batchUid": 2,
            "canceledBy": null,
            "details": {
              "upgradeFrom": "v1.12.0",
              "upgradeTo": "[latest]"
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": null,
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "upgradeDatabase",
            "uid": 2
          },
          {
            "batchUid": 1,
            "canceledBy": null,
            "details": {
              "indexedDocuments": 1,
              "receivedDocuments": 1
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "documentAdditionOrUpdate",
            "uid": 1
          },
          {
            "batchUid": 0,
            "canceledBy": null,
            "details": {
              "primaryKey": "id"
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "indexCreation",
            "uid": 0
          }
        ],
        "total": 4
      },
      "synchronous": "WaitForResponse"
    }
  ]
}
445 workloads/tests/hf-embed-hannoy.json Normal file
@@ -0,0 +1,445 @@
{
  "type": "test",
  "name": "hf-embed-hannoy",
  "binary": {
    "source": "release",
    "edition": "community",
    "version": "1.21.0"
  },
  "assets": {
    "movies-100.json": {
      "local_location": null,
      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies-100.json",
      "sha256": "d215e395e4240f12f03b8f1f68901eac82d9e7ded5b462cbf4a6b8efde76c6c6"
    }
  },
  "commands": [
    {
      "route": "experimental-features",
      "method": "PATCH",
      "body": {
        "inline": {
          "vectorStoreSetting": true
        }
      },
      "expectedStatus": 200,
      "expectedResponse": {
        "chatCompletions": false,
        "compositeEmbedders": false,
        "containsFilter": false,
        "editDocumentsByFunction": false,
        "getTaskDocumentsRoute": false,
        "logsRoute": true,
        "metrics": false,
        "multimodal": false,
        "network": false,
        "vectorStoreSetting": true
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings",
      "method": "PATCH",
      "body": {
        "inline": {
          "filterableAttributes": [
            "genres",
            "release_date"
          ],
          "searchableAttributes": [
            "title",
            "overview"
          ],
          "sortableAttributes": [
            "release_date"
          ],
          "vectorStore": "experimental"
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 0,
        "type": "settingsUpdate"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings",
      "method": "PATCH",
      "body": {
        "inline": {
          "embedders": {
            "default": {
              "source": "huggingFace"
            }
          }
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 1,
        "type": "settingsUpdate"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "asset": "movies-100.json"
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 2,
        "type": "documentAdditionOrUpdate"
      },
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/search",
      "method": "POST",
      "body": {
        "inline": {
          "attributesToRetrieve": [
            "id"
          ],
          "hybrid": {
            "embedder": "default",
            "semanticRatio": 1.0
          },
          "limit": 5,
          "q": "Police",
          "showRankingScore": true
        }
      },
      "expectedStatus": 200,
      "register": {
        "hit2score": "/hits/2/_rankingScore",
        "hit3": "/hits/3/id",
        "hit0score": "/hits/0/_rankingScore",
        "hit4": "/hits/4/id",
        "hit1score": "/hits/1/_rankingScore",
        "hit1": "/hits/1/id",
        "hit0": "/hits/0/id",
        "hit2": "/hits/2/id",
        "hit4score": "/hits/4/_rankingScore",
        "hit3score": "/hits/3/_rankingScore"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/embedders",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "default": {
          "documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
          "documentTemplateMaxBytes": 400,
          "model": "BAAI/bge-base-en-v1.5",
          "pooling": "useModel",
          "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
          "source": "huggingFace"
        }
      },
      "synchronous": "WaitForResponse"
    },
    {
      "binary": {
        "source": "build",
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade",
          "--experimental-max-number-of-batched-tasks=0"
        ]
      }
    },
    {
      "route": "experimental-features",
      "method": "PATCH",
      "body": {
        "inline": {
          "vectorStoreSetting": true
        }
      },
      "expectedStatus": 200,
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/search",
      "method": "POST",
      "body": {
        "inline": {
          "attributesToRetrieve": [
            "id"
          ],
          "hybrid": {
            "embedder": "default",
            "semanticRatio": 1.0
          },
          "limit": 5,
          "q": "Police",
          "showRankingScore": true
        }
      },
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 99,
        "hits": [
          {
            "_rankingScore": "{{ hit0score }}",
            "id": "{{ hit0 }}"
          },
          {
            "_rankingScore": "{{ hit1score }}",
            "id": "{{ hit1 }}"
          },
          {
            "_rankingScore": "{{ hit2score }}",
            "id": "{{ hit2 }}"
          },
          {
            "_rankingScore": "{{ hit3score }}",
            "id": "{{ hit3 }}"
          },
          {
            "_rankingScore": "{{ hit4score }}",
            "id": "{{ hit4 }}"
          }
        ],
        "limit": 5,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "Police",
        "requestUid": "[uuid]",
        "semanticHitCount": 5
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/embedders",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "default": {
          "documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
          "documentTemplateMaxBytes": 400,
          "model": "BAAI/bge-base-en-v1.5",
          "pooling": "useModel",
          "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
          "source": "huggingFace"
        }
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/vector-store",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": "experimental",
      "synchronous": "WaitForResponse"
    },
    {
      "binary": {
        "source": "build",
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade"
        ]
      }
    },
    {
      "route": "health",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "status": "available"
      },
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/settings/vector-store",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": "experimental",
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/search",
      "method": "POST",
      "body": {
        "inline": {
          "attributesToRetrieve": [
            "id"
          ],
          "hybrid": {
            "embedder": "default",
            "semanticRatio": 1.0
          },
          "limit": 5,
          "q": "Police",
          "showRankingScore": true
        }
      },
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 99,
        "hits": [
          {
            "_rankingScore": "{{ hit0score }}",
            "id": "{{ hit0 }}"
          },
          {
            "_rankingScore": "{{ hit1score }}",
            "id": "{{ hit1 }}"
          },
          {
            "_rankingScore": "{{ hit2score }}",
            "id": "{{ hit2 }}"
          },
          {
            "_rankingScore": "{{ hit3score }}",
            "id": "{{ hit3 }}"
          },
          {
            "_rankingScore": "{{ hit4score }}",
            "id": "{{ hit4 }}"
          }
        ],
        "limit": 5,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "Police",
        "requestUid": "[uuid]",
        "semanticHitCount": 5
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/embedders",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "default": {
          "documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
          "documentTemplateMaxBytes": 400,
          "model": "BAAI/bge-base-en-v1.5",
          "pooling": "useModel",
          "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
          "source": "huggingFace"
        }
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "tasks",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "from": 3,
        "limit": 20,
        "next": null,
        "results": [
          {
            "batchUid": 3,
            "canceledBy": null,
            "details": {
              "upgradeFrom": "v1.21.0",
              "upgradeTo": "[latest]"
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": null,
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "upgradeDatabase",
            "uid": 3
          },
          {
            "batchUid": 2,
            "canceledBy": null,
            "details": {
              "indexedDocuments": 99,
              "receivedDocuments": 99
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "documentAdditionOrUpdate",
            "uid": 2
          },
          {
            "batchUid": 1,
            "canceledBy": null,
            "details": {
              "embedders": {
                "default": {
                  "source": "huggingFace"
                }
              }
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "settingsUpdate",
            "uid": 1
          },
          {
            "batchUid": 0,
            "canceledBy": null,
            "details": {
              "filterableAttributes": [
                "genres",
                "release_date"
              ],
              "searchableAttributes": [
                "title",
                "overview"
              ],
              "sortableAttributes": [
                "release_date"
              ],
              "vectorStore": "experimental"
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "settingsUpdate",
            "uid": 0
          }
        ],
        "total": 4
      },
      "synchronous": "WaitForResponse"
    }
  ]
}
419 workloads/tests/hf-embed.json Normal file
@@ -0,0 +1,419 @@
{
  "type": "test",
  "name": "hf-embed",
  "binary": {
    "source": "release",
    "edition": "community",
    "version": "1.14.0"
  },
  "assets": {
    "movies-100.json": {
      "local_location": null,
      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies-100.json",
      "sha256": "d215e395e4240f12f03b8f1f68901eac82d9e7ded5b462cbf4a6b8efde76c6c6"
    }
  },
  "commands": [
    {
      "route": "indexes/movies/settings",
      "method": "PATCH",
      "body": {
        "inline": {
          "filterableAttributes": [
            "genres",
            "release_date"
          ],
          "searchableAttributes": [
            "title",
            "overview"
          ],
          "sortableAttributes": [
            "release_date"
          ]
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 0,
        "type": "settingsUpdate"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings",
      "method": "PATCH",
      "body": {
        "inline": {
          "embedders": {
            "default": {
              "source": "huggingFace"
            }
          }
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 1,
        "type": "settingsUpdate"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "asset": "movies-100.json"
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 2,
        "type": "documentAdditionOrUpdate"
      },
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/search",
      "method": "POST",
      "body": {
        "inline": {
          "attributesToRetrieve": [
            "id"
          ],
          "hybrid": {
            "embedder": "default",
            "semanticRatio": 1.0
          },
          "limit": 5,
          "q": "Police",
          "showRankingScore": true
        }
      },
      "register": {
        "hit0": "/hits/0/id",
        "hit1": "/hits/1/id",
        "hit2": "/hits/2/id",
        "hit3": "/hits/3/id",
        "hit4": "/hits/4/id",
        "hit0score": "/hits/0/_rankingScore",
        "hit1score": "/hits/1/_rankingScore",
        "hit2score": "/hits/2/_rankingScore",
        "hit3score": "/hits/3/_rankingScore",
        "hit4score": "/hits/4/_rankingScore"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/embedders",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "default": {
          "documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
          "documentTemplateMaxBytes": 400,
          "model": "BAAI/bge-base-en-v1.5",
          "pooling": "useModel",
          "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
          "source": "huggingFace"
        }
      },
      "synchronous": "WaitForResponse"
    },
    {
      "binary": {
        "source": "build",
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade",
          "--experimental-max-number-of-batched-tasks=0"
        ]
      }
    },
    {
      "route": "experimental-features",
      "method": "PATCH",
      "body": {
        "inline": {
          "vectorStoreSetting": true
        }
      },
      "expectedStatus": 200,
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/search",
      "method": "POST",
      "body": {
        "inline": {
          "attributesToRetrieve": [
            "id"
          ],
          "hybrid": {
            "embedder": "default",
            "semanticRatio": 1.0
          },
          "limit": 5,
          "q": "Police",
          "showRankingScore": true
        }
      },
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 99,
        "hits": [
          {
            "_rankingScore": "{{ hit0score }}",
            "id": "{{ hit0 }}"
          },
          {
            "_rankingScore": "{{ hit1score }}",
            "id": "{{ hit1 }}"
          },
          {
            "_rankingScore": "{{ hit2score }}",
            "id": "{{ hit2 }}"
          },
          {
            "_rankingScore": "{{ hit3score }}",
            "id": "{{ hit3 }}"
          },
          {
            "_rankingScore": "{{ hit4score }}",
            "id": "{{ hit4 }}"
          }
        ],
        "limit": 5,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "Police",
        "requestUid": "[uuid]",
        "semanticHitCount": 5
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/embedders",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "default": {
          "documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
          "documentTemplateMaxBytes": 400,
          "model": "BAAI/bge-base-en-v1.5",
          "pooling": "useModel",
          "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
          "source": "huggingFace"
        }
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/vector-store",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": null,
      "synchronous": "WaitForResponse"
    },
    {
      "binary": {
        "source": "build",
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade"
        ]
      }
    },
    {
      "route": "health",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "status": "available"
      },
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/settings/vector-store",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": null,
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/search",
      "method": "POST",
      "body": {
        "inline": {
          "attributesToRetrieve": [
            "id"
          ],
          "hybrid": {
            "embedder": "default",
            "semanticRatio": 1.0
          },
          "limit": 5,
          "q": "Police",
          "showRankingScore": true
        }
      },
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 99,
        "hits": [
          {
            "_rankingScore": "{{ hit0score }}",
            "id": "{{ hit0 }}"
          },
          {
            "_rankingScore": "{{ hit1score }}",
            "id": "{{ hit1 }}"
          },
          {
            "_rankingScore": "{{ hit2score }}",
            "id": "{{ hit2 }}"
          },
          {
            "_rankingScore": "{{ hit3score }}",
            "id": "{{ hit3 }}"
          },
          {
            "_rankingScore": "{{ hit4score }}",
            "id": "{{ hit4 }}"
          }
        ],
        "limit": 5,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "Police",
        "requestUid": "[uuid]",
        "semanticHitCount": 5
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/settings/embedders",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "default": {
          "documentTemplate": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
          "documentTemplateMaxBytes": 400,
          "model": "BAAI/bge-base-en-v1.5",
          "pooling": "useModel",
          "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
          "source": "huggingFace"
        }
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "tasks",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "from": 3,
        "limit": 20,
        "next": null,
        "results": [
          {
            "batchUid": 3,
            "canceledBy": null,
            "details": {
              "upgradeFrom": "v1.14.0",
              "upgradeTo": "[latest]"
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": null,
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "upgradeDatabase",
            "uid": 3
          },
          {
            "batchUid": 2,
            "canceledBy": null,
            "details": {
              "indexedDocuments": 99,
              "receivedDocuments": 99
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "documentAdditionOrUpdate",
            "uid": 2
          },
          {
            "batchUid": 1,
            "canceledBy": null,
            "details": {
              "embedders": {
                "default": {
                  "source": "huggingFace"
                }
              }
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "settingsUpdate",
            "uid": 1
          },
          {
            "batchUid": 0,
            "canceledBy": null,
            "details": {
              "filterableAttributes": [
                "genres",
                "release_date"
              ],
              "searchableAttributes": [
                "title",
                "overview"
              ],
              "sortableAttributes": [
                "release_date"
              ]
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": "movies",
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "settingsUpdate",
            "uid": 0
          }
        ],
        "total": 4
      },
      "synchronous": "WaitForResponse"
    }
  ]
}
326 workloads/tests/movies.json Normal file
@@ -0,0 +1,326 @@
{
  "type": "test",
  "name": "movies",
  "binary": {
    "source": "release",
    "edition": "community",
    "version": "1.12.0"
  },
  "assets": {
    "movies.json": {
      "local_location": null,
      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
      "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
    }
  },
  "commands": [
    {
      "route": "indexes/movies/settings",
      "method": "PATCH",
      "body": {
        "inline": {
          "filterableAttributes": [
            "genres",
            "release_date"
          ],
          "searchableAttributes": [
            "title",
            "overview"
          ],
          "sortableAttributes": [
            "release_date"
          ]
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 0,
        "type": "settingsUpdate"
      },
      "synchronous": "DontWait"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "asset": "movies.json"
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 1,
        "type": "documentAdditionOrUpdate"
      },
      "synchronous": "WaitForTask"
    },
    {
      "binary": {
        "source": "build",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade",
          "--experimental-max-number-of-batched-tasks=0"
        ]
      }
    },
    {
      "route": "indexes/movies/search?q=bitcoin",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 6,
        "hits": [
          {
            "genres": [
              "Documentary"
            ],
            "id": 349086,
            "overview": "A documentary exploring how money and the trading of value has evolved, culminating in Bitcoin.",
            "poster": "https://image.tmdb.org/t/p/w500/A82oxum0dTL71N0cjD0F66S9gdt.jpg",
            "release_date": 1437177600,
            "title": "Bitcoin: The End of Money as We Know It"
          },
          {
            "genres": [
              "Documentary",
              "History"
            ],
            "id": 427451,
            "overview": "Not since the invention of the Internet has there been such a disruptive technology as Bitcoin. Bitcoin's early pioneers sought to blur the lines of sovereignty and the financial status quo. After years of underground development Bitcoin grabbed the attention of a curious public, and the ire of the regulators the technology had subverted. After landmark arrests of prominent cyber criminals Bitcoin faces its most severe adversary yet, the very banks it was built to destroy.",
            "poster": "https://image.tmdb.org/t/p/w500/qW3vsno24UBawZjnrKfQ1qHRPD6.jpg",
            "release_date": 1483056000,
            "title": "Banking on Bitcoin"
          },
          {
            "genres": [
              "Documentary",
              "History"
            ],
            "id": 292607,
            "overview": "A documentary about the development and spread of the virtual currency called Bitcoin.",
            "poster": "https://image.tmdb.org/t/p/w500/nUzeZupwmEOoddQIDAq10Gyifk0.jpg",
            "release_date": 1412294400,
            "title": "The Rise and Rise of Bitcoin"
          },
          {
            "genres": [
              "Documentary"
            ],
            "id": 321769,
            "overview": "Deep Web gives the inside story of one of the most important and riveting digital crime sagas of the century -- the arrest of Ross William Ulbricht, the 30-year-old entrepreneur convicted of being 'Dread Pirate Roberts,' creator and operator of online black market Silk Road. As the only film with exclusive access to the Ulbricht family, Deep Web explores how the brightest minds and thought leaders behind the Deep Web and Bitcoin are now caught in the crosshairs of the battle for control of a future inextricably linked to technology, with our digital rights hanging in the balance.",
            "poster": "https://image.tmdb.org/t/p/w500/dtSOFZ7ioDSaJxPzORaplqo8QZ2.jpg",
            "release_date": 1426377600,
            "title": "Deep Web"
          },
          {
            "genres": [
              "Comedy",
              "Horror"
            ],
            "id": 179538,
            "overview": "A gang of gold thieves lands in a coven of witches who are preparing for an ancient ritual... and in need of a sacrifice.",
            "poster": "https://image.tmdb.org/t/p/w500/u7w6vghlbz8xDUZRayOXma3Ax96.jpg",
            "release_date": 1379635200,
            "title": "Witching & Bitching"
          },
          {
            "genres": [
              "Comedy"
            ],
            "id": 70882,
            "overview": "Roseanne Barr is back with an all-new HBO comedy special! Filmed live at the Comedy Store in Los Angeles, Roseanne returns to her stand-up roots for the first time in 14 years, as she tackles hot issues of today - from gay marriage to President Bush.",
            "poster": "https://image.tmdb.org/t/p/w500/cUkQQnfPTonMXRroZzCyw11eKXr.jpg",
            "release_date": 1162598400,
            "title": "Roseanne Barr: Blonde and Bitchin'"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "bitcoin",
        "requestUid": "[uuid]"
      },
      "synchronous": "DontWait"
    },
    {
      "route": "indexes/movies/stats",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "avgDocumentSize": 0,
        "fieldDistribution": {
          "genres": 31944,
          "id": 31944,
          "overview": 31944,
          "poster": 31944,
          "release_date": 31944,
          "title": 31944
        },
        "isIndexing": false,
        "numberOfDocuments": 31944,
        "rawDocumentDbSize": 0
      },
      "synchronous": "DontWait"
    },
    {
      "binary": {
        "source": "build",
        "edition": "community",
        "extraCliArgs": [
          "--experimental-dumpless-upgrade"
        ]
      }
    },
    {
      "route": "health",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "status": "available"
      },
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/search?q=bitcoin",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 6,
        "hits": [
          {
            "genres": [
              "Documentary"
            ],
            "id": 349086,
            "overview": "A documentary exploring how money and the trading of value has evolved, culminating in Bitcoin.",
            "poster": "https://image.tmdb.org/t/p/w500/A82oxum0dTL71N0cjD0F66S9gdt.jpg",
            "release_date": 1437177600,
            "title": "Bitcoin: The End of Money as We Know It"
          },
          {
            "genres": [
              "Documentary",
              "History"
            ],
            "id": 427451,
            "overview": "Not since the invention of the Internet has there been such a disruptive technology as Bitcoin. Bitcoin's early pioneers sought to blur the lines of sovereignty and the financial status quo. After years of underground development Bitcoin grabbed the attention of a curious public, and the ire of the regulators the technology had subverted. After landmark arrests of prominent cyber criminals Bitcoin faces its most severe adversary yet, the very banks it was built to destroy.",
            "poster": "https://image.tmdb.org/t/p/w500/qW3vsno24UBawZjnrKfQ1qHRPD6.jpg",
            "release_date": 1483056000,
            "title": "Banking on Bitcoin"
          },
          {
            "genres": [
              "Documentary",
              "History"
            ],
            "id": 292607,
            "overview": "A documentary about the development and spread of the virtual currency called Bitcoin.",
            "poster": "https://image.tmdb.org/t/p/w500/nUzeZupwmEOoddQIDAq10Gyifk0.jpg",
            "release_date": 1412294400,
            "title": "The Rise and Rise of Bitcoin"
          },
          {
            "genres": [
              "Documentary"
            ],
            "id": 321769,
            "overview": "Deep Web gives the inside story of one of the most important and riveting digital crime sagas of the century -- the arrest of Ross William Ulbricht, the 30-year-old entrepreneur convicted of being 'Dread Pirate Roberts,' creator and operator of online black market Silk Road. As the only film with exclusive access to the Ulbricht family, Deep Web explores how the brightest minds and thought leaders behind the Deep Web and Bitcoin are now caught in the crosshairs of the battle for control of a future inextricably linked to technology, with our digital rights hanging in the balance.",
            "poster": "https://image.tmdb.org/t/p/w500/dtSOFZ7ioDSaJxPzORaplqo8QZ2.jpg",
            "release_date": 1426377600,
            "title": "Deep Web"
          },
          {
            "genres": [
              "Comedy",
              "Horror"
            ],
            "id": 179538,
            "overview": "A gang of gold thieves lands in a coven of witches who are preparing for an ancient ritual... and in need of a sacrifice.",
            "poster": "https://image.tmdb.org/t/p/w500/u7w6vghlbz8xDUZRayOXma3Ax96.jpg",
            "release_date": 1379635200,
            "title": "Witching & Bitching"
          },
          {
            "genres": [
              "Comedy"
            ],
            "id": 70882,
            "overview": "Roseanne Barr is back with an all-new HBO comedy special! Filmed live at the Comedy Store in Los Angeles, Roseanne returns to her stand-up roots for the first time in 14 years, as she tackles hot issues of today - from gay marriage to President Bush.",
            "poster": "https://image.tmdb.org/t/p/w500/cUkQQnfPTonMXRroZzCyw11eKXr.jpg",
            "release_date": 1162598400,
            "title": "Roseanne Barr: Blonde and Bitchin'"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "bitcoin",
        "requestUid": "[uuid]"
      },
      "synchronous": "DontWait"
    },
    {
      "route": "indexes/movies/stats",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "avgDocumentSize": "[avgDocSize]",
        "fieldDistribution": {
          "genres": 31944,
          "id": 31944,
          "overview": 31944,
          "poster": 31944,
          "release_date": 31944,
          "title": 31944
        },
        "isIndexing": false,
        "numberOfDocuments": 31944,
        "numberOfEmbeddedDocuments": 0,
        "numberOfEmbeddings": 0,
        "rawDocumentDbSize": "[rawDbSize]"
      },
      "synchronous": "DontWait"
    },
    {
      "route": "tasks?types=upgradeDatabase",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "from": 2,
        "limit": 20,
        "next": null,
        "results": [
          {
            "batchUid": 2,
            "canceledBy": null,
            "details": {
              "upgradeFrom": "v1.12.0",
              "upgradeTo": "[latest]"
            },
            "duration": "[duration]",
            "enqueuedAt": "[timestamp]",
            "error": null,
            "finishedAt": "[timestamp]",
            "indexUid": null,
            "startedAt": "[timestamp]",
            "status": "succeeded",
            "type": "upgradeDatabase",
            "uid": 2
          }
        ],
        "total": 1
      },
      "synchronous": "WaitForResponse"
    }
  ]
}