Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-11-26 07:40:31 +00:00)

Compare commits: v1.26.0 ... change-net (12 commits)
| SHA1 |
|---|
| 9bd3482230 |
| d6e4e414d7 |
| c0617efe76 |
| 8316c36648 |
| 572bae9da1 |
| 2a330dce83 |
| d62a6b6f0d |
| 58b8630862 |
| 0703767fc6 |
| e0c97325d6 |
| 0f3ef8de73 |
| 7313cefd74 |
.github/dependabot.yml (vendored, 1 change)

@@ -7,5 +7,6 @@ updates:
    schedule:
      interval: "monthly"
    labels:
      - 'skip changelog'
      - 'dependencies'
    rebase-strategy: disabled
.github/release-draft-template.yml (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
name-template: 'v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
exclude-labels:
  - 'skip changelog'
version-resolver:
  minor:
    labels:
      - 'enhancement'
  default: patch
categories:
  - title: '⚠️ Breaking changes'
    label: 'breaking-change'
  - title: '🚀 Enhancements'
    label: 'enhancement'
  - title: '🐛 Bug Fixes'
    label: 'bug'
  - title: '🔒 Security'
    label: 'security'
  - title: '⚙️ Maintenance/misc'
    label:
      - 'maintenance'
      - 'documentation'
template: |
  $CHANGES

  ❤️ Huge thanks to our contributors: $CONTRIBUTORS.
no-changes-template: 'Changes are coming soon 😎'
sort-direction: 'ascending'
replacers:
  - search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
    replace: ''
  - search: '/(?:and )?@dependabot(?:\[bot\])?,?/g'
    replace: ''
.github/workflows/publish-docker-images.yml (vendored, 2 changes)

@@ -65,7 +65,7 @@ jobs:
        uses: docker/setup-buildx-action@v3

      - name: Install cosign
        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
        uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # tag=v3.9.2

      - name: Login to Docker Hub
        uses: docker/login-action@v3
.github/workflows/publish-release-assets.yml (vendored, 13 changes)

@@ -11,7 +11,7 @@ jobs:
  check-version:
    name: Check the version validity
    runs-on: ubuntu-latest
    # No need to check the version for dry run (cron or workflow_dispatch)
    # No need to check the version for dry run (cron)
    steps:
      - uses: actions/checkout@v5
      # Check if the tag has the v<nmumber>.<number>.<number> format.
@@ -48,7 +48,7 @@
      - uses: dtolnay/rust-toolchain@1.89
      - name: Build
        run: cargo build --release --locked
      # No need to upload binaries for dry run (cron or workflow_dispatch)
      # No need to upload binaries for dry run (cron)
      - name: Upload binaries to release
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.11.2
@@ -78,7 +78,7 @@
      - uses: dtolnay/rust-toolchain@1.89
      - name: Build
        run: cargo build --release --locked
      # No need to upload binaries for dry run (cron or workflow_dispatch)
      # No need to upload binaries for dry run (cron)
      - name: Upload binaries to release
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.11.2
@@ -111,7 +111,7 @@
          command: build
          args: --release --target ${{ matrix.target }}
      - name: Upload the binary to release
        # No need to upload binaries for dry run (cron or workflow_dispatch)
        # No need to upload binaries for dry run (cron)
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.11.2
        with:
@@ -176,7 +176,7 @@
      - name: List target output files
        run: ls -lR ./target
      - name: Upload the binary to release
        # No need to upload binaries for dry run (cron or workflow_dispatch)
        # No need to upload binaries for dry run (cron)
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.11.2
        with:
@@ -187,7 +187,6 @@

  publish-openapi-file:
    name: Publish OpenAPI file
    needs: check-version
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
@@ -202,7 +201,7 @@
          cd crates/openapi-generator
          cargo run --release -- --pretty --output ../../meilisearch.json
      - name: Upload OpenAPI to Release
        # No need to upload for dry run (cron or workflow_dispatch)
        # No need to upload for dry run (cron)
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.11.2
        with:
.github/workflows/release-drafter.yml (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
name: Release Drafter

permissions:
  contents: read
  pull-requests: write

on:
  push:
    branches:
      - main

jobs:
  update_release_draft:
    runs-on: ubuntu-latest
    steps:
      - uses: release-drafter/release-drafter@v6
        with:
          config-name: release-draft-template.yml
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_DRAFTER_TOKEN }}
.github/workflows/sdks-tests.yml (vendored, 16 changes)

@@ -50,7 +50,7 @@ jobs:
        with:
          repository: meilisearch/meilisearch-dotnet
      - name: Setup .NET Core
        uses: actions/setup-dotnet@v5
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: "8.0.x"
      - name: Install dependencies
@@ -100,7 +100,7 @@
          - '7700:7700'
    steps:
      - name: Set up Go
        uses: actions/setup-go@v6
        uses: actions/setup-go@v5
        with:
          go-version: stable
      - uses: actions/checkout@v5
@@ -135,13 +135,13 @@
      - name: Set up Java
        uses: actions/setup-java@v5
        with:
          java-version: 17
          distribution: 'temurin'
          java-version: 8
          distribution: 'zulu'
          cache: gradle
      - name: Grant execute permission for gradlew
        run: chmod +x gradlew
      - name: Build and run unit and integration tests
        run: ./gradlew build integrationTest --info
        run: ./gradlew build integrationTest

  meilisearch-js-tests:
    needs: define-docker-image
@@ -160,7 +160,7 @@
        with:
          repository: meilisearch/meilisearch-js
      - name: Setup node
        uses: actions/setup-node@v5
        uses: actions/setup-node@v4
        with:
          cache: 'yarn'
      - name: Install dependencies
@@ -224,7 +224,7 @@
        with:
          repository: meilisearch/meilisearch-python
      - name: Set up Python
        uses: actions/setup-python@v6
        uses: actions/setup-python@v5
      - name: Install pipenv
        uses: dschep/install-pipenv-action@v1
      - name: Install dependencies
@@ -318,7 +318,7 @@
        with:
          repository: meilisearch/meilisearch-js-plugins
      - name: Setup node
        uses: actions/setup-node@v5
        uses: actions/setup-node@v4
        with:
          cache: yarn
      - name: Install dependencies
Cargo.lock (generated, 426 changes)
@@ -310,7 +310,6 @@ dependencies = [
|
||||
"const-random",
|
||||
"getrandom 0.3.3",
|
||||
"once_cell",
|
||||
"serde",
|
||||
"version_check",
|
||||
"zerocopy",
|
||||
]
|
||||
@@ -345,6 +344,12 @@ version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
|
||||
|
||||
[[package]]
|
||||
name = "allocator-api2"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c583acf993cf4245c4acb0a2cc2ab1f9cc097de73411bb6d3647ff6af2b1013d"
|
||||
|
||||
[[package]]
|
||||
name = "anes"
|
||||
version = "0.1.6"
|
||||
@@ -448,9 +453,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
|
||||
|
||||
[[package]]
|
||||
name = "arroy"
|
||||
version = "0.6.4-nested-rtxns"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fb8b6b34d9d83e3b837cb7f72a439dbd2293b102393c084af5e5b097212e1532"
|
||||
checksum = "8578a72223dfa13dfd9fc144d15260d134361789ebdea9b16e85a511edc73c7d"
|
||||
dependencies = [
|
||||
"bytemuck",
|
||||
"byteorder",
|
||||
@@ -487,7 +492,7 @@ dependencies = [
|
||||
"backoff",
|
||||
"base64 0.22.1",
|
||||
"bytes",
|
||||
"derive_builder",
|
||||
"derive_builder 0.20.2",
|
||||
"eventsource-stream",
|
||||
"futures",
|
||||
"rand 0.8.5",
|
||||
@@ -584,7 +589,7 @@ source = "git+https://github.com/meilisearch/bbqueue#cbb87cc707b5af415ef203bdaf2
|
||||
|
||||
[[package]]
|
||||
name = "benchmarks"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bumpalo",
|
||||
@@ -794,7 +799,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "build-info"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"time",
|
||||
@@ -807,7 +812,7 @@ version = "3.19.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
|
||||
dependencies = [
|
||||
"allocator-api2",
|
||||
"allocator-api2 0.2.21",
|
||||
"serde",
|
||||
]
|
||||
|
||||
@@ -817,7 +822,7 @@ version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ce682bdc86c2e25ef5cd95881d9d6a1902214eddf74cf9ffea88fe1464377e8"
|
||||
dependencies = [
|
||||
"allocator-api2",
|
||||
"allocator-api2 0.2.21",
|
||||
"bitpacking",
|
||||
"bumpalo",
|
||||
"hashbrown 0.15.5",
|
||||
@@ -940,7 +945,7 @@ dependencies = [
|
||||
"rand 0.9.2",
|
||||
"rand_distr",
|
||||
"rayon",
|
||||
"safetensors 0.4.5",
|
||||
"safetensors",
|
||||
"thiserror 1.0.69",
|
||||
"ug",
|
||||
"ug-cuda",
|
||||
@@ -967,7 +972,7 @@ dependencies = [
|
||||
"half",
|
||||
"num-traits",
|
||||
"rayon",
|
||||
"safetensors 0.4.5",
|
||||
"safetensors",
|
||||
"serde",
|
||||
"thiserror 1.0.69",
|
||||
]
|
||||
@@ -1047,15 +1052,6 @@ version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "castaway"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a"
|
||||
dependencies = [
|
||||
"rustversion",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.37"
|
||||
@@ -1079,9 +1075,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cellulite"
|
||||
version = "0.3.1-nested-rtxns-2"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f22d721963ead1a144f10cb8b53dc9469e760723b069123c7c7bc675c7354270"
|
||||
checksum = "71a41aa2cd021bc3f23d97cc1e645848ca8c279fc757d1570ba7fe7ddc021290"
|
||||
dependencies = [
|
||||
"crossbeam",
|
||||
"geo",
|
||||
@@ -1096,7 +1092,7 @@ dependencies = [
|
||||
"steppe",
|
||||
"thiserror 2.0.16",
|
||||
"thread_local",
|
||||
"zerometry",
|
||||
"zerometry 0.3.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1132,9 +1128,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "charabia"
|
||||
version = "0.9.8"
|
||||
version = "0.9.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bbdc8cd8f999e8b8b13ed71d30962bbf98cf39e2f2a9f1ae1ba354199239d66e"
|
||||
checksum = "7c2f456825b7f15eac01a1cae40c12c3f55e931d4327e6e4fa59508d664e9568"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"csv",
|
||||
@@ -1143,6 +1139,7 @@ dependencies = [
|
||||
"irg-kvariants",
|
||||
"jieba-rs",
|
||||
"lindera",
|
||||
"once_cell",
|
||||
"pinyin",
|
||||
"serde",
|
||||
"slice-group-by",
|
||||
@@ -1218,7 +1215,7 @@ dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
"clap_lex",
|
||||
"strsim",
|
||||
"strsim 0.11.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1257,21 +1254,6 @@ version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
|
||||
|
||||
[[package]]
|
||||
name = "compact_str"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a"
|
||||
dependencies = [
|
||||
"castaway",
|
||||
"cfg-if",
|
||||
"itoa",
|
||||
"rustversion",
|
||||
"ryu",
|
||||
"serde",
|
||||
"static_assertions",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "concat-arrays"
|
||||
version = "0.1.2"
|
||||
@@ -1530,14 +1512,38 @@ dependencies = [
|
||||
"libloading",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling"
|
||||
version = "0.14.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
|
||||
dependencies = [
|
||||
"darling_core 0.14.4",
|
||||
"darling_macro 0.14.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"darling_macro",
|
||||
"darling_core 0.20.11",
|
||||
"darling_macro 0.20.11",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_core"
|
||||
version = "0.14.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
|
||||
dependencies = [
|
||||
"fnv",
|
||||
"ident_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"strsim 0.10.0",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1550,17 +1556,28 @@ dependencies = [
|
||||
"ident_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"strsim",
|
||||
"strsim 0.11.1",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_macro"
|
||||
version = "0.14.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
|
||||
dependencies = [
|
||||
"darling_core 0.14.4",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_macro"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"darling_core 0.20.11",
|
||||
"quote",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
@@ -1570,9 +1587,6 @@ name = "dary_heap"
|
||||
version = "0.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deadpool"
|
||||
@@ -1628,13 +1642,34 @@ dependencies = [
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder"
|
||||
version = "0.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8"
|
||||
dependencies = [
|
||||
"derive_builder_macro 0.12.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
|
||||
dependencies = [
|
||||
"derive_builder_macro",
|
||||
"derive_builder_macro 0.20.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder_core"
|
||||
version = "0.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f"
|
||||
dependencies = [
|
||||
"darling 0.14.4",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1643,19 +1678,29 @@ version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
|
||||
dependencies = [
|
||||
"darling",
|
||||
"darling 0.20.11",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder_macro"
|
||||
version = "0.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e"
|
||||
dependencies = [
|
||||
"derive_builder_core 0.12.0",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder_macro"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
|
||||
dependencies = [
|
||||
"derive_builder_core",
|
||||
"derive_builder_core 0.20.2",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
@@ -1694,7 +1739,7 @@ dependencies = [
|
||||
"serde-cs",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"strsim",
|
||||
"strsim 0.11.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1779,12 +1824,12 @@ version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "415b6ec780d34dcf624666747194393603d0373b7141eef01d12ee58881507d9"
|
||||
dependencies = [
|
||||
"phf 0.11.3",
|
||||
"phf",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dump"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"big_s",
|
||||
@@ -2027,7 +2072,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
||||
|
||||
[[package]]
|
||||
name = "file-store"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"tempfile",
|
||||
"thiserror 2.0.16",
|
||||
@@ -2049,7 +2094,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "filter-parser"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"insta",
|
||||
"levenshtein_automata",
|
||||
@@ -2077,7 +2122,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "flatten-serde-json"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"criterion",
|
||||
"serde_json",
|
||||
@@ -2234,7 +2279,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "fuzzers"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"bumpalo",
|
||||
@@ -2532,6 +2577,7 @@ dependencies = [
|
||||
"num-traits",
|
||||
"robust",
|
||||
"rstar",
|
||||
"spade",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2542,6 +2588,7 @@ checksum = "75a4dcd69d35b2c87a7c83bce9af69fd65c9d68d3833a0ded568983928f3fc99"
|
||||
dependencies = [
|
||||
"approx",
|
||||
"num-traits",
|
||||
"rayon",
|
||||
"rstar",
|
||||
"serde",
|
||||
]
|
||||
@@ -2713,9 +2760,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "hannoy"
|
||||
version = "0.0.9-nested-rtxns-2"
|
||||
version = "0.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06eda090938d9dcd568c8c2a5de383047ed9191578ebf4a342d2975d16e621f2"
|
||||
checksum = "0dba13a271c49a119a97862ebf0a74131d879832868400d9fcd937b790058fdd"
|
||||
dependencies = [
|
||||
"bytemuck",
|
||||
"byteorder",
|
||||
@@ -2760,7 +2807,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
|
||||
dependencies = [
|
||||
"ahash 0.8.12",
|
||||
"allocator-api2",
|
||||
"allocator-api2 0.2.21",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2769,7 +2816,7 @@ version = "0.15.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
|
||||
dependencies = [
|
||||
"allocator-api2",
|
||||
"allocator-api2 0.2.21",
|
||||
"equivalent",
|
||||
"foldhash",
|
||||
"serde",
|
||||
@@ -2793,9 +2840,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
|
||||
|
||||
[[package]]
|
||||
name = "heed"
|
||||
version = "0.22.1-nested-rtxns-6"
|
||||
version = "0.22.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c69e07cd539834bedcfa938f3d7d8520cce1ad2b0776c122b5ccdf8fd5bafe12"
|
||||
checksum = "6a56c94661ddfb51aa9cdfbf102cfcc340aa69267f95ebccc4af08d7c530d393"
|
||||
dependencies = [
|
||||
"bitflags 2.9.4",
|
||||
"byteorder",
|
||||
@@ -2805,6 +2852,7 @@ dependencies = [
|
||||
"lmdb-master-sys",
|
||||
"once_cell",
|
||||
"page_size",
|
||||
"serde",
|
||||
"synchronoise",
|
||||
"url",
|
||||
]
|
||||
@@ -3012,6 +3060,7 @@ dependencies = [
|
||||
"i_key_sort",
|
||||
"i_shape",
|
||||
"i_tree",
|
||||
"rayon",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3188,7 +3237,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "index-scheduler"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"backoff",
|
||||
@@ -3197,17 +3246,18 @@ dependencies = [
|
||||
"bumpalo",
|
||||
"bumparaw-collections",
|
||||
"byte-unit",
|
||||
"bytes",
|
||||
"convert_case 0.8.0",
|
||||
"crossbeam-channel",
|
||||
"csv",
|
||||
"derive_builder",
|
||||
"derive_builder 0.20.2",
|
||||
"dump",
|
||||
"enum-iterator",
|
||||
"file-store",
|
||||
"flate2",
|
||||
"hashbrown 0.15.5",
|
||||
"indexmap",
|
||||
"insta",
|
||||
"itertools 0.14.0",
|
||||
"maplit",
|
||||
"meili-snap",
|
||||
"meilisearch-auth",
|
||||
@@ -3215,13 +3265,10 @@ dependencies = [
|
||||
"memmap2",
|
||||
"page_size",
|
||||
"rayon",
|
||||
"reqwest",
|
||||
"roaring 0.10.12",
|
||||
"rusty-s3",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"synchronoise",
|
||||
"tar",
|
||||
"tempfile",
|
||||
"thiserror 2.0.16",
|
||||
"time",
|
||||
@@ -3368,6 +3415,15 @@ dependencies = [
|
||||
"either",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itertools"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
|
||||
dependencies = [
|
||||
"either",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itertools"
|
||||
version = "0.13.0"
|
||||
@@ -3394,49 +3450,26 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||
|
||||
[[package]]
|
||||
name = "jieba-macros"
|
||||
version = "0.8.1"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "348294e44ee7e3c42685da656490f8febc7359632544019621588902216da95c"
|
||||
checksum = "7c676b32a471d3cfae8dac2ad2f8334cd52e53377733cca8c1fb0a5062fec192"
|
||||
dependencies = [
|
||||
"phf_codegen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jieba-rs"
|
||||
version = "0.8.1"
|
||||
version = "0.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "766bd7012aa5ba49411ebdf4e93bddd59b182d2918e085d58dec5bb9b54b7105"
|
||||
checksum = "f5dd552bbb95d578520ee68403bf8aaf0dbbb2ce55b0854d019f9350ad61040a"
|
||||
dependencies = [
|
||||
"cedarwood",
|
||||
"fxhash",
|
||||
"include-flate",
|
||||
"jieba-macros",
|
||||
"phf 0.13.1",
|
||||
"lazy_static",
|
||||
"phf",
|
||||
"regex",
|
||||
"rustc-hash 2.1.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jiff"
|
||||
version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49"
|
||||
dependencies = [
|
||||
"jiff-static",
|
||||
"log",
|
||||
"portable-atomic",
|
||||
"portable-atomic-util",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jiff-static"
|
||||
version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3461,7 +3494,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "json-depth-checker"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"criterion",
|
||||
"serde_json",
|
||||
@@ -3712,7 +3745,7 @@ dependencies = [
|
||||
"bincode 2.0.1",
|
||||
"byteorder",
|
||||
"csv",
|
||||
"derive_builder",
|
||||
"derive_builder 0.20.2",
|
||||
"encoding",
|
||||
"encoding_rs",
|
||||
"encoding_rs_io",
|
||||
@@ -3862,9 +3895,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
|
||||
|
||||
[[package]]
|
||||
name = "lmdb-master-sys"
|
||||
version = "0.2.6-nested-rtxns-6"
|
||||
version = "0.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e113d9bf240f974fbe7fd516cbfd8c422e925c0655495501c7237548425493d0"
|
||||
checksum = "864808e0b19fb6dd3b70ba94ee671b82fce17554cf80aeb0a155c65bb08027df"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"doxygen-rs",
|
||||
@@ -3962,16 +3995,6 @@ version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"
|
||||
|
||||
[[package]]
|
||||
name = "md-5"
|
||||
version = "0.10.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "md5"
|
||||
version = "0.7.0"
|
||||
@@ -3980,7 +4003,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
|
||||
|
||||
[[package]]
|
||||
name = "meili-snap"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"insta",
|
||||
"md5",
|
||||
@@ -3991,7 +4014,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meilisearch"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"actix-cors",
|
||||
"actix-http",
|
||||
@@ -4088,7 +4111,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meilisearch-auth"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"enum-iterator",
|
||||
@@ -4107,7 +4130,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meilisearch-types"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"actix-web",
|
||||
"anyhow",
|
||||
@@ -4142,7 +4165,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meilitool"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"clap",
|
||||
@@ -4176,8 +4199,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "milli"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"allocator-api2 0.3.1",
|
||||
"arroy",
|
||||
"bbqueue",
|
||||
"big_s",
|
||||
@@ -4235,7 +4259,6 @@ dependencies = [
|
||||
"roaring 0.10.12",
|
||||
"rstar",
|
||||
"rustc-hash 2.1.1",
|
||||
"safetensors 0.6.2",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"slice-group-by",
|
||||
@@ -4255,7 +4278,7 @@ dependencies = [
|
||||
"url",
|
||||
"utoipa",
|
||||
"uuid",
|
||||
"zerometry",
|
||||
"zerometry 0.1.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4757,7 +4780,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
|
||||
|
||||
[[package]]
|
||||
name = "permissive-json-pointer"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"big_s",
|
||||
"serde_json",
|
||||
@@ -4814,27 +4837,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
|
||||
dependencies = [
|
||||
"phf_macros",
|
||||
"phf_shared 0.11.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf"
|
||||
dependencies = [
|
||||
"phf_shared 0.13.1",
|
||||
"serde",
|
||||
"phf_shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_codegen"
|
||||
version = "0.13.1"
|
||||
version = "0.11.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "49aa7f9d80421bca176ca8dbfebe668cc7a2684708594ec9f3c0db0805d5d6e1"
|
||||
checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a"
|
||||
dependencies = [
|
||||
"phf_generator 0.13.1",
|
||||
"phf_shared 0.13.1",
|
||||
"phf_generator",
|
||||
"phf_shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4843,28 +4856,18 @@ version = "0.11.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d"
|
||||
dependencies = [
|
||||
"phf_shared 0.11.3",
|
||||
"phf_shared",
|
||||
"rand 0.8.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_generator"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737"
|
||||
dependencies = [
|
||||
"fastrand",
|
||||
"phf_shared 0.13.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_macros"
|
||||
version = "0.11.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216"
|
||||
dependencies = [
|
||||
"phf_generator 0.11.3",
|
||||
"phf_shared 0.11.3",
|
||||
"phf_generator",
|
||||
"phf_shared",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.106",
|
||||
@@ -4879,15 +4882,6 @@ dependencies = [
|
||||
"siphasher",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_shared"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266"
|
||||
dependencies = [
|
||||
"siphasher",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project"
|
||||
version = "1.1.10"
|
||||
@@ -4975,15 +4969,6 @@ version = "1.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
|
||||
|
||||
[[package]]
|
||||
name = "portable-atomic-util"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
|
||||
dependencies = [
|
||||
"portable-atomic",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "potential_utf"
|
||||
version = "0.1.3"
|
||||
@@ -5161,16 +5146,6 @@ dependencies = [
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quick-xml"
|
||||
version = "0.38.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "42a232e7487fc2ef313d96dde7948e7a3c05101870d8985e4fd8d26aedd27b89"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quinn"
|
||||
version = "0.11.9"
|
||||
@@ -5346,12 +5321,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rayon-cond"
|
||||
version = "0.4.0"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2964d0cf57a3e7a06e8183d14a8b527195c706b7983549cd5462d5aa3747438f"
|
||||
checksum = "059f538b55efd2309c9794130bc149c6a553db90e9d99c2030785c82f0bd7df9"
|
||||
dependencies = [
|
||||
"either",
|
||||
"itertools 0.14.0",
|
||||
"itertools 0.11.0",
|
||||
"rayon",
|
||||
]
|
||||
|
||||
@@ -5446,7 +5421,6 @@ dependencies = [
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"h2 0.4.12",
|
||||
"http 1.3.1",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
@@ -5737,25 +5711,6 @@ version = "1.0.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
|
||||
|
||||
[[package]]
|
||||
name = "rusty-s3"
|
||||
version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fac2edd2f0b56bd79a7343f49afc01c2d41010df480538a510e0abc56044f66c"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"hmac",
|
||||
"jiff",
|
||||
"md-5",
|
||||
"percent-encoding",
|
||||
"quick-xml",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"url",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.20"
|
||||
@@ -5772,16 +5727,6 @@ dependencies = [
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "safetensors"
|
||||
version = "0.6.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "172dd94c5a87b5c79f945c863da53b2ebc7ccef4eca24ac63cca66a41aab2178"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "same-file"
|
||||
version = "1.0.6"
|
||||
@@ -6203,6 +6148,18 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spade"
|
||||
version = "2.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fb313e1c8afee5b5647e00ee0fe6855e3d529eb863a0fdae1d60006c4d1e9990"
|
||||
dependencies = [
|
||||
"hashbrown 0.15.5",
|
||||
"num-traits",
|
||||
"robust",
|
||||
"smallvec",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spin"
|
||||
version = "0.5.2"
|
||||
@@ -6263,6 +6220,12 @@ dependencies = [
|
||||
"indexmap",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.11.1"
|
||||
@@ -6588,24 +6551,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||
|
||||
[[package]]
|
||||
name = "tokenizers"
|
||||
version = "0.22.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6475a27088c98ea96d00b39a9ddfb63780d1ad4cceb6f48374349a96ab2b7842"
|
||||
version = "0.15.2"
|
||||
source = "git+https://github.com/huggingface/tokenizers.git?tag=v0.15.2#701a73b869602b5639589d197e805349cdba3223"
|
||||
dependencies = [
|
||||
"ahash 0.8.12",
|
||||
"aho-corasick",
|
||||
"compact_str",
|
||||
"dary_heap",
|
||||
"derive_builder",
|
||||
"derive_builder 0.12.0",
|
||||
"esaxx-rs",
|
||||
"getrandom 0.3.3",
|
||||
"itertools 0.14.0",
|
||||
"getrandom 0.2.16",
|
||||
"itertools 0.12.1",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"macro_rules_attribute",
|
||||
"monostate",
|
||||
"onig",
|
||||
"paste",
|
||||
"rand 0.9.2",
|
||||
"rand 0.8.5",
|
||||
"rayon",
|
||||
"rayon-cond",
|
||||
"regex",
|
||||
@@ -6613,7 +6573,7 @@ dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
"spm_precompiled",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 1.0.69",
|
||||
"unicode-normalization-alignments",
|
||||
"unicode-segmentation",
|
||||
"unicode_categories",
|
||||
@@ -6975,7 +6935,7 @@ dependencies = [
|
||||
"num-traits",
|
||||
"num_cpus",
|
||||
"rayon",
|
||||
"safetensors 0.4.5",
|
||||
"safetensors",
|
||||
"serde",
|
||||
"thiserror 1.0.69",
|
||||
"tracing",
|
||||
@@ -7205,7 +7165,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"derive_builder",
|
||||
"derive_builder 0.20.2",
|
||||
"rustversion",
|
||||
"vergen-lib",
|
||||
]
|
||||
@@ -7217,7 +7177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"derive_builder",
|
||||
"derive_builder 0.20.2",
|
||||
"git2",
|
||||
"rustversion",
|
||||
"time",
|
||||
@@ -7232,7 +7192,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"derive_builder",
|
||||
"derive_builder 0.20.2",
|
||||
"rustversion",
|
||||
]
|
||||
|
||||
@@ -7879,7 +7839,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "xtask"
|
||||
version = "1.26.0"
|
||||
version = "1.21.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"build-info",
|
||||
@@ -8026,6 +7986,18 @@ dependencies = [
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerometry"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "681f08f3f4ef27d3021a128eb6d8df1cd781e4c9c797c3971c1f85316374f977"
|
||||
dependencies = [
|
||||
"bytemuck",
|
||||
"byteorder",
|
||||
"geo",
|
||||
"geo-types",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerometry"
|
||||
version = "0.3.0"
|
||||
|
||||
Cargo.toml

@@ -23,7 +23,7 @@ members = [
]

[workspace.package]
version = "1.26.0"
version = "1.21.0"
authors = [
    "Quentin de Quelen <quentin@dequelen.me>",
    "Clément Renault <clement@meilisearch.com>",
Dockerfile

@@ -1,5 +1,5 @@
# Compile
FROM rust:1.89-alpine3.22 AS compiler
FROM rust:1.89-alpine3.20 AS compiler

RUN apk add -q --no-cache build-base openssl-dev

@@ -20,7 +20,7 @@ RUN set -eux; \
    cargo build --release -p meilisearch -p meilitool

# Run
FROM alpine:3.22
FROM alpine:3.20
LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch"

ENV MEILI_HTTP_ADDR 0.0.0.0:7700
LICENSE (28 changes)

@@ -1,9 +1,29 @@
# License
MIT License

Copyright (c) 2019-2025 Meili SAS

Part of this work fall under the Meilisearch Enterprise Edition (EE) and are licensed under the Business Source License 1.1, please refer to [LICENSE-EE](./LICENSE-EE) for details.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The other parts of this work are licensed under the [MIT license](./LICENSE-MIT).
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

`SPDX-License-Identifier: MIT AND BUSL-1.1`
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

---

🔒 Meilisearch Enterprise Edition (EE)

Certain parts of this codebase are not licensed under the MIT license and governed by the Business Source License 1.1.

See the LICENSE-EE file for details.
LICENSE-MIT (21 changes)

@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2019-2025 Meili SAS

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md

@@ -39,7 +39,6 @@
## 🖥 Examples

- [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
- [**Flickr**](https://flickr.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — Search and explore one hundred million Flickr images with semantic search.
- [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
- [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
- [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.
@@ -122,7 +121,7 @@ If you want to know more about the kind of data we collect and what we use it fo

Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)

🗞 [Subscribe to our newsletter](https://share-eu1.hsforms.com/1LN5N0x_GQgq7ss7tXmSykwfg3aq) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.

💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:
@@ -96,8 +96,6 @@ pub struct TaskDump {
    pub finished_at: Option<OffsetDateTime>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub network: Option<TaskNetwork>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub custom_metadata: Option<String>,
}

// A `Kind` specific version made for the dump. If modified you may break the dump.
@@ -160,8 +158,9 @@ pub enum KindDump {
    UpgradeDatabase {
        from: (u32, u32, u32),
    },
    IndexCompaction {
        index_uid: String,
    NetworkTopologyChange {
        network: Option<meilisearch_types::enterprise_edition::network::Network>,
        origin: Option<meilisearch_types::tasks::Origin>,
    },
}
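To make the enum reshape above easier to follow, here is a minimal sketch of how dump-reading code might branch on the two variants shown in the hunk. It assumes `KindDump` is in scope (for example via `use dump::KindDump;`); the helper function itself is hypothetical and only relies on the field shapes visible above, without asserting which side of the diff introduces which variant.

```rust
// Hypothetical helper; assumes `KindDump` from the dump crate is in scope.
fn describe_kind(kind: &KindDump) -> String {
    match kind {
        // Variant present in both sides of the hunk: carries the source version triple.
        KindDump::UpgradeDatabase { from } => {
            format!("upgrade database from v{}.{}.{}", from.0, from.1, from.2)
        }
        // Variant declared on one side of the hunk: both fields are optional.
        KindDump::NetworkTopologyChange { network, origin } => {
            format!(
                "network topology change (network provided: {}, origin provided: {})",
                network.is_some(),
                origin.is_some()
            )
        }
        // All other task kinds are untouched by this hunk.
        _ => "other task kind".to_string(),
    }
}
```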
@@ -180,7 +179,6 @@ impl From<Task> for TaskDump {
|
||||
started_at: task.started_at,
|
||||
finished_at: task.finished_at,
|
||||
network: task.network,
|
||||
custom_metadata: task.custom_metadata,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -246,8 +244,8 @@ impl From<KindWithContent> for KindDump {
|
||||
KindWithContent::UpgradeDatabase { from: version } => {
|
||||
KindDump::UpgradeDatabase { from: version }
|
||||
}
|
||||
KindWithContent::IndexCompaction { index_uid } => {
|
||||
KindDump::IndexCompaction { index_uid }
|
||||
KindWithContent::NetworkTopologyChange { network, origin } => {
|
||||
KindDump::NetworkTopologyChange { network, origin }
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -262,7 +260,7 @@ pub(crate) mod test {
|
||||
use big_s::S;
|
||||
use maplit::{btreemap, btreeset};
|
||||
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
|
||||
use meilisearch_types::enterprise_edition::network::{Network, Remote};
|
||||
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
|
||||
use meilisearch_types::facet_values_sort::FacetValuesSort;
|
||||
use meilisearch_types::features::RuntimeTogglableFeatures;
|
||||
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
||||
@@ -399,7 +397,6 @@ pub(crate) mod test {
|
||||
started_at: Some(datetime!(2022-11-20 0:00 UTC)),
|
||||
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
|
||||
network: None,
|
||||
custom_metadata: None,
|
||||
},
|
||||
None,
|
||||
),
|
||||
@@ -425,7 +422,6 @@ pub(crate) mod test {
|
||||
started_at: None,
|
||||
finished_at: None,
|
||||
network: None,
|
||||
custom_metadata: None,
|
||||
},
|
||||
Some(vec![
|
||||
json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
|
||||
@@ -446,7 +442,6 @@ pub(crate) mod test {
|
||||
started_at: None,
|
||||
finished_at: None,
|
||||
network: None,
|
||||
custom_metadata: None,
|
||||
},
|
||||
None,
|
||||
),
|
||||
@@ -556,10 +551,10 @@ pub(crate) mod test {
|
||||
RuntimeTogglableFeatures::default()
|
||||
}
|
||||
|
||||
fn create_test_network() -> Network {
|
||||
Network {
|
||||
fn create_test_network() -> DbNetwork {
|
||||
DbNetwork {
|
||||
local: Some("myself".to_string()),
|
||||
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
|
||||
remotes: maplit::btreemap! {"other".to_string() => DbRemote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
|
||||
sharding: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -164,7 +164,6 @@ impl CompatV5ToV6 {
|
||||
started_at: task_view.started_at,
|
||||
finished_at: task_view.finished_at,
|
||||
network: None,
|
||||
custom_metadata: None,
|
||||
};
|
||||
|
||||
(task, content_file)
|
||||
|
||||
@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
|
||||
pub type Key = meilisearch_types::keys::Key;
|
||||
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
|
||||
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
|
||||
pub type Network = meilisearch_types::enterprise_edition::network::Network;
|
||||
pub type Network = meilisearch_types::enterprise_edition::network::DbNetwork;
|
||||
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
|
||||
|
||||
// ===== Other types to clarify the code of the compat module
|
||||
|
||||
@@ -5,7 +5,7 @@ use std::path::PathBuf;
|
||||
use flate2::write::GzEncoder;
|
||||
use flate2::Compression;
|
||||
use meilisearch_types::batches::Batch;
|
||||
use meilisearch_types::enterprise_edition::network::Network;
|
||||
use meilisearch_types::enterprise_edition::network::DbNetwork;
|
||||
use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
|
||||
use meilisearch_types::keys::Key;
|
||||
use meilisearch_types::settings::{Checked, Settings};
|
||||
@@ -72,7 +72,7 @@ impl DumpWriter {
|
||||
)?)
|
||||
}
|
||||
|
||||
pub fn create_network(&self, network: Network) -> Result<()> {
|
||||
pub fn create_network(&self, network: DbNetwork) -> Result<()> {
|
||||
Ok(std::fs::write(self.dir.path().join("network.json"), serde_json::to_string(&network)?)?)
|
||||
}
|
||||
|
||||
|
||||
@@ -60,7 +60,7 @@ impl FileStore {

    /// Returns the file corresponding to the requested uuid.
    pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> {
        let path = self.update_path(uuid);
        let path = self.get_update_path(uuid);
        let file = match StdFile::open(path) {
            Ok(file) => file,
            Err(e) => {
@@ -72,7 +72,7 @@ impl FileStore {
    }

    /// Returns the path that correspond to this uuid, the path could not exists.
    pub fn update_path(&self, uuid: Uuid) -> PathBuf {
    pub fn get_update_path(&self, uuid: Uuid) -> PathBuf {
        self.path.join(uuid.to_string())
    }
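As a rough illustration of the rename above, the sketch below shows how a caller might combine the two methods. The `file_store::FileStore` path, the way the store is obtained, and the boxed error type are assumptions; only `get_update` and `get_update_path` come from the hunk.

```rust
use std::io::Read;
use uuid::Uuid;

// Hypothetical caller; assumes the file-store crate's error type implements std::error::Error.
fn preview_update(
    store: &file_store::FileStore,
    uuid: Uuid,
) -> Result<String, Box<dyn std::error::Error>> {
    // `get_update_path` only builds the on-disk path; the file may not exist yet.
    let path = store.get_update_path(uuid);
    eprintln!("expecting update file at {}", path.display());

    // `get_update` opens the file, reusing that same path internally per the hunk above.
    let mut file = store.get_update(uuid)?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;
    Ok(contents)
}
```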
@@ -14,7 +14,6 @@ license.workspace = true
|
||||
anyhow = "1.0.98"
|
||||
bincode = "1.3.3"
|
||||
byte-unit = "5.1.6"
|
||||
bytes = "1.10.1"
|
||||
bumpalo = "3.18.1"
|
||||
bumparaw-collections = "0.1.4"
|
||||
convert_case = "0.8.0"
|
||||
@@ -24,6 +23,7 @@ dump = { path = "../dump" }
|
||||
enum-iterator = "2.1.0"
|
||||
file-store = { path = "../file-store" }
|
||||
flate2 = "1.1.2"
|
||||
hashbrown = "0.15.4"
|
||||
indexmap = "2.9.0"
|
||||
meilisearch-auth = { path = "../meilisearch-auth" }
|
||||
meilisearch-types = { path = "../meilisearch-types" }
|
||||
@@ -33,7 +33,6 @@ rayon = "1.10.0"
|
||||
roaring = { version = "0.10.12", features = ["serde"] }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = { version = "1.0.140", features = ["preserve_order"] }
|
||||
tar = "0.4.44"
|
||||
synchronoise = "1.0.1"
|
||||
tempfile = "3.20.0"
|
||||
thiserror = "2.0.12"
|
||||
@@ -47,8 +46,7 @@ tracing = "0.1.41"
|
||||
ureq = "2.12.1"
|
||||
uuid = { version = "1.17.0", features = ["serde", "v4"] }
|
||||
backoff = "0.4.0"
|
||||
reqwest = { version = "0.12.23", features = ["rustls-tls", "http2"], default-features = false }
|
||||
rusty-s3 = "0.8.1"
|
||||
itertools = "0.14.0"
|
||||
tokio = { version = "1.47.1", features = ["full"] }
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -150,7 +150,6 @@ impl<'a> Dump<'a> {
|
||||
details: task.details,
|
||||
status: task.status,
|
||||
network: task.network,
|
||||
custom_metadata: task.custom_metadata,
|
||||
kind: match task.kind {
|
||||
KindDump::DocumentImport {
|
||||
primary_key,
|
||||
@@ -235,8 +234,8 @@ impl<'a> Dump<'a> {
|
||||
}
|
||||
}
|
||||
KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
|
||||
KindDump::IndexCompaction { index_uid } => {
|
||||
KindWithContent::IndexCompaction { index_uid }
|
||||
KindDump::NetworkTopologyChange { network: new_network, origin } => {
|
||||
KindWithContent::NetworkTopologyChange { network: new_network, origin }
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
@@ -5,7 +5,6 @@ use meilisearch_types::error::{Code, ErrorCode};
|
||||
use meilisearch_types::milli::index::RollbackOutcome;
|
||||
use meilisearch_types::tasks::{Kind, Status};
|
||||
use meilisearch_types::{heed, milli};
|
||||
use reqwest::StatusCode;
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::TaskId;
|
||||
@@ -128,14 +127,6 @@ pub enum Error {
|
||||
#[error("Aborted task")]
|
||||
AbortedTask,
|
||||
|
||||
#[error("S3 error: status: {status}, body: {body}")]
|
||||
S3Error { status: StatusCode, body: String },
|
||||
#[error("S3 HTTP error: {0}")]
|
||||
S3HttpError(reqwest::Error),
|
||||
#[error("S3 XML error: {0}")]
|
||||
S3XmlError(Box<dyn std::error::Error + Send + Sync>),
|
||||
#[error("S3 bucket error: {0}")]
|
||||
S3BucketError(rusty_s3::BucketError),
|
||||
#[error(transparent)]
|
||||
Dump(#[from] dump::Error),
|
||||
#[error(transparent)]
|
||||
@@ -235,10 +226,6 @@ impl Error {
|
||||
| Error::TaskCancelationWithEmptyQuery
|
||||
| Error::FromRemoteWhenExporting { .. }
|
||||
| Error::AbortedTask
|
||||
| Error::S3Error { .. }
|
||||
| Error::S3HttpError(_)
|
||||
| Error::S3XmlError(_)
|
||||
| Error::S3BucketError(_)
|
||||
| Error::Dump(_)
|
||||
| Error::Heed(_)
|
||||
| Error::Milli { .. }
|
||||
@@ -306,14 +293,8 @@ impl ErrorCode for Error {
|
||||
Error::BatchNotFound(_) => Code::BatchNotFound,
|
||||
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
|
||||
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
|
||||
// TODO: not sure of the Code to use
|
||||
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
|
||||
Error::S3Error { status, .. } if status.is_client_error() => {
|
||||
Code::InvalidS3SnapshotRequest
|
||||
}
|
||||
Error::S3Error { .. } => Code::S3SnapshotServerError,
|
||||
Error::S3HttpError(_) => Code::S3SnapshotServerError,
|
||||
Error::S3XmlError(_) => Code::S3SnapshotServerError,
|
||||
Error::S3BucketError(_) => Code::InvalidS3SnapshotParameters,
|
||||
Error::Dump(e) => e.error_code(),
|
||||
Error::Milli { error, .. } => error.error_code(),
|
||||
Error::ProcessBatchPanicked(_) => Code::Internal,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use meilisearch_types::enterprise_edition::network::Network;
|
||||
use meilisearch_types::enterprise_edition::network::DbNetwork;
|
||||
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
|
||||
use meilisearch_types::heed::types::{SerdeJson, Str};
|
||||
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
|
||||
@@ -24,7 +24,7 @@ mod db_keys {
|
||||
pub(crate) struct FeatureData {
|
||||
persisted: Database<Str, SerdeJson<RuntimeTogglableFeatures>>,
|
||||
runtime: Arc<RwLock<RuntimeTogglableFeatures>>,
|
||||
network: Arc<RwLock<Network>>,
|
||||
network: Arc<RwLock<DbNetwork>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
@@ -197,8 +197,8 @@ impl FeatureData {
|
||||
}));
|
||||
|
||||
// Once this is stabilized, network should be stored along with webhooks in index-scheduler's persisted database
|
||||
let network_db = runtime_features_db.remap_data_type::<SerdeJson<Network>>();
|
||||
let network: Network = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();
|
||||
let network_db = runtime_features_db.remap_data_type::<SerdeJson<DbNetwork>>();
|
||||
let network: DbNetwork = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();
|
||||
|
||||
Ok(Self {
|
||||
persisted: runtime_features_db,
|
||||
@@ -234,8 +234,8 @@ impl FeatureData {
|
||||
RoFeatures::new(self)
|
||||
}
|
||||
|
||||
pub fn put_network(&self, mut wtxn: RwTxn, new_network: Network) -> Result<()> {
|
||||
self.persisted.remap_data_type::<SerdeJson<Network>>().put(
|
||||
pub fn put_network(&self, mut wtxn: RwTxn, new_network: DbNetwork) -> Result<()> {
|
||||
self.persisted.remap_data_type::<SerdeJson<DbNetwork>>().put(
|
||||
&mut wtxn,
|
||||
db_keys::NETWORK,
|
||||
&new_network,
|
||||
@@ -247,7 +247,7 @@ impl FeatureData {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn network(&self) -> Network {
|
||||
Network::clone(&*self.network.read().unwrap())
|
||||
pub fn network(&self) -> DbNetwork {
|
||||
DbNetwork::clone(&*self.network.read().unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,7 +199,7 @@ impl IndexMapper {
|
||||
let uuid = Uuid::new_v4();
|
||||
self.index_mapping.put(&mut wtxn, name, &uuid)?;
|
||||
|
||||
let index_path = self.index_path(uuid);
|
||||
let index_path = self.base_path.join(uuid.to_string());
|
||||
fs::create_dir_all(&index_path)?;
|
||||
|
||||
// Error if the UUIDv4 somehow already exists in the map, since it should be fresh.
|
||||
@@ -286,7 +286,7 @@ impl IndexMapper {
|
||||
};
|
||||
|
||||
let index_map = self.index_map.clone();
|
||||
let index_path = self.index_path(uuid);
|
||||
let index_path = self.base_path.join(uuid.to_string());
|
||||
let index_name = name.to_string();
|
||||
thread::Builder::new()
|
||||
.name(String::from("index_deleter"))
|
||||
@@ -341,26 +341,6 @@ impl IndexMapper {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Closes the specified index.
|
||||
///
|
||||
/// This operation involves closing the underlying environment and so can take a long time to complete.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// - If the Index corresponding to the passed name is concurrently being deleted/resized or cannot be found in the
|
||||
/// in-memory hash map.
|
||||
pub fn close_index(&self, rtxn: &RoTxn, name: &str) -> Result<()> {
|
||||
let uuid = self
|
||||
.index_mapping
|
||||
.get(rtxn, name)?
|
||||
.ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
|
||||
|
||||
// We remove the index from the in-memory index map.
|
||||
self.index_map.write().unwrap().close_for_resize(&uuid, self.enable_mdb_writemap, 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Return an index, may open it if it wasn't already opened.
|
||||
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
|
||||
if let Some((current_name, current_index)) =
|
||||
@@ -408,7 +388,7 @@ impl IndexMapper {
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
let index_path = self.index_path(uuid);
|
||||
let index_path = self.base_path.join(uuid.to_string());
|
||||
// take the lock to reopen the environment.
|
||||
reopen
|
||||
.reopen(&mut self.index_map.write().unwrap(), &index_path)
|
||||
@@ -425,7 +405,7 @@ impl IndexMapper {
|
||||
// if it's not already there.
|
||||
match index_map.get(&uuid) {
|
||||
Missing => {
|
||||
let index_path = self.index_path(uuid);
|
||||
let index_path = self.base_path.join(uuid.to_string());
|
||||
|
||||
break index_map
|
||||
.create(
|
||||
@@ -452,14 +432,6 @@ impl IndexMapper {
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
/// Returns the path of the index.
|
||||
///
|
||||
/// The folder located at this path contains the data.mdb file,
/// the lock.mdb file and an optional data.mdb.cpy file.
|
||||
pub fn index_path(&self, uuid: Uuid) -> PathBuf {
|
||||
self.base_path.join(uuid.to_string())
|
||||
}
|
||||
|
||||
pub fn rollback_index(
|
||||
&self,
|
||||
rtxn: &RoTxn,
|
||||
@@ -500,7 +472,7 @@ impl IndexMapper {
|
||||
};
|
||||
}
|
||||
|
||||
let index_path = self.index_path(uuid);
|
||||
let index_path = self.base_path.join(uuid.to_string());
|
||||
Index::rollback(milli::heed::EnvOpenOptions::new().read_txn_without_tls(), index_path, to)
|
||||
.map_err(|err| crate::Error::from_milli(err, Some(name.to_string())))
|
||||
}
|
||||
|
||||
@@ -232,7 +232,6 @@ pub fn snapshot_task(task: &Task) -> String {
|
||||
status,
|
||||
kind,
|
||||
network,
|
||||
custom_metadata,
|
||||
} = task;
|
||||
snap.push('{');
|
||||
snap.push_str(&format!("uid: {uid}, "));
|
||||
@@ -253,9 +252,6 @@ pub fn snapshot_task(task: &Task) -> String {
|
||||
if let Some(network) = network {
|
||||
snap.push_str(&format!("network: {network:?}, "))
|
||||
}
|
||||
if let Some(custom_metadata) = custom_metadata {
|
||||
snap.push_str(&format!("custom_metadata: {custom_metadata:?}"))
|
||||
}
|
||||
|
||||
snap.push('}');
|
||||
snap
|
||||
@@ -322,8 +318,8 @@ fn snapshot_details(d: &Details) -> String {
|
||||
Details::UpgradeDatabase { from, to } => {
|
||||
format!("{{ from: {from:?}, to: {to:?} }}")
|
||||
}
|
||||
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
|
||||
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
|
||||
Details::NetworkTopologyChange { network: new_network } => {
|
||||
format!("{{ new_network: {new_network:?} }}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ pub use features::RoFeatures;
|
||||
use flate2::bufread::GzEncoder;
|
||||
use flate2::Compression;
|
||||
use meilisearch_types::batches::Batch;
|
||||
use meilisearch_types::enterprise_edition::network::Network;
|
||||
use meilisearch_types::enterprise_edition::network::DbNetwork;
|
||||
use meilisearch_types::features::{
|
||||
ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
|
||||
};
|
||||
@@ -217,7 +217,6 @@ pub struct IndexScheduler {
|
||||
#[cfg(test)]
|
||||
run_loop_iteration: Arc<RwLock<usize>>,
|
||||
|
||||
/// The tokio runtime used for asynchronous tasks.
|
||||
runtime: Option<tokio::runtime::Handle>,
|
||||
}
|
||||
|
||||
@@ -259,23 +258,14 @@ impl IndexScheduler {
|
||||
}
|
||||
|
||||
/// Create an index scheduler and start its run loop.
|
||||
#[allow(private_interfaces)] // because test_utils is private
|
||||
pub fn new(
|
||||
options: IndexSchedulerOptions,
|
||||
auth_env: Env<WithoutTls>,
|
||||
from_db_version: (u32, u32, u32),
|
||||
runtime: Option<tokio::runtime::Handle>,
|
||||
) -> Result<Self> {
|
||||
let this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
|
||||
|
||||
this.run();
|
||||
Ok(this)
|
||||
}
|
||||
|
||||
fn new_without_run(
|
||||
options: IndexSchedulerOptions,
|
||||
auth_env: Env<WithoutTls>,
|
||||
from_db_version: (u32, u32, u32),
|
||||
runtime: Option<tokio::runtime::Handle>,
|
||||
#[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
|
||||
#[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
|
||||
) -> Result<Self> {
|
||||
std::fs::create_dir_all(&options.tasks_path)?;
|
||||
std::fs::create_dir_all(&options.update_file_path)?;
|
||||
@@ -330,7 +320,8 @@ impl IndexScheduler {
|
||||
|
||||
wtxn.commit()?;
|
||||
|
||||
Ok(Self {
|
||||
// allow unreachable_code to get rid of the warning in the case of a test build.
|
||||
let this = Self {
|
||||
processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
|
||||
version,
|
||||
queue,
|
||||
@@ -346,32 +337,16 @@ impl IndexScheduler {
|
||||
webhooks: Arc::new(webhooks),
|
||||
embedders: Default::default(),
|
||||
|
||||
#[cfg(test)] // Will be replaced in `new_tests` in test environments
|
||||
test_breakpoint_sdr: crossbeam_channel::bounded(0).0,
|
||||
#[cfg(test)] // Will be replaced in `new_tests` in test environments
|
||||
planned_failures: Default::default(),
|
||||
#[cfg(test)]
|
||||
test_breakpoint_sdr,
|
||||
#[cfg(test)]
|
||||
planned_failures,
|
||||
#[cfg(test)]
|
||||
run_loop_iteration: Arc::new(RwLock::new(0)),
|
||||
features,
|
||||
chat_settings,
|
||||
runtime,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create an index scheduler and start its run loop.
|
||||
#[cfg(test)]
|
||||
fn new_test(
|
||||
options: IndexSchedulerOptions,
|
||||
auth_env: Env<WithoutTls>,
|
||||
from_db_version: (u32, u32, u32),
|
||||
runtime: Option<tokio::runtime::Handle>,
|
||||
test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
|
||||
planned_failures: Vec<(usize, test_utils::FailureLocation)>,
|
||||
) -> Result<Self> {
|
||||
let mut this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
|
||||
|
||||
this.test_breakpoint_sdr = test_breakpoint_sdr;
|
||||
this.planned_failures = planned_failures;
|
||||
};
|
||||
|
||||
this.run();
|
||||
Ok(this)
|
||||
@@ -756,19 +731,6 @@ impl IndexScheduler {
|
||||
kind: KindWithContent,
|
||||
task_id: Option<TaskId>,
|
||||
dry_run: bool,
|
||||
) -> Result<Task> {
|
||||
self.register_with_custom_metadata(kind, task_id, None, dry_run)
|
||||
}
|
||||
|
||||
/// Register a new task in the scheduler, with metadata.
|
||||
///
|
||||
/// If it fails and data was associated with the task, it tries to delete the associated data.
|
||||
pub fn register_with_custom_metadata(
|
||||
&self,
|
||||
kind: KindWithContent,
|
||||
task_id: Option<TaskId>,
|
||||
custom_metadata: Option<String>,
|
||||
dry_run: bool,
|
||||
) -> Result<Task> {
|
||||
// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
|
||||
if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
|
||||
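The guard above is only half-visible in this hunk; the refusal itself lives outside it. As a purely illustrative sketch of the policy described by the comment (the `used_ratio` computation below is hypothetical, not Meilisearch's actual accounting):

// Hypothetical sketch of the back-pressure rule: tasks that free space
// (deletions/cancelations with a non-empty filter) are always accepted,
// anything else is refused once the queue is 40% full.
fn ensure_room(frees_space: bool, used_ratio: f64) -> Result<(), String> {
    if !frees_space && used_ratio >= 0.40 {
        return Err("no space left in task queue".to_string());
    }
    Ok(())
}

fn main() {
    assert!(ensure_room(false, 0.25).is_ok());
    assert!(ensure_room(false, 0.45).is_err());
    // A task deletion is still accepted on an almost-full queue.
    assert!(ensure_room(true, 0.95).is_ok());
}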
@@ -779,7 +741,7 @@ impl IndexScheduler {
|
||||
}
|
||||
|
||||
let mut wtxn = self.env.write_txn()?;
|
||||
let task = self.queue.register(&mut wtxn, &kind, task_id, custom_metadata, dry_run)?;
|
||||
let task = self.queue.register(&mut wtxn, &kind, task_id, dry_run)?;
|
||||
|
||||
// If the registered task is a task cancelation
|
||||
// we inform the processing tasks to stop (if necessary).
|
||||
@@ -935,13 +897,13 @@ impl IndexScheduler {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn put_network(&self, network: Network) -> Result<()> {
|
||||
pub fn put_network(&self, network: DbNetwork) -> Result<()> {
|
||||
let wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
|
||||
self.features.put_network(wtxn, network)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn network(&self) -> Network {
|
||||
pub fn network(&self) -> DbNetwork {
|
||||
self.features.network()
|
||||
}
|
||||
|
||||
@@ -970,9 +932,10 @@ impl IndexScheduler {
|
||||
|
||||
pub fn embedders(
|
||||
&self,
|
||||
index_uid: String,
|
||||
index_uid: &str,
|
||||
embedding_configs: Vec<IndexEmbeddingConfig>,
|
||||
) -> Result<RuntimeEmbedders> {
|
||||
let err = |err| Error::from_milli(err, Some(index_uid.to_owned()));
|
||||
let res: Result<_> = embedding_configs
|
||||
.into_iter()
|
||||
.map(
|
||||
@@ -985,7 +948,7 @@ impl IndexScheduler {
|
||||
let document_template = prompt
|
||||
.try_into()
|
||||
.map_err(meilisearch_types::milli::Error::from)
|
||||
.map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?;
|
||||
.map_err(err)?;
|
||||
|
||||
let fragments = fragments
|
||||
.into_inner()
|
||||
@@ -1015,9 +978,8 @@ impl IndexScheduler {
|
||||
let embedder = Arc::new(
|
||||
Embedder::new(embedder_options.clone(), self.scheduler.embedding_cache_cap)
|
||||
.map_err(meilisearch_types::milli::vector::Error::from)
|
||||
.map_err(|err| {
|
||||
Error::from_milli(err.into(), Some(index_uid.clone()))
|
||||
})?,
|
||||
.map_err(milli::Error::from)
|
||||
.map_err(err)?,
|
||||
);
|
||||
{
|
||||
let mut embedders = self.embedders.write().unwrap();
|
||||
|
||||
@@ -75,7 +75,6 @@ make_enum_progress! {
|
||||
pub enum TaskCancelationProgress {
|
||||
RetrievingTasks,
|
||||
CancelingUpgrade,
|
||||
CleaningCompactionLeftover,
|
||||
UpdatingTasks,
|
||||
}
|
||||
}
|
||||
@@ -139,17 +138,6 @@ make_enum_progress! {
|
||||
}
|
||||
}
|
||||
|
||||
make_enum_progress! {
|
||||
pub enum IndexCompaction {
|
||||
RetrieveTheIndex,
|
||||
CreateTemporaryFile,
|
||||
CopyAndCompactTheIndex,
|
||||
PersistTheCompactedIndex,
|
||||
CloseTheIndex,
|
||||
ReopenTheIndex,
|
||||
}
|
||||
}
|
||||
|
||||
make_enum_progress! {
|
||||
pub enum InnerSwappingTwoIndexes {
|
||||
RetrieveTheTasks,
|
||||
|
||||
@@ -257,7 +257,6 @@ impl Queue {
|
||||
wtxn: &mut RwTxn,
|
||||
kind: &KindWithContent,
|
||||
task_id: Option<TaskId>,
|
||||
custom_metadata: Option<String>,
|
||||
dry_run: bool,
|
||||
) -> Result<Task> {
|
||||
let next_task_id = self.tasks.next_task_id(wtxn)?;
|
||||
@@ -281,7 +280,6 @@ impl Queue {
|
||||
status: Status::Enqueued,
|
||||
kind: kind.clone(),
|
||||
network: None,
|
||||
custom_metadata,
|
||||
};
|
||||
// For deletion and cancelation tasks, we want to make extra sure that they
|
||||
// don't attempt to delete/cancel tasks that are newer than themselves.
|
||||
@@ -312,8 +310,7 @@ impl Queue {
|
||||
| self.tasks.status.get(wtxn, &Status::Failed)?.unwrap_or_default()
|
||||
| self.tasks.status.get(wtxn, &Status::Canceled)?.unwrap_or_default();
|
||||
|
||||
let to_delete =
|
||||
RoaringBitmap::from_sorted_iter(finished.into_iter().take(100_000)).unwrap();
|
||||
let to_delete = RoaringBitmap::from_iter(finished.into_iter().rev().take(100_000));
|
||||
|
||||
// /!\ the len must be at least 2 or else we might enter an infinite loop where we only delete
|
||||
// the deletion tasks we enqueued ourselves.
|
||||
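The replaced line above changes which finished tasks get auto-deleted: instead of the 100 000 oldest, the 100 000 most recent ones are collected by walking the bitmap in reverse. A small sketch of the difference (the limit is shrunk to 3 for readability):

use roaring::RoaringBitmap;

fn main() {
    // Pretend task ids 0..10 are all finished (succeeded, failed or canceled).
    let finished: RoaringBitmap = (0..10u32).collect();

    // Previous behaviour: the iterator is ascending, so `take` keeps the oldest ids
    // and the input is already sorted for `from_sorted_iter`.
    let oldest = RoaringBitmap::from_sorted_iter(finished.clone().into_iter().take(3)).unwrap();

    // New behaviour: reverse the iterator to keep the most recent ids; the stream is
    // now descending, hence the plain `from_iter`.
    let newest = RoaringBitmap::from_iter(finished.into_iter().rev().take(3));

    assert_eq!(oldest.iter().collect::<Vec<u32>>(), vec![0, 1, 2]);
    assert_eq!(newest.iter().collect::<Vec<u32>>(), vec![7, 8, 9]);
}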
@@ -346,7 +343,6 @@ impl Queue {
|
||||
tasks: to_delete,
|
||||
},
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
)?;
|
||||
|
||||
|
||||
@@ -68,14 +68,14 @@ impl From<KindWithContent> for AutobatchKind {
|
||||
KindWithContent::IndexCreation { .. } => AutobatchKind::IndexCreation,
|
||||
KindWithContent::IndexUpdate { .. } => AutobatchKind::IndexUpdate,
|
||||
KindWithContent::IndexSwap { .. } => AutobatchKind::IndexSwap,
|
||||
KindWithContent::IndexCompaction { .. }
|
||||
| KindWithContent::TaskCancelation { .. }
|
||||
KindWithContent::TaskCancelation { .. }
|
||||
| KindWithContent::TaskDeletion { .. }
|
||||
| KindWithContent::DumpCreation { .. }
|
||||
| KindWithContent::Export { .. }
|
||||
| KindWithContent::UpgradeDatabase { .. }
|
||||
| KindWithContent::NetworkTopologyChange { .. }
|
||||
| KindWithContent::SnapshotCreation => {
|
||||
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
|
||||
panic!("The autobatcher should never be called with tasks that don't apply to an index.")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -289,9 +289,7 @@ impl BatchKind {
|
||||
|
||||
match (self, autobatch_kind) {
|
||||
// We don't batch any of these operations
|
||||
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => {
|
||||
Break((this, BatchStopReason::TaskCannotBeBatched { kind, id }))
|
||||
},
|
||||
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break((this, BatchStopReason::TaskCannotBeBatched { kind, id })),
|
||||
// We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
|
||||
(this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
|
||||
Break((this, BatchStopReason::IndexCreationMismatch { id }))
|
||||
|
||||
@@ -55,9 +55,8 @@ pub(crate) enum Batch {
|
||||
UpgradeDatabase {
|
||||
tasks: Vec<Task>,
|
||||
},
|
||||
IndexCompaction {
|
||||
index_uid: String,
|
||||
task: Task,
|
||||
NetworkTopologyChanges {
|
||||
tasks: Vec<Task>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -114,14 +113,14 @@ impl Batch {
|
||||
| Batch::Dump(task)
|
||||
| Batch::IndexCreation { task, .. }
|
||||
| Batch::Export { task }
|
||||
| Batch::IndexUpdate { task, .. }
|
||||
| Batch::IndexCompaction { task, .. } => {
|
||||
| Batch::IndexUpdate { task, .. } => {
|
||||
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
|
||||
}
|
||||
Batch::SnapshotCreation(tasks)
|
||||
| Batch::TaskDeletions(tasks)
|
||||
| Batch::UpgradeDatabase { tasks }
|
||||
| Batch::IndexDeletion { tasks, .. } => {
|
||||
| Batch::IndexDeletion { tasks, .. }
|
||||
| Batch::NetworkTopologyChanges { tasks } => {
|
||||
RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
|
||||
}
|
||||
Batch::IndexOperation { op, .. } => match op {
|
||||
@@ -156,12 +155,12 @@ impl Batch {
|
||||
| Dump(_)
|
||||
| Export { .. }
|
||||
| UpgradeDatabase { .. }
|
||||
| NetworkTopologyChanges { .. }
|
||||
| IndexSwap { .. } => None,
|
||||
IndexOperation { op, .. } => Some(op.index_uid()),
|
||||
IndexCreation { index_uid, .. }
|
||||
| IndexUpdate { index_uid, .. }
|
||||
| IndexDeletion { index_uid, .. }
|
||||
| IndexCompaction { index_uid, .. } => Some(index_uid),
|
||||
| IndexDeletion { index_uid, .. } => Some(index_uid),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -181,8 +180,8 @@ impl fmt::Display for Batch {
|
||||
Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
|
||||
Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
|
||||
Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
|
||||
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
|
||||
Batch::Export { .. } => f.write_str("Export")?,
|
||||
Batch::NetworkTopologyChanges { .. } => f.write_str("NetworkTopologyChange")?,
|
||||
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
|
||||
};
|
||||
match index_uid {
|
||||
@@ -519,33 +518,17 @@ impl IndexScheduler {
|
||||
return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
|
||||
}
|
||||
|
||||
// 3. we get the next task to compact
|
||||
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
|
||||
if let Some(task_id) = to_compact.min() {
|
||||
let mut task =
|
||||
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
|
||||
current_batch.processing(Some(&mut task));
|
||||
current_batch.reason(BatchStopReason::TaskCannotBeBatched {
|
||||
kind: Kind::IndexCompaction,
|
||||
id: task_id,
|
||||
});
|
||||
let index_uid =
|
||||
task.index_uid().expect("Compaction task must have an index uid").to_owned();
|
||||
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
|
||||
}
|
||||
|
||||
// 4. we batch the export.
|
||||
// 3. we batch the export.
|
||||
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
|
||||
if !to_export.is_empty() {
|
||||
let task_id = to_export.iter().next().expect("There must be at least one export task");
|
||||
let mut task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
|
||||
current_batch.processing([&mut task]);
|
||||
current_batch
|
||||
.reason(BatchStopReason::TaskCannotBeBatched { kind: Kind::Export, id: task_id });
|
||||
current_batch.reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::Export });
|
||||
return Ok(Some((Batch::Export { task }, current_batch)));
|
||||
}
|
||||
|
||||
// 5. we batch the snapshot.
|
||||
// 4. we batch the snapshot.
|
||||
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
|
||||
if !to_snapshot.is_empty() {
|
||||
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
|
||||
@@ -555,7 +538,7 @@ impl IndexScheduler {
|
||||
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
|
||||
}
|
||||
|
||||
// 6. we batch the dumps.
|
||||
// 5. we batch the dumps.
|
||||
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
|
||||
if let Some(to_dump) = to_dump.min() {
|
||||
let mut task =
|
||||
@@ -568,6 +551,17 @@ impl IndexScheduler {
|
||||
return Ok(Some((Batch::Dump(task), current_batch)));
|
||||
}
|
||||
|
||||
// 6. We batch the network changes.
|
||||
let to_network = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)? & enqueued;
|
||||
if !to_network.is_empty() {
|
||||
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_network)?;
|
||||
current_batch.processing(&mut tasks);
|
||||
current_batch.reason(BatchStopReason::TaskKindCannotBeBatched {
|
||||
kind: Kind::NetworkTopologyChange,
|
||||
});
|
||||
return Ok(Some((Batch::NetworkTopologyChanges { tasks }, current_batch)));
|
||||
}
|
||||
|
||||
// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
|
||||
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
|
||||
let mut task =
|
||||
|
||||
@@ -0,0 +1,6 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

mod process_network;
@@ -0,0 +1,362 @@
|
||||
// Copyright © 2025 Meilisearch Some Rights Reserved
|
||||
// This file is part of Meilisearch Enterprise Edition (EE).
|
||||
// Use of this source code is governed by the Business Source License 1.1,
|
||||
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::time::Duration;
|
||||
|
||||
use bumpalo::Bump;
|
||||
use itertools::{EitherOrBoth, Itertools};
|
||||
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote, Network, Remote};
|
||||
use meilisearch_types::milli::documents::PrimaryKey;
|
||||
use meilisearch_types::milli::progress::{EmbedderStats, Progress};
|
||||
use meilisearch_types::milli::update::new::indexer;
|
||||
use meilisearch_types::milli::update::Setting;
|
||||
use meilisearch_types::milli::{self};
|
||||
use meilisearch_types::tasks::{KindWithContent, Status, Task};
|
||||
use roaring::RoaringBitmap;
|
||||
|
||||
use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
|
||||
use crate::{Error, IndexScheduler};
|
||||
|
||||
impl IndexScheduler {
|
||||
pub(crate) fn process_network_changes(
|
||||
&self,
|
||||
progress: Progress,
|
||||
mut tasks: Vec<Task>,
|
||||
) -> crate::Result<Vec<Task>> {
|
||||
let old_network = self.network();
|
||||
let mut current_network = Some(old_network.clone());
|
||||
for task in &tasks {
|
||||
let KindWithContent::NetworkTopologyChange { network, origin } = &task.kind else {
|
||||
continue;
|
||||
};
|
||||
current_network = match (current_network, network) {
|
||||
(None, None) => None,
|
||||
(None, Some(network)) => Some(accumulate(DbNetwork::default(), network.clone())?),
|
||||
(Some(current_network), None) => Some(current_network),
|
||||
(Some(current_network), Some(new_network)) => {
|
||||
Some(accumulate(current_network, new_network.clone())?)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
'network: {
|
||||
let mut new_network = current_network.unwrap_or_default();
|
||||
if old_network == new_network {
|
||||
// no change, exit
|
||||
break 'network;
|
||||
}
|
||||
|
||||
// TODO: only do this if the task originates with an end-user
|
||||
let must_replicate = old_network.sharding || new_network.sharding;
|
||||
|
||||
if !must_replicate {
|
||||
self.put_network(new_network)?;
|
||||
break 'network;
|
||||
}
|
||||
|
||||
let must_stop_processing = &self.scheduler.must_stop_processing;
|
||||
|
||||
// FIXME: make it mandatory for `self` to be part of the network
|
||||
let old_this = old_network.local.as_deref();
|
||||
// FIXME: return an error here instead of unwrapping
|
||||
let new_this = new_network.local.unwrap();
|
||||
|
||||
// in network replication, we need to tell old nodes that they are no longer part of the network.
|
||||
// This is made difficult by "node aliasing": Meilisearch has no way of knowing if two nodes with different names
|
||||
// or even different URLs actually refer to the same machine in two different versions of the network.
|
||||
//
|
||||
// This implementation ignores aliasing: a node is the same when it has the same name.
|
||||
//
|
||||
// To defeat aliasing, we iterate a first time to collect all deletions and additions, then we make sure to process the deletions
|
||||
// first, rather than processing the tasks in the lexicographical order of remote names.
|
||||
let mut node_deletions = Vec::new();
|
||||
let mut node_additions = Vec::new();
|
||||
for eob in old_network
|
||||
.remotes
|
||||
.iter()
|
||||
.merge_join_by(new_network.remotes.iter(), |(left, _), (right, _)| left.cmp(right))
|
||||
{
|
||||
match eob {
|
||||
EitherOrBoth::Both((to_update_name, _), (_, new_node)) => {
|
||||
if to_update_name.as_str() == new_this {
|
||||
continue; // skip `self`
|
||||
}
|
||||
node_additions.push((to_update_name, new_node));
|
||||
}
|
||||
EitherOrBoth::Left((to_delete_name, to_delete_node)) => {
|
||||
if Some(to_delete_name.as_str()) == old_this {
|
||||
continue; // skip `self`
|
||||
}
|
||||
node_deletions.push((to_delete_name, to_delete_node));
|
||||
}
|
||||
EitherOrBoth::Right((to_add_name, to_add_node)) => {
|
||||
if to_add_name.as_str() == new_this {
|
||||
continue; // skip `self`
|
||||
}
|
||||
node_additions.push((to_add_name, to_add_node));
|
||||
}
|
||||
}
|
||||
}
|
||||
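// --- Illustrative aside (not part of the diff): the merge_join_by pass above,
// --- reduced to a self-contained example. Remotes are simplified to URL strings
// --- standing in for `DbRemote`; only the kept/deleted/added classification is shown.

use std::collections::BTreeMap;

use itertools::{EitherOrBoth, Itertools};

fn classify_nodes() {
    let old: BTreeMap<String, String> = BTreeMap::from([
        ("ms-0".to_string(), "http://ms-0:7700".to_string()),
        ("ms-1".to_string(), "http://ms-1:7700".to_string()),
    ]);
    let new: BTreeMap<String, String> = BTreeMap::from([
        ("ms-1".to_string(), "http://ms-1:7700".to_string()),
        ("ms-2".to_string(), "http://ms-2:7700".to_string()),
    ]);

    let mut node_deletions = Vec::new();
    let mut node_additions = Vec::new();

    // Both maps iterate in key order, so one merge pass sees every remote exactly once.
    for eob in old.iter().merge_join_by(new.iter(), |(left, _), (right, _)| left.cmp(right)) {
        match eob {
            // Present on both sides: the real code re-announces the new topology to it.
            EitherOrBoth::Both((name, _), (_, new_node)) => node_additions.push((name, new_node)),
            // Only in the old network: it must be told it no longer belongs.
            EitherOrBoth::Left((name, node)) => node_deletions.push((name, node)),
            // Only in the new network: it must receive the new topology.
            EitherOrBoth::Right((name, node)) => node_additions.push((name, node)),
        }
    }

    assert_eq!(node_deletions.len(), 1); // ms-0
    assert_eq!(node_additions.len(), 2); // ms-1 (updated) and ms-2 (added)
}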
|
||||
let runtime = self.runtime.clone().unwrap();
|
||||
let mut in_flight = Vec::new();
|
||||
// process deletions
|
||||
for (to_delete_name, to_delete) in node_deletions {
|
||||
// set `self` to None so that this node is forgotten about
|
||||
new_network.local = None;
|
||||
in_flight.push(proxy_network(&runtime, to_delete.url.as_str(), &new_network)?);
|
||||
}
|
||||
|
||||
runtime.block_on(async {
|
||||
for task in in_flight.drain(..) {
|
||||
// TODO: log and ignore errors during deletion
|
||||
let res = task.await;
|
||||
}
|
||||
});
|
||||
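// --- Illustrative aside (not part of the diff): one way the TODO above could be
// --- resolved — removals are best-effort, so failures are logged and skipped rather
// --- than failing the whole batch. `eprintln!` stands in for `tracing::warn!`.

use tokio::task::JoinHandle;

fn drain_best_effort(runtime: &tokio::runtime::Handle, in_flight: Vec<(String, JoinHandle<()>)>) {
    runtime.block_on(async {
        for (name, task) in in_flight {
            if let Err(err) = task.await {
                eprintln!("failed to notify `{name}` of its removal from the network: {err}");
            }
        }
    });
}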
|
||||
// process additions
|
||||
for (to_add_name, to_add) in node_additions {
|
||||
new_network.local = Some(to_add_name.clone());
|
||||
in_flight.push(proxy_network(&runtime, to_add.url.as_str(), &new_network)?);
|
||||
}
|
||||
|
||||
runtime.block_on(async {
|
||||
for task in in_flight.drain(..) {
|
||||
// TODO: handle errors during addition
|
||||
let res = task.await;
|
||||
}
|
||||
});
|
||||
|
||||
// balance documents
|
||||
new_network.local = Some(new_this);
|
||||
|
||||
self.balance_documents(&new_network, &progress, &must_stop_processing)?;
|
||||
|
||||
self.put_network(new_network)?;
|
||||
}
|
||||
|
||||
for task in &mut tasks {
|
||||
task.status = Status::Succeeded;
|
||||
}
|
||||
Ok(tasks)
|
||||
}
|
||||
|
||||
fn balance_documents(
|
||||
&self,
|
||||
new_network: &DbNetwork,
|
||||
progress: &Progress,
|
||||
must_stop_processing: &crate::scheduler::MustStopProcessing,
|
||||
) -> crate::Result<()> {
|
||||
// FIXME: don't unwrap here
|
||||
let new_shards = new_network.shards().unwrap();
|
||||
|
||||
// TECHDEBT: this spawns a `ureq` agent in addition to `reqwest`. We probably want to harmonize all of this.
|
||||
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
|
||||
|
||||
let mut indexer_alloc = Bump::new();
|
||||
|
||||
// process by batches of 20MiB. Allow for compression? Don't forget about embeddings
|
||||
let _: Vec<()> = self.try_for_each_index(|index_uid, index| -> crate::Result<()> {
|
||||
indexer_alloc.reset();
|
||||
let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
|
||||
let index_rtxn = index.read_txn()?;
|
||||
let all_docids = index.external_documents_ids();
|
||||
let mut documents_to_move_to: hashbrown::HashMap<String, RoaringBitmap> =
|
||||
hashbrown::HashMap::new();
|
||||
let mut documents_to_delete = RoaringBitmap::new();
|
||||
|
||||
for res in all_docids.iter(&index_rtxn)? {
|
||||
let (external_docid, docid) = res?;
|
||||
match new_shards.processing_shard(external_docid) {
|
||||
Some(shard) if shard.is_own => continue,
|
||||
Some(shard) => {
|
||||
documents_to_move_to
|
||||
.entry_ref(shard.name.as_str())
|
||||
.or_default()
|
||||
.insert(docid);
|
||||
}
|
||||
None => {
|
||||
documents_to_delete.insert(docid);
|
||||
}
|
||||
}
|
||||
}
|
||||
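// --- Illustrative aside (not part of the diff): the routing decision above in
// --- isolation. `owner_of` is a hypothetical stand-in for `Shards::processing_shard`;
// --- only the keep / move / delete outcome matters here.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Where a document ends up after the topology change.
enum Placement {
    Keep,         // owned by this node: leave it where it is
    Move(String), // owned by another remote: export it there, then delete it locally
    Delete,       // not owned by any shard anymore: delete it locally
}

// Hypothetical resolver; the real code asks `new_shards.processing_shard(external_docid)`.
// The hash-based routing below is only for illustration.
fn owner_of(external_docid: &str, remotes: &[&str], this: &str) -> Placement {
    if remotes.is_empty() {
        return Placement::Delete;
    }
    let mut hasher = DefaultHasher::new();
    external_docid.hash(&mut hasher);
    let owner = remotes[(hasher.finish() as usize) % remotes.len()];
    if owner == this {
        Placement::Keep
    } else {
        Placement::Move(owner.to_string())
    }
}

fn route_documents() {
    let remotes = ["ms-0", "ms-1", "ms-2"];
    for id in ["doc-1", "doc-2", "doc-3"] {
        match owner_of(id, &remotes, "ms-0") {
            Placement::Keep => println!("{id}: stays on this node"),
            Placement::Move(to) => println!("{id}: exported to {to}, then deleted locally"),
            Placement::Delete => println!("{id}: deleted locally"),
        }
    }
}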
|
||||
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
|
||||
|
||||
for (remote, documents_to_move) in documents_to_move_to {
|
||||
// TODO: justify the unwrap
|
||||
let remote = new_network.remotes.get(&remote).unwrap();
|
||||
|
||||
let target = TargetInstance {
|
||||
base_url: &remote.url,
|
||||
api_key: remote.write_api_key.as_deref(),
|
||||
};
|
||||
let options = ExportOptions {
|
||||
index_uid,
|
||||
payload_size: None,
|
||||
override_settings: false,
|
||||
extra_headers: &Default::default(),
|
||||
};
|
||||
let ctx = ExportContext {
|
||||
index,
|
||||
index_rtxn: &index_rtxn,
|
||||
universe: &documents_to_move,
|
||||
progress,
|
||||
agent: &agent,
|
||||
must_stop_processing,
|
||||
};
|
||||
|
||||
self.export_one_index(target, options, ctx)?;
|
||||
|
||||
documents_to_delete |= documents_to_move;
|
||||
}
|
||||
|
||||
if documents_to_delete.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut new_fields_ids_map = fields_ids_map.clone();
|
||||
|
||||
// candidates not empty => index not empty => a primary key is set
|
||||
let primary_key = index.primary_key(&index_rtxn)?.unwrap();
|
||||
|
||||
let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
|
||||
.map_err(milli::Error::from)
|
||||
.map_err(err)?;
|
||||
|
||||
let mut index_wtxn = index.write_txn()?;
|
||||
|
||||
let mut indexer = indexer::DocumentDeletion::new();
|
||||
indexer.delete_documents_by_docids(documents_to_delete);
|
||||
let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
|
||||
let embedders = index
|
||||
.embedding_configs()
|
||||
.embedding_configs(&index_wtxn)
|
||||
.map_err(milli::Error::from)
|
||||
.map_err(err)?;
|
||||
let embedders = self.embedders(index_uid, embedders)?;
|
||||
let indexer_config = self.index_mapper.indexer_config();
|
||||
let pool = &indexer_config.thread_pool;
|
||||
|
||||
indexer::index(
|
||||
&mut index_wtxn,
|
||||
index,
|
||||
pool,
|
||||
indexer_config.grenad_parameters(),
|
||||
&fields_ids_map,
|
||||
new_fields_ids_map,
|
||||
None, // document deletion never changes primary key
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| must_stop_processing.get(),
|
||||
&progress,
|
||||
&EmbedderStats::default(),
|
||||
)
|
||||
.map_err(err)?;
|
||||
|
||||
index_wtxn.commit()?;
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn proxy_network(
|
||||
runtime: &tokio::runtime::Handle,
|
||||
url: &str,
|
||||
network: &DbNetwork,
|
||||
) -> crate::Result<tokio::task::JoinHandle<()>> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn accumulate(old_network: DbNetwork, new_network: Network) -> crate::Result<DbNetwork> {
|
||||
let err = |err| Err(Error::from_milli(milli::Error::UserError(err), None));
|
||||
|
||||
let merged_local = match new_network.local {
|
||||
Setting::Set(new_self) => Some(new_self),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_network.local,
|
||||
};
|
||||
|
||||
let merged_sharding = match new_network.sharding {
|
||||
Setting::Set(new_sharding) => new_sharding,
|
||||
Setting::Reset => false,
|
||||
Setting::NotSet => old_network.sharding,
|
||||
};
|
||||
|
||||
if merged_sharding && merged_local.is_none() {
|
||||
return err(milli::UserError::NetworkShardingWithoutSelf);
|
||||
}
|
||||
|
||||
let merged_remotes = match new_network.remotes {
|
||||
Setting::Set(new_remotes) => {
|
||||
let mut merged_remotes = BTreeMap::new();
|
||||
for either_or_both in old_network
|
||||
.remotes
|
||||
.into_iter()
|
||||
.merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
|
||||
{
|
||||
match either_or_both {
|
||||
EitherOrBoth::Both((name, old), (_, Some(new))) => {
|
||||
let DbRemote {
|
||||
url: old_url,
|
||||
search_api_key: old_search_api_key,
|
||||
write_api_key: old_write_api_key,
|
||||
} = old;
|
||||
|
||||
let Remote {
|
||||
url: new_url,
|
||||
search_api_key: new_search_api_key,
|
||||
write_api_key: new_write_api_key,
|
||||
} = new;
|
||||
|
||||
let merged = DbRemote {
|
||||
url: match new_url {
|
||||
Setting::Set(new_url) => new_url,
|
||||
Setting::Reset => {
|
||||
return err(milli::UserError::NetworkMissingUrl(name))
|
||||
}
|
||||
Setting::NotSet => old_url,
|
||||
},
|
||||
search_api_key: match new_search_api_key {
|
||||
Setting::Set(new_search_api_key) => Some(new_search_api_key),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_search_api_key,
|
||||
},
|
||||
write_api_key: match new_write_api_key {
|
||||
Setting::Set(new_write_api_key) => Some(new_write_api_key),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_write_api_key,
|
||||
},
|
||||
};
|
||||
merged_remotes.insert(name, merged);
|
||||
}
|
||||
EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
|
||||
EitherOrBoth::Left((name, node)) => {
|
||||
merged_remotes.insert(name, node);
|
||||
}
|
||||
EitherOrBoth::Right((name, Some(node))) => {
|
||||
let Some(url) = node.url.set() else {
|
||||
return err(milli::UserError::NetworkMissingUrl(name));
|
||||
};
|
||||
let node = DbRemote {
|
||||
url,
|
||||
search_api_key: node.search_api_key.set(),
|
||||
write_api_key: node.write_api_key.set(),
|
||||
};
|
||||
merged_remotes.insert(name, node);
|
||||
}
|
||||
}
|
||||
}
|
||||
merged_remotes
|
||||
}
|
||||
Setting::Reset => BTreeMap::new(),
|
||||
Setting::NotSet => old_network.remotes,
|
||||
};
|
||||
|
||||
Ok(DbNetwork { local: merged_local, remotes: merged_remotes, sharding: merged_sharding })
|
||||
}
|
||||
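For readers unfamiliar with milli's `Setting` type, the three-way rule that `accumulate` applies field by field can be summarized in isolation. A minimal sketch using the same `Setting` re-export; the `merge` helper is illustrative, not an existing API:

use meilisearch_types::milli::update::Setting;

// `Set` overrides the stored value, `Reset` clears it, `NotSet` leaves it untouched.
fn merge<T>(old: Option<T>, patch: Setting<T>) -> Option<T> {
    match patch {
        Setting::Set(new) => Some(new),
        Setting::Reset => None,
        Setting::NotSet => old,
    }
}

fn main() {
    assert_eq!(merge(Some("ms-0".to_string()), Setting::NotSet), Some("ms-0".to_string()));
    assert_eq!(merge(Some("ms-0".to_string()), Setting::Set("ms-1".to_string())), Some("ms-1".to_string()));
    assert_eq!(merge(Some("ms-0".to_string()), Setting::Reset), None);
}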
@@ -2,6 +2,7 @@ mod autobatcher;
|
||||
#[cfg(test)]
|
||||
mod autobatcher_test;
|
||||
mod create_batch;
|
||||
mod enterprise_edition;
|
||||
mod process_batch;
|
||||
mod process_dump_creation;
|
||||
mod process_export;
|
||||
@@ -25,7 +26,6 @@ use convert_case::{Case, Casing as _};
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::heed::{Env, WithoutTls};
|
||||
use meilisearch_types::milli;
|
||||
use meilisearch_types::milli::update::S3SnapshotOptions;
|
||||
use meilisearch_types::tasks::Status;
|
||||
use process_batch::ProcessBatchInfo;
|
||||
use rayon::current_num_threads;
|
||||
@@ -88,14 +88,11 @@ pub struct Scheduler {
|
||||
|
||||
/// Snapshot compaction status.
|
||||
pub(crate) experimental_no_snapshot_compaction: bool,
|
||||
|
||||
/// S3 Snapshot options.
|
||||
pub(crate) s3_snapshot_options: Option<S3SnapshotOptions>,
|
||||
}
|
||||
|
||||
impl Scheduler {
|
||||
pub(crate) fn private_clone(&self) -> Self {
|
||||
Self {
|
||||
pub(crate) fn private_clone(&self) -> Scheduler {
|
||||
Scheduler {
|
||||
must_stop_processing: self.must_stop_processing.clone(),
|
||||
wake_up: self.wake_up.clone(),
|
||||
autobatching_enabled: self.autobatching_enabled,
|
||||
@@ -107,52 +104,23 @@ impl Scheduler {
|
||||
version_file_path: self.version_file_path.clone(),
|
||||
embedding_cache_cap: self.embedding_cache_cap,
|
||||
experimental_no_snapshot_compaction: self.experimental_no_snapshot_compaction,
|
||||
s3_snapshot_options: self.s3_snapshot_options.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(options: &IndexSchedulerOptions, auth_env: Env<WithoutTls>) -> Scheduler {
|
||||
let IndexSchedulerOptions {
|
||||
version_file_path,
|
||||
auth_path: _,
|
||||
tasks_path: _,
|
||||
update_file_path: _,
|
||||
indexes_path: _,
|
||||
snapshots_path,
|
||||
dumps_path,
|
||||
cli_webhook_url: _,
|
||||
cli_webhook_authorization: _,
|
||||
task_db_size: _,
|
||||
index_base_map_size: _,
|
||||
enable_mdb_writemap: _,
|
||||
index_growth_amount: _,
|
||||
index_count: _,
|
||||
indexer_config,
|
||||
autobatching_enabled,
|
||||
cleanup_enabled: _,
|
||||
max_number_of_tasks: _,
|
||||
max_number_of_batched_tasks,
|
||||
batched_tasks_size_limit,
|
||||
instance_features: _,
|
||||
auto_upgrade: _,
|
||||
embedding_cache_cap,
|
||||
experimental_no_snapshot_compaction,
|
||||
} = options;
|
||||
|
||||
Scheduler {
|
||||
must_stop_processing: MustStopProcessing::default(),
|
||||
// we want to start the loop right away in case meilisearch was ctrl+Ced while processing things
|
||||
wake_up: Arc::new(SignalEvent::auto(true)),
|
||||
autobatching_enabled: *autobatching_enabled,
|
||||
max_number_of_batched_tasks: *max_number_of_batched_tasks,
|
||||
batched_tasks_size_limit: *batched_tasks_size_limit,
|
||||
dumps_path: dumps_path.clone(),
|
||||
snapshots_path: snapshots_path.clone(),
|
||||
autobatching_enabled: options.autobatching_enabled,
|
||||
max_number_of_batched_tasks: options.max_number_of_batched_tasks,
|
||||
batched_tasks_size_limit: options.batched_tasks_size_limit,
|
||||
dumps_path: options.dumps_path.clone(),
|
||||
snapshots_path: options.snapshots_path.clone(),
|
||||
auth_env,
|
||||
version_file_path: version_file_path.clone(),
|
||||
embedding_cache_cap: *embedding_cache_cap,
|
||||
experimental_no_snapshot_compaction: *experimental_no_snapshot_compaction,
|
||||
s3_snapshot_options: indexer_config.s3_snapshot_options.clone(),
|
||||
version_file_path: options.version_file_path.clone(),
|
||||
embedding_cache_cap: options.embedding_cache_cap,
|
||||
experimental_no_snapshot_compaction: options.experimental_no_snapshot_compaction,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,27 +1,22 @@
|
||||
use std::collections::{BTreeSet, HashMap, HashSet};
|
||||
use std::fs::{remove_file, File};
|
||||
use std::io::{ErrorKind, Seek, SeekFrom};
|
||||
use std::panic::{catch_unwind, AssertUnwindSafe};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use byte_unit::Byte;
|
||||
use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
|
||||
use meilisearch_types::heed::{RoTxn, RwTxn};
|
||||
use meilisearch_types::milli::heed::CompactionOption;
|
||||
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
|
||||
use meilisearch_types::milli::{self, ChannelCongestion};
|
||||
use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
|
||||
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
|
||||
use milli::update::Settings as MilliSettings;
|
||||
use roaring::RoaringBitmap;
|
||||
use tempfile::{PersistError, TempPath};
|
||||
use time::OffsetDateTime;
|
||||
|
||||
use super::create_batch::Batch;
|
||||
use crate::processing::{
|
||||
AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
|
||||
IndexCompaction, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress,
|
||||
TaskDeletionProgress, UpdateIndexProgress,
|
||||
InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
|
||||
UpdateIndexProgress,
|
||||
};
|
||||
use crate::utils::{
|
||||
self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
|
||||
@@ -29,9 +24,6 @@ use crate::utils::{
|
||||
};
|
||||
use crate::{Error, IndexScheduler, Result, TaskId};
|
||||
|
||||
/// The name of the copy of the data.mdb file used during compaction.
|
||||
const DATA_MDB_COPY_NAME: &str = "data.mdb.cpy";
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ProcessBatchInfo {
|
||||
/// The write channel congestion. None when unavailable: settings update.
|
||||
@@ -143,6 +135,9 @@ impl IndexScheduler {
|
||||
Batch::Dump(task) => self
|
||||
.process_dump_creation(progress, task)
|
||||
.map(|tasks| (tasks, ProcessBatchInfo::default())),
|
||||
Batch::NetworkTopologyChanges { tasks } => self
|
||||
.process_network_changes(progress, tasks)
|
||||
.map(|tasks| (tasks, ProcessBatchInfo::default())),
|
||||
Batch::IndexOperation { op, must_create_index } => {
|
||||
let index_uid = op.index_uid().to_string();
|
||||
let index = if must_create_index {
|
||||
@@ -426,47 +421,6 @@ impl IndexScheduler {
|
||||
task.status = Status::Succeeded;
|
||||
Ok((vec![task], ProcessBatchInfo::default()))
|
||||
}
|
||||
Batch::IndexCompaction { index_uid: _, mut task } => {
|
||||
let KindWithContent::IndexCompaction { index_uid } = &task.kind else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
let rtxn = self.env.read_txn()?;
|
||||
let ret = catch_unwind(AssertUnwindSafe(|| {
|
||||
self.apply_compaction(&rtxn, &progress, index_uid)
|
||||
}));
|
||||
|
||||
let (pre_size, post_size) = match ret {
|
||||
Ok(Ok(stats)) => stats,
|
||||
Ok(Err(Error::AbortedTask)) => return Err(Error::AbortedTask),
|
||||
Ok(Err(e)) => return Err(e),
|
||||
Err(e) => {
|
||||
let msg = match e.downcast_ref::<&'static str>() {
|
||||
Some(s) => *s,
|
||||
None => match e.downcast_ref::<String>() {
|
||||
Some(s) => &s[..],
|
||||
None => "Box<dyn Any>",
|
||||
},
|
||||
};
|
||||
return Err(Error::Export(Box::new(Error::ProcessBatchPanicked(
|
||||
msg.to_string(),
|
||||
))));
|
||||
}
|
||||
};
|
||||
|
||||
task.status = Status::Succeeded;
|
||||
if let Some(Details::IndexCompaction {
|
||||
index_uid: _,
|
||||
pre_compaction_size,
|
||||
post_compaction_size,
|
||||
}) = task.details.as_mut()
|
||||
{
|
||||
*pre_compaction_size = Some(Byte::from_u64(pre_size));
|
||||
*post_compaction_size = Some(Byte::from_u64(post_size));
|
||||
}
|
||||
|
||||
Ok((vec![task], ProcessBatchInfo::default()))
|
||||
}
|
||||
Batch::Export { mut task } => {
|
||||
let KindWithContent::Export { url, api_key, payload_size, indexes } = &task.kind
|
||||
else {
|
||||
@@ -542,92 +496,6 @@ impl IndexScheduler {
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_compaction(
|
||||
&self,
|
||||
rtxn: &RoTxn,
|
||||
progress: &Progress,
|
||||
index_uid: &str,
|
||||
) -> Result<(u64, u64)> {
|
||||
// 1. Verify that the index exists
|
||||
if !self.index_mapper.index_exists(rtxn, index_uid)? {
|
||||
return Err(Error::IndexNotFound(index_uid.to_owned()));
|
||||
}
|
||||
|
||||
// 2. We retrieve the index and create a temporary file in the index directory
|
||||
progress.update_progress(IndexCompaction::RetrieveTheIndex);
|
||||
let index = self.index_mapper.index(rtxn, index_uid)?;
|
||||
|
||||
// the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
|
||||
self.index_mapper
|
||||
.set_currently_updating_index(Some((index_uid.to_string(), index.clone())));
|
||||
|
||||
progress.update_progress(IndexCompaction::CreateTemporaryFile);
|
||||
let src_path = index.path().join("data.mdb");
|
||||
let pre_size = std::fs::metadata(&src_path)?.len();
|
||||
|
||||
let dst_path = TempPath::from_path(index.path().join(DATA_MDB_COPY_NAME));
|
||||
let file = File::create(&dst_path)?;
|
||||
let mut file = tempfile::NamedTempFile::from_parts(file, dst_path);
|
||||
|
||||
// 3. We copy the index data to the temporary file
|
||||
progress.update_progress(IndexCompaction::CopyAndCompactTheIndex);
|
||||
index
|
||||
.copy_to_file(file.as_file_mut(), CompactionOption::Enabled)
|
||||
.map_err(|error| Error::Milli { error, index_uid: Some(index_uid.to_string()) })?;
|
||||
// ...and reset the file position as specified in the documentation
|
||||
file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
// 4. We replace the index data file with the temporary file
|
||||
progress.update_progress(IndexCompaction::PersistTheCompactedIndex);
|
||||
match file.persist(src_path) {
|
||||
Ok(file) => file.sync_all()?,
|
||||
// TODO see if we have a _resource busy_ error and probably handle this by:
|
||||
// 1. closing the index, 2. replacing and 3. reopening it
|
||||
Err(PersistError { error, file: _ }) => return Err(Error::IoError(error)),
|
||||
};
|
||||
|
||||
// 5. Prepare to close the index
|
||||
progress.update_progress(IndexCompaction::CloseTheIndex);
|
||||
|
||||
// unmark that the index is the processing one so we don't keep a handle to it, preventing its closing
|
||||
self.index_mapper.set_currently_updating_index(None);
|
||||
|
||||
self.index_mapper.close_index(rtxn, index_uid)?;
|
||||
drop(index);
|
||||
|
||||
progress.update_progress(IndexCompaction::ReopenTheIndex);
|
||||
// 6. Reopen the index
|
||||
// The index will use the compacted data file when being reopened
|
||||
let index = self.index_mapper.index(rtxn, index_uid)?;
|
||||
|
||||
// if the update processed successfully, we're going to store the new
// stats of the index. Since the tasks have already been processed,
// this is a non-critical operation: if it fails, we should not fail
// the entire batch.
|
||||
let res = || -> Result<_> {
|
||||
let mut wtxn = self.env.write_txn()?;
|
||||
let index_rtxn = index.read_txn()?;
|
||||
let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
|
||||
.map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
|
||||
self.index_mapper.store_stats_of(&mut wtxn, index_uid, &stats)?;
|
||||
wtxn.commit()?;
|
||||
Ok(stats.database_size)
|
||||
}();
|
||||
|
||||
let post_size = match res {
|
||||
Ok(post_size) => post_size,
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
error = &e as &dyn std::error::Error,
|
||||
"Could not write the stats of the index"
|
||||
);
|
||||
0
|
||||
}
|
||||
};
|
||||
|
||||
Ok((pre_size, post_size))
|
||||
}
|
||||
|
||||
/// Swap the index `lhs` with the index `rhs`.
|
||||
fn apply_index_swap(
|
||||
&self,
|
||||
@@ -915,10 +783,9 @@ impl IndexScheduler {
|
||||
|
||||
let enqueued_tasks = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
|
||||
|
||||
// 0. Check if any upgrade or compaction tasks were matched.
|
||||
// 0. Check if any upgrade task was matched.
|
||||
// If so, we cancel all the failed or enqueued upgrade tasks.
|
||||
let upgrade_tasks = &self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)?;
|
||||
let compaction_tasks = &self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)?;
|
||||
let is_canceling_upgrade = !matched_tasks.is_disjoint(upgrade_tasks);
|
||||
if is_canceling_upgrade {
|
||||
let failed_tasks = self.queue.tasks.get_status(rtxn, Status::Failed)?;
|
||||
@@ -983,33 +850,7 @@ impl IndexScheduler {
|
||||
}
|
||||
}
|
||||
|
||||
// 3. If we are cancelling a compaction task, remove the tempfiles after incomplete compactions
|
||||
for compaction_task in &tasks_to_cancel & compaction_tasks {
|
||||
progress.update_progress(TaskCancelationProgress::CleaningCompactionLeftover);
|
||||
let task = self.queue.tasks.get_task(rtxn, compaction_task)?.unwrap();
|
||||
let Some(Details::IndexCompaction {
|
||||
index_uid,
|
||||
pre_compaction_size: _,
|
||||
post_compaction_size: _,
|
||||
}) = task.details
|
||||
else {
|
||||
unreachable!("wrong details for compaction task {compaction_task}")
|
||||
};
|
||||
|
||||
let index_path = match self.index_mapper.index_mapping.get(rtxn, &index_uid)? {
|
||||
Some(index_uuid) => self.index_mapper.index_path(index_uuid),
|
||||
None => continue,
|
||||
};
|
||||
|
||||
if let Err(e) = remove_file(index_path.join(DATA_MDB_COPY_NAME)) {
|
||||
match e.kind() {
|
||||
ErrorKind::NotFound => (),
|
||||
_ => return Err(Error::IoError(e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 4. We now have a list of tasks to cancel, cancel them
|
||||
// 3. We now have a list of tasks to cancel, cancel them
|
||||
let (task_progress, progress_obj) = AtomicTaskStep::new(tasks_to_cancel.len() as u32);
|
||||
progress.update_progress(progress_obj);
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOr
|
||||
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
|
||||
use meilisearch_types::settings::{self, SecretPolicy};
|
||||
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
|
||||
use roaring::RoaringBitmap;
|
||||
use serde::Deserialize;
|
||||
use ureq::{json, Response};
|
||||
|
||||
@@ -50,6 +51,7 @@ impl IndexScheduler {
|
||||
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
|
||||
let must_stop_processing = self.scheduler.must_stop_processing.clone();
|
||||
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
|
||||
let err = |err| Error::from_milli(err, Some(uid.to_string()));
|
||||
if must_stop_processing.get() {
|
||||
return Err(Error::AbortedTask);
|
||||
}
|
||||
@@ -61,104 +63,31 @@ impl IndexScheduler {
|
||||
));
|
||||
|
||||
let ExportIndexSettings { filter, override_settings } = export_settings;
|
||||
|
||||
let index = self.index(uid)?;
|
||||
let index_rtxn = index.read_txn()?;
|
||||
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
|
||||
|
||||
// First, check if the index already exists
|
||||
let url = format!("{base_url}/indexes/{uid}");
|
||||
let response = retry(&must_stop_processing, || {
|
||||
let mut request = agent.get(&url);
|
||||
if let Some(bearer) = &bearer {
|
||||
request = request.set("Authorization", bearer);
|
||||
}
|
||||
|
||||
request.send_bytes(Default::default()).map_err(into_backoff_error)
|
||||
});
|
||||
let index_exists = match response {
|
||||
Ok(response) => response.status() == 200,
|
||||
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
|
||||
false
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
let primary_key = index
|
||||
.primary_key(&index_rtxn)
|
||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
||||
|
||||
// Create the index
|
||||
if !index_exists {
|
||||
let url = format!("{base_url}/indexes");
|
||||
retry(&must_stop_processing, || {
|
||||
let mut request = agent.post(&url);
|
||||
if let Some(bearer) = &bearer {
|
||||
request = request.set("Authorization", bearer);
|
||||
}
|
||||
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
|
||||
request.send_json(&index_param).map_err(into_backoff_error)
|
||||
})?;
|
||||
}
|
||||
|
||||
// Patch the index primary key
|
||||
if index_exists && *override_settings {
|
||||
let url = format!("{base_url}/indexes/{uid}");
|
||||
retry(&must_stop_processing, || {
|
||||
let mut request = agent.patch(&url);
|
||||
if let Some(bearer) = &bearer {
|
||||
request = request.set("Authorization", bearer);
|
||||
}
|
||||
let index_param = json!({ "primaryKey": primary_key });
|
||||
request.send_json(&index_param).map_err(into_backoff_error)
|
||||
})?;
|
||||
}
|
||||
|
||||
// Send the index settings
|
||||
if !index_exists || *override_settings {
|
||||
let mut settings =
|
||||
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
|
||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
||||
// Remove the experimental chat setting if not enabled
|
||||
if self.features().check_chat_completions("exporting chat settings").is_err() {
|
||||
settings.chat = Setting::NotSet;
|
||||
}
|
||||
// Retry logic for sending settings
|
||||
let url = format!("{base_url}/indexes/{uid}/settings");
|
||||
retry(&must_stop_processing, || {
|
||||
let mut request = agent.patch(&url);
|
||||
if let Some(bearer) = bearer.as_ref() {
|
||||
request = request.set("Authorization", bearer);
|
||||
}
|
||||
request.send_json(settings.clone()).map_err(into_backoff_error)
|
||||
})?;
|
||||
}
|
||||
|
||||
let filter = filter
|
||||
.as_ref()
|
||||
.map(Filter::from_json)
|
||||
.transpose()
|
||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
|
||||
.flatten();
|
||||
|
||||
let filter_universe = filter
|
||||
.map(|f| f.evaluate(&index_rtxn, &index))
|
||||
.transpose()
|
||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
||||
let whole_universe = index
|
||||
.documents_ids(&index_rtxn)
|
||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
||||
let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
|
||||
let filter_universe =
|
||||
filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
|
||||
let whole_universe =
|
||||
index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
|
||||
let universe = filter_universe.unwrap_or(whole_universe);
|
||||
|
||||
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
|
||||
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
|
||||
|
||||
// We don't need to keep this one alive as we will
|
||||
// spawn many threads to process the documents
|
||||
drop(index_rtxn);
|
||||
|
||||
let total_documents = universe.len() as u32;
|
||||
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
|
||||
progress.update_progress(progress_step);
|
||||
let target = TargetInstance { base_url, api_key };
|
||||
let ctx = ExportContext {
|
||||
index: &index,
|
||||
index_rtxn: &index_rtxn,
|
||||
universe: &universe,
|
||||
progress: &progress,
|
||||
agent: &agent,
|
||||
must_stop_processing: &must_stop_processing,
|
||||
};
|
||||
let options = ExportOptions {
|
||||
index_uid: uid,
|
||||
payload_size,
|
||||
override_settings: *override_settings,
|
||||
extra_headers: &Default::default(),
|
||||
};
|
||||
let total_documents = self.export_one_index(target, options, ctx)?;
|
||||
|
||||
output.insert(
|
||||
IndexUidPattern::new_unchecked(uid.clone()),
|
||||
@@ -167,155 +96,217 @@ impl IndexScheduler {
|
||||
matched_documents: Some(total_documents as u64),
|
||||
},
|
||||
);
|
||||
|
||||
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
|
||||
let documents_url = format!("{base_url}/indexes/{uid}/documents");
|
||||
|
||||
let results = request_threads()
|
||||
.broadcast(|ctx| {
|
||||
let index_rtxn = index
|
||||
.read_txn()
|
||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
let mut tmp_buffer = Vec::new();
|
||||
let mut compressed_buffer = Vec::new();
|
||||
for (i, docid) in universe.iter().enumerate() {
|
||||
if i % ctx.num_threads() != ctx.index() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let document = index
|
||||
.document(&index_rtxn, docid)
|
||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
||||
|
||||
let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
|
||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
||||
|
||||
// TODO definitely factorize this code
|
||||
'inject_vectors: {
|
||||
let embeddings = index
|
||||
.embeddings(&index_rtxn, docid)
|
||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
||||
|
||||
if embeddings.is_empty() {
|
||||
break 'inject_vectors;
|
||||
}
|
||||
|
||||
let vectors = document
|
||||
.entry(RESERVED_VECTORS_FIELD_NAME)
|
||||
.or_insert(serde_json::Value::Object(Default::default()));
|
||||
|
||||
let serde_json::Value::Object(vectors) = vectors else {
|
||||
return Err(Error::from_milli(
|
||||
milli::Error::UserError(
|
||||
milli::UserError::InvalidVectorsMapType {
|
||||
document_id: {
|
||||
if let Ok(Some(Ok(index))) = index
|
||||
.external_id_of(
|
||||
&index_rtxn,
|
||||
std::iter::once(docid),
|
||||
)
|
||||
.map(|it| it.into_iter().next())
|
||||
{
|
||||
index
|
||||
} else {
|
||||
format!("internal docid={docid}")
|
||||
}
|
||||
},
|
||||
value: vectors.clone(),
|
||||
},
|
||||
),
|
||||
Some(uid.to_string()),
|
||||
));
|
||||
};
|
||||
|
||||
for (
|
||||
embedder_name,
|
||||
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
|
||||
) in embeddings
|
||||
{
|
||||
let embeddings = ExplicitVectors {
|
||||
embeddings: Some(
|
||||
VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
|
||||
),
|
||||
regenerate: regenerate &&
|
||||
// Meilisearch does not handle dumps with fragments well, because as the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
|
||||
!has_fragments,
|
||||
};
|
||||
vectors.insert(
|
||||
embedder_name,
|
||||
serde_json::to_value(embeddings).unwrap(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
tmp_buffer.clear();
|
||||
serde_json::to_writer(&mut tmp_buffer, &document)
|
||||
.map_err(milli::InternalError::from)
|
||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
||||
|
||||
// Make sure we put at least one document in the buffer even
|
||||
// though we might go above the buffer limit before sending
|
||||
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
|
||||
// We compress the documents before sending them
|
||||
let mut encoder =
|
||||
GzEncoder::new(&mut compressed_buffer, Compression::default());
|
||||
encoder
|
||||
.write_all(&buffer)
|
||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
|
||||
encoder
|
||||
.finish()
|
||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
|
||||
|
||||
retry(&must_stop_processing, || {
|
||||
let mut request = agent.post(&documents_url);
|
||||
request = request.set("Content-Type", "application/x-ndjson");
|
||||
request = request.set("Content-Encoding", "gzip");
|
||||
if let Some(bearer) = &bearer {
|
||||
request = request.set("Authorization", bearer);
|
||||
}
|
||||
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
|
||||
})?;
|
||||
buffer.clear();
|
||||
compressed_buffer.clear();
|
||||
}
|
||||
buffer.extend_from_slice(&tmp_buffer);
|
||||
|
||||
if i > 0 && i % 100 == 0 {
|
||||
step.fetch_add(100, atomic::Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
retry(&must_stop_processing, || {
|
||||
let mut request = agent.post(&documents_url);
|
||||
request = request.set("Content-Type", "application/x-ndjson");
|
||||
if let Some(bearer) = &bearer {
|
||||
request = request.set("Authorization", bearer);
|
||||
}
|
||||
request.send_bytes(&buffer).map_err(into_backoff_error)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.map_err(|e| {
|
||||
Error::from_milli(
|
||||
milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
|
||||
Some(uid.to_string()),
|
||||
)
|
||||
})?;
|
||||
for result in results {
|
||||
result?;
|
||||
}
|
||||
|
||||
step.store(total_documents, atomic::Ordering::Relaxed);
|
||||
}
|
||||
|
||||
Ok(output)
|
||||
}

    pub(super) fn export_one_index(
        &self,
        target: TargetInstance<'_>,
        options: ExportOptions<'_>,
        ctx: ExportContext<'_>,
    ) -> Result<u64, Error> {
        let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));

        let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
        let url = format!(
            "{base_url}/indexes/{index_uid}",
            base_url = target.base_url,
            index_uid = options.index_uid
        );
        let response = retry(ctx.must_stop_processing, || {
            let mut request = ctx.agent.get(&url);
            if let Some(bearer) = &bearer {
                request = request.set("Authorization", bearer);
            }

            request.send_bytes(Default::default()).map_err(into_backoff_error)
        });
        let index_exists = match response {
            Ok(response) => response.status() == 200,
            Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => false,
            Err(e) => return Err(e),
        };
        let primary_key =
            ctx.index.primary_key(&ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
        if !index_exists {
            let url = format!("{base_url}/indexes", base_url = target.base_url);
            retry(ctx.must_stop_processing, || {
                let mut request = ctx.agent.post(&url);
                if let Some(bearer) = &bearer {
                    request = request.set("Authorization", bearer);
                }
                let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
                request.send_json(&index_param).map_err(into_backoff_error)
            })?;
        }
        if index_exists && options.override_settings {
            retry(ctx.must_stop_processing, || {
                let mut request = ctx.agent.patch(&url);
                if let Some(bearer) = &bearer {
                    request = request.set("Authorization", bearer);
                }
                let index_param = json!({ "primaryKey": primary_key });
                request.send_json(&index_param).map_err(into_backoff_error)
            })?;
        }
        if !index_exists || options.override_settings {
            let mut settings =
                settings::settings(&ctx.index, &ctx.index_rtxn, SecretPolicy::RevealSecrets)
                    .map_err(err)?;
            // Remove the experimental chat setting if not enabled
            if self.features().check_chat_completions("exporting chat settings").is_err() {
                settings.chat = Setting::NotSet;
            }
            // Retry logic for sending settings
            let url = format!(
                "{base_url}/indexes/{index_uid}/settings",
                base_url = target.base_url,
                index_uid = options.index_uid
            );
            retry(ctx.must_stop_processing, || {
                let mut request = ctx.agent.patch(&url);
                if let Some(bearer) = bearer.as_ref() {
                    request = request.set("Authorization", bearer);
                }
                request.send_json(settings.clone()).map_err(into_backoff_error)
            })?;
        }

        let fields_ids_map = ctx.index.fields_ids_map(&ctx.index_rtxn)?;
        let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
        let total_documents = ctx.universe.len() as u32;
        let (step, progress_step) = AtomicDocumentStep::new(total_documents);
        ctx.progress.update_progress(progress_step);

        let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
        let documents_url = format!(
            "{base_url}/indexes/{index_uid}/documents",
            base_url = target.base_url,
            index_uid = options.index_uid
        );
        let results = request_threads()
            .broadcast(|broadcast| {
                let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;

                let mut buffer = Vec::new();
                let mut tmp_buffer = Vec::new();
                let mut compressed_buffer = Vec::new();
                for (i, docid) in ctx.universe.iter().enumerate() {
                    if i % broadcast.num_threads() != broadcast.index() {
                        continue;
                    }

                    let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;

                    let mut document =
                        obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;

                    // TODO definitely factorize this code
                    'inject_vectors: {
                        let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;

                        if embeddings.is_empty() {
                            break 'inject_vectors;
                        }

                        let vectors = document
                            .entry(RESERVED_VECTORS_FIELD_NAME)
                            .or_insert(serde_json::Value::Object(Default::default()));

                        let serde_json::Value::Object(vectors) = vectors else {
                            return Err(err(milli::Error::UserError(
                                milli::UserError::InvalidVectorsMapType {
                                    document_id: {
                                        if let Ok(Some(Ok(index))) = ctx
                                            .index
                                            .external_id_of(&index_rtxn, std::iter::once(docid))
                                            .map(|it| it.into_iter().next())
                                        {
                                            index
                                        } else {
                                            format!("internal docid={docid}")
                                        }
                                    },
                                    value: vectors.clone(),
                                },
                            )));
                        };

                        for (
                            embedder_name,
                            EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
                        ) in embeddings
                        {
                            let embeddings = ExplicitVectors {
                                embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
                                    embeddings,
                                )),
                                regenerate: regenerate &&
                                    // Meilisearch does not handle dumps with fragments well: because
                                    // the fragments are marked as user-provided, all embeddings would
                                    // be regenerated on any settings change or document update.
                                    // To prevent this, we mark the embeddings as non-regenerate in this case.
                                    !has_fragments,
                            };
                            vectors
                                .insert(embedder_name, serde_json::to_value(embeddings).unwrap());
                        }
                    }

                    tmp_buffer.clear();
                    serde_json::to_writer(&mut tmp_buffer, &document)
                        .map_err(milli::InternalError::from)
                        .map_err(milli::Error::from)
                        .map_err(err)?;

                    // Make sure we put at least one document in the buffer even
                    // though we might go above the buffer limit before sending
                    if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
                        // We compress the documents before sending them
                        let mut encoder =
                            GzEncoder::new(&mut compressed_buffer, Compression::default());
                        encoder.write_all(&buffer).map_err(milli::Error::from).map_err(err)?;
                        encoder.finish().map_err(milli::Error::from).map_err(err)?;

                        retry(ctx.must_stop_processing, || {
                            let mut request = ctx.agent.post(&documents_url);
                            request = request.set("Content-Type", "application/x-ndjson");
                            request = request.set("Content-Encoding", "gzip");
                            if let Some(bearer) = &bearer {
                                request = request.set("Authorization", bearer);
                            }
                            request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
                        })?;
                        buffer.clear();
                        compressed_buffer.clear();
                    }
                    buffer.extend_from_slice(&tmp_buffer);

                    if i > 0 && i % 100 == 0 {
                        step.fetch_add(100, atomic::Ordering::Relaxed);
                    }
                }

                retry(ctx.must_stop_processing, || {
                    let mut request = ctx.agent.post(&documents_url);
                    request = request.set("Content-Type", "application/x-ndjson");
                    if let Some(bearer) = &bearer {
                        request = request.set("Authorization", bearer);
                    }
                    request.send_bytes(&buffer).map_err(into_backoff_error)
                })?;

                Ok(())
            })
            .map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
        for result in results {
            result?;
        }
        step.store(total_documents, atomic::Ordering::Relaxed);
        Ok(total_documents as u64)
    }
}
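// Sketch of the compression step used above: documents are serialized as NDJSON into a
// buffer and the whole batch is gzipped before being POSTed. Names here are illustrative,
// not part of the scheduler's API.
#[allow(dead_code)]
fn compress_ndjson_batch(documents: &[serde_json::Value]) -> std::io::Result<Vec<u8>> {
    use std::io::Write as _;

    // One JSON document per line, mirroring the `tmp_buffer` step of the export loop.
    let mut ndjson = Vec::new();
    for document in documents {
        serde_json::to_writer(&mut ndjson, document).map_err(std::io::Error::other)?;
        ndjson.push(b'\n');
    }

    // Compress the batch in one pass, mirroring the `compressed_buffer` step.
    let mut encoder = flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
    encoder.write_all(&ndjson)?;
    encoder.finish()
}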
|
||||
|
||||
fn retry<F>(must_stop_processing: &MustStopProcessing, send_request: F) -> Result<ureq::Response>
@@ -374,4 +365,27 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
    }
}
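// The `retry` helper above retries a request with exponential backoff while honoring the
// scheduler's stop flag. A rough standalone sketch of that shape (the real helper is built
// on the `backoff`/`ureq` machinery; everything below is illustrative):
#[allow(dead_code)]
fn retry_with_backoff<T, E>(
    should_stop: impl Fn() -> bool,
    mut send_request: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut delay = std::time::Duration::from_millis(100);
    loop {
        match send_request() {
            Ok(value) => return Ok(value),
            Err(error) => {
                // Give up when asked to stop or once the backoff delay becomes unreasonable.
                if should_stop() || delay > std::time::Duration::from_secs(60) {
                    return Err(error);
                }
                std::thread::sleep(delay);
                delay *= 2;
            }
        }
    }
}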
// export_one_index arguments
pub(super) struct TargetInstance<'a> {
    pub(super) base_url: &'a str,
    pub(super) api_key: Option<&'a str>,
}

pub(super) struct ExportOptions<'a> {
    pub(super) index_uid: &'a str,
    pub(super) payload_size: Option<&'a Byte>,
    pub(super) override_settings: bool,
    pub(super) extra_headers: &'a hashbrown::HashMap<String, String>,
}

pub(super) struct ExportContext<'a> {
    pub(super) index: &'a meilisearch_types::milli::Index,
    pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
    pub(super) universe: &'a RoaringBitmap,
    pub(super) progress: &'a Progress,
    pub(super) agent: &'a ureq::Agent,
    pub(super) must_stop_processing: &'a MustStopProcessing,
}

// progress related
enum ExportIndex {}
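// Illustration only: how a caller might fill the argument structs above. The values are
// placeholders and the `ExportContext` construction lives in the scheduler, not here.
#[allow(dead_code)]
fn example_export_arguments<'a>(
    extra_headers: &'a hashbrown::HashMap<String, String>,
) -> (TargetInstance<'a>, ExportOptions<'a>) {
    let target = TargetInstance {
        base_url: "http://localhost:7700", // hypothetical remote instance
        api_key: Some("MASTER_KEY"),       // hypothetical API key
    };
    let options = ExportOptions {
        index_uid: "movies",
        payload_size: None,       // fall back to the 20 MiB default payload limit
        override_settings: false, // keep the remote settings if the index already exists
        extra_headers,
    };
    // `ExportContext` bundles the index handle, read transaction, document universe,
    // progress reporter, HTTP agent and stop flag; it is built by the scheduler itself.
    (target, options)
}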
|
||||
|
||||
@@ -97,7 +97,7 @@ impl IndexScheduler {
|
||||
.embedding_configs()
|
||||
.embedding_configs(index_wtxn)
|
||||
.map_err(|e| Error::from_milli(e.into(), Some(index_uid.clone())))?;
|
||||
let embedders = self.embedders(index_uid.clone(), embedders)?;
|
||||
let embedders = self.embedders(&index_uid, embedders)?;
|
||||
for operation in operations {
|
||||
match operation {
|
||||
DocumentOperation::Replace(_content_uuid) => {
|
||||
@@ -284,7 +284,7 @@ impl IndexScheduler {
|
||||
.embedding_configs()
|
||||
.embedding_configs(index_wtxn)
|
||||
.map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
|
||||
let embedders = self.embedders(index_uid.clone(), embedders)?;
|
||||
let embedders = self.embedders(&index_uid, embedders)?;
|
||||
|
||||
progress.update_progress(DocumentEditionProgress::Indexing);
|
||||
congestion = Some(
|
||||
@@ -434,7 +434,7 @@ impl IndexScheduler {
|
||||
.embedding_configs()
|
||||
.embedding_configs(index_wtxn)
|
||||
.map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
|
||||
let embedders = self.embedders(index_uid.clone(), embedders)?;
|
||||
let embedders = self.embedders(&index_uid, embedders)?;
|
||||
|
||||
progress.update_progress(DocumentDeletionProgress::Indexing);
|
||||
congestion = Some(
|
||||
|
||||
@@ -12,8 +12,6 @@ use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress};
|
||||
use crate::queue::TaskQueue;
|
||||
use crate::{Error, IndexScheduler, Result};
|
||||
|
||||
const UPDATE_FILES_DIR_NAME: &str = "update_files";
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// See [`EnvOpenOptions::open`].
|
||||
@@ -80,32 +78,10 @@ impl IndexScheduler {
|
||||
pub(super) fn process_snapshot(
|
||||
&self,
|
||||
progress: Progress,
|
||||
tasks: Vec<Task>,
|
||||
mut tasks: Vec<Task>,
|
||||
) -> Result<Vec<Task>> {
|
||||
progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation);
|
||||
|
||||
match self.scheduler.s3_snapshot_options.clone() {
|
||||
Some(options) => {
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
let _ = options;
|
||||
panic!("Non-unix platform does not support S3 snapshotting");
|
||||
}
|
||||
#[cfg(unix)]
|
||||
self.runtime
|
||||
.as_ref()
|
||||
.expect("Runtime not initialized")
|
||||
.block_on(self.process_snapshot_to_s3(progress, options, tasks))
|
||||
}
|
||||
None => self.process_snapshots_to_disk(progress, tasks),
|
||||
}
|
||||
}
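// When S3 options are configured, the synchronous scheduler thread hands the snapshot off
// to async code by blocking on a long-lived Tokio runtime, as in the `block_on` call above.
// Minimal sketch of that hand-off (the runtime construction and task are illustrative):
#[allow(dead_code)]
fn block_on_async_from_sync() -> std::io::Result<u32> {
    let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
    // The scheduler keeps one such runtime around and reuses it for every S3 snapshot.
    Ok(runtime.block_on(async { 40 + 2 }))
}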
|
||||
|
||||
fn process_snapshots_to_disk(
|
||||
&self,
|
||||
progress: Progress,
|
||||
mut tasks: Vec<Task>,
|
||||
) -> Result<Vec<Task>, Error> {
|
||||
fs::create_dir_all(&self.scheduler.snapshots_path)?;
|
||||
let temp_snapshot_dir = tempfile::tempdir()?;
|
||||
|
||||
@@ -152,7 +128,7 @@ impl IndexScheduler {
|
||||
let rtxn = self.env.read_txn()?;
|
||||
|
||||
// 2.4 Create the update files directory
|
||||
let update_files_dir = temp_snapshot_dir.path().join(UPDATE_FILES_DIR_NAME);
|
||||
let update_files_dir = temp_snapshot_dir.path().join("update_files");
|
||||
fs::create_dir_all(&update_files_dir)?;
|
||||
|
||||
// 2.5 Only copy the update files of the enqueued tasks
|
||||
@@ -164,7 +140,7 @@ impl IndexScheduler {
|
||||
let task =
|
||||
self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
|
||||
if let Some(content_uuid) = task.content_uuid() {
|
||||
let src = self.queue.file_store.update_path(content_uuid);
|
||||
let src = self.queue.file_store.get_update_path(content_uuid);
|
||||
let dst = update_files_dir.join(content_uuid.to_string());
|
||||
fs::copy(src, dst)?;
|
||||
}
|
||||
@@ -230,403 +206,4 @@ impl IndexScheduler {
|
||||
|
||||
Ok(tasks)
|
||||
}
|
||||
|
||||
    #[cfg(unix)]
    pub(super) async fn process_snapshot_to_s3(
        &self,
        progress: Progress,
        opts: meilisearch_types::milli::update::S3SnapshotOptions,
        mut tasks: Vec<Task>,
    ) -> Result<Vec<Task>> {
        use meilisearch_types::milli::update::S3SnapshotOptions;

        let S3SnapshotOptions {
            s3_bucket_url,
            s3_bucket_region,
            s3_bucket_name,
            s3_snapshot_prefix,
            s3_access_key,
            s3_secret_key,
            s3_max_in_flight_parts,
            s3_compression_level: level,
            s3_signature_duration,
            s3_multipart_part_size,
        } = opts;

        let must_stop_processing = self.scheduler.must_stop_processing.clone();
        let retry_backoff = backoff::ExponentialBackoff::default();
        let db_name = {
            let mut base_path = self.env.path().to_owned();
            base_path.pop();
            base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms").to_string()
        };

        let (reader, writer) = std::io::pipe()?;
        let uploader_task = tokio::spawn(multipart_stream_to_s3(
            s3_bucket_url,
            s3_bucket_region,
            s3_bucket_name,
            s3_snapshot_prefix,
            s3_access_key,
            s3_secret_key,
            s3_max_in_flight_parts,
            s3_signature_duration,
            s3_multipart_part_size,
            must_stop_processing,
            retry_backoff,
            db_name,
            reader,
        ));

        let index_scheduler = IndexScheduler::private_clone(self);
        let builder_task = tokio::task::spawn_blocking(move || {
            stream_tarball_into_pipe(progress, level, writer, index_scheduler)
        });

        let (uploader_result, builder_result) = tokio::join!(uploader_task, builder_task);

        // Check uploader result first to early return on task abortion.
        // safety: JoinHandle can return an error if the task was aborted, cancelled, or panicked.
        uploader_result.unwrap()?;
        builder_result.unwrap()?;

        for task in &mut tasks {
            task.status = Status::Succeeded;
        }

        Ok(tasks)
    }
}
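// The snapshot above wires a blocking producer to an async consumer through an anonymous
// pipe: the tarball is written synchronously on a `spawn_blocking` thread while the S3
// uploader drains the read end. Minimal sketch of that pattern with plain bytes instead of
// tar/S3 (unix-only, like the real code; everything below is illustrative):
#[cfg(unix)]
#[allow(dead_code)]
async fn pipe_blocking_producer_to_async_consumer() -> std::io::Result<u64> {
    use std::io::Write as _;
    use std::os::fd::OwnedFd;

    use tokio::io::AsyncReadExt as _;

    let (reader, mut writer) = std::io::pipe()?;

    // Producer: runs on the blocking thread pool, like `stream_tarball_into_pipe`.
    let producer = tokio::task::spawn_blocking(move || -> std::io::Result<()> {
        for chunk in 0u8..10 {
            writer.write_all(&[chunk; 1024])?;
        }
        Ok(()) // dropping the writer closes the pipe and lets the consumer finish
    });

    // Consumer: drains the read end asynchronously, standing in for the S3 uploader.
    let consumer = tokio::spawn(async move {
        let mut reader = tokio::net::unix::pipe::Receiver::from_owned_fd(OwnedFd::from(reader))?;
        let mut total = 0u64;
        let mut buf = [0u8; 4096];
        loop {
            match reader.read(&mut buf).await? {
                0 => break,
                n => total += n as u64,
            }
        }
        std::io::Result::Ok(total)
    });

    // Join both halves, checking the consumer first as the snapshot code does.
    let (consumed, produced) = tokio::join!(consumer, producer);
    let total = consumed.expect("consumer task panicked")?;
    produced.expect("producer task panicked")?;
    Ok(total)
}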
/// Streams a tarball of the database content into a pipe.
#[cfg(unix)]
fn stream_tarball_into_pipe(
    progress: Progress,
    level: u32,
    writer: std::io::PipeWriter,
    index_scheduler: IndexScheduler,
) -> std::result::Result<(), Error> {
    use std::io::Write as _;
    use std::path::Path;

    let writer = flate2::write::GzEncoder::new(writer, flate2::Compression::new(level));
    let mut tarball = tar::Builder::new(writer);

    // 1. Snapshot the version file
    tarball
        .append_path_with_name(&index_scheduler.scheduler.version_file_path, VERSION_FILE_NAME)?;

    // 2. Snapshot the index scheduler LMDB env
    progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
    let tasks_env_file = index_scheduler.env.try_clone_inner_file()?;
    let path = Path::new("tasks").join("data.mdb");
    append_file_to_tarball(&mut tarball, path, tasks_env_file)?;

    // 2.3 Create a read transaction on the index-scheduler
    let rtxn = index_scheduler.env.read_txn()?;

    // 2.4 Create the update files directory
    // and only copy the update files of the enqueued tasks
    progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
    let enqueued = index_scheduler.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
    let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);
    progress.update_progress(update_file_progress);

    // We create the update_files directory so that it
    // always exists even if there are no update files
    let update_files_dir = Path::new(UPDATE_FILES_DIR_NAME);
    let src_update_files_dir = {
        let mut path = index_scheduler.env.path().to_path_buf();
        path.pop();
        path.join(UPDATE_FILES_DIR_NAME)
    };
    tarball.append_dir(update_files_dir, src_update_files_dir)?;

    for task_id in enqueued {
        let task = index_scheduler
            .queue
            .tasks
            .get_task(&rtxn, task_id)?
            .ok_or(Error::CorruptedTaskQueue)?;
        if let Some(content_uuid) = task.content_uuid() {
            use std::fs::File;

            let src = index_scheduler.queue.file_store.update_path(content_uuid);
            let mut update_file = File::open(src)?;
            let path = update_files_dir.join(content_uuid.to_string());
            tarball.append_file(path, &mut update_file)?;
        }
        atomic.fetch_add(1, Ordering::Relaxed);
    }

    // 3. Snapshot every index
    progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexes);
    let index_mapping = index_scheduler.index_mapper.index_mapping;
    let nb_indexes = index_mapping.len(&rtxn)? as u32;
    let indexes_dir = Path::new("indexes");
    let indexes_references: Vec<_> = index_scheduler
        .index_mapper
        .index_mapping
        .iter(&rtxn)?
        .map(|res| res.map_err(Error::from).map(|(name, uuid)| (name.to_string(), uuid)))
        .collect::<Result<_, Error>>()?;

    // It's prettier to use a for loop instead of the IndexMapper::try_for_each_index
    // method, especially when we need to access the UUID, local path and index number.
    for (i, (name, uuid)) in indexes_references.into_iter().enumerate() {
        progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
            &name, i as u32, nb_indexes,
        ));
        let path = indexes_dir.join(uuid.to_string()).join("data.mdb");
        let index = index_scheduler.index_mapper.index(&rtxn, &name)?;
        let index_file = index.try_clone_inner_file()?;
        tracing::trace!("Appending index file for {name} in {}", path.display());
        append_file_to_tarball(&mut tarball, path, index_file)?;
    }

    drop(rtxn);

    // 4. Snapshot the auth LMDB env
    progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
    let auth_env_file = index_scheduler.scheduler.auth_env.try_clone_inner_file()?;
    let path = Path::new("auth").join("data.mdb");
    append_file_to_tarball(&mut tarball, path, auth_env_file)?;

    let mut gzencoder = tarball.into_inner()?;
    gzencoder.flush()?;
    gzencoder.try_finish()?;
    let mut writer = gzencoder.finish()?;
    writer.flush()?;

    Result::<_, Error>::Ok(())
}
#[cfg(unix)]
fn append_file_to_tarball<W, P>(
    tarball: &mut tar::Builder<W>,
    path: P,
    mut file: fs::File,
) -> Result<(), Error>
where
    W: std::io::Write,
    P: AsRef<std::path::Path>,
{
    use std::io::{Seek as _, SeekFrom};

    // Note: A previous snapshot operation may have left the cursor
    // at the end of the file, so we need to seek back to the start.
    file.seek(SeekFrom::Start(0))?;
    tarball.append_file(path, &mut file)?;
    Ok(())
}
|
||||
|
||||
/// Streams the content read from the given reader to S3.
|
||||
#[cfg(unix)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn multipart_stream_to_s3(
|
||||
s3_bucket_url: String,
|
||||
s3_bucket_region: String,
|
||||
s3_bucket_name: String,
|
||||
s3_snapshot_prefix: String,
|
||||
s3_access_key: String,
|
||||
s3_secret_key: String,
|
||||
s3_max_in_flight_parts: std::num::NonZero<usize>,
|
||||
s3_signature_duration: std::time::Duration,
|
||||
s3_multipart_part_size: u64,
|
||||
must_stop_processing: super::MustStopProcessing,
|
||||
retry_backoff: backoff::exponential::ExponentialBackoff<backoff::SystemClock>,
|
||||
db_name: String,
|
||||
reader: std::io::PipeReader,
|
||||
) -> Result<(), Error> {
|
||||
use std::{collections::VecDeque, os::fd::OwnedFd, path::PathBuf};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use reqwest::{Client, Response};
|
||||
use rusty_s3::S3Action as _;
|
||||
use rusty_s3::{actions::CreateMultipartUpload, Bucket, BucketError, Credentials, UrlStyle};
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
let reader = OwnedFd::from(reader);
|
||||
let reader = tokio::net::unix::pipe::Receiver::from_owned_fd(reader)?;
|
||||
let s3_snapshot_prefix = PathBuf::from(s3_snapshot_prefix);
|
||||
let url =
|
||||
s3_bucket_url.parse().map_err(BucketError::ParseError).map_err(Error::S3BucketError)?;
|
||||
let bucket = Bucket::new(url, UrlStyle::Path, s3_bucket_name, s3_bucket_region)
|
||||
.map_err(Error::S3BucketError)?;
|
||||
let credential = Credentials::new(s3_access_key, s3_secret_key);
|
||||
|
||||
// Note for the future (rust 1.91+): use with_added_extension, it's prettier
|
||||
let object_path = s3_snapshot_prefix.join(format!("{db_name}.snapshot"));
|
||||
// Note: It doesn't work on Windows and if a port to this platform is needed,
|
||||
// use the slash-path crate or similar to get the correct path separator.
|
||||
let object = object_path.display().to_string();
|
||||
|
||||
let action = bucket.create_multipart_upload(Some(&credential), &object);
|
||||
let url = action.sign(s3_signature_duration);
|
||||
|
||||
let client = Client::new();
|
||||
let resp = client.post(url).send().await.map_err(Error::S3HttpError)?;
|
||||
let status = resp.status();
|
||||
|
||||
let body = match resp.error_for_status_ref() {
|
||||
Ok(_) => resp.text().await.map_err(Error::S3HttpError)?,
|
||||
Err(_) => {
|
||||
return Err(Error::S3Error { status, body: resp.text().await.unwrap_or_default() })
|
||||
}
|
||||
};
|
||||
|
||||
let multipart =
|
||||
CreateMultipartUpload::parse_response(&body).map_err(|e| Error::S3XmlError(Box::new(e)))?;
|
||||
tracing::debug!("Starting the upload of the snapshot to {object}");
|
||||
|
||||
// We use this bumpalo for etags strings.
|
||||
let bump = bumpalo::Bump::new();
|
||||
let mut etags = Vec::<&str>::new();
|
||||
let mut in_flight = VecDeque::<(JoinHandle<reqwest::Result<Response>>, Bytes)>::with_capacity(
|
||||
s3_max_in_flight_parts.get(),
|
||||
);
|
||||
|
||||
// Part numbers start at 1 and cannot be larger than 10k
|
||||
for part_number in 1u16.. {
|
||||
if must_stop_processing.get() {
|
||||
return Err(Error::AbortedTask);
|
||||
}
|
||||
|
||||
let part_upload =
|
||||
bucket.upload_part(Some(&credential), &object, part_number, multipart.upload_id());
|
||||
let url = part_upload.sign(s3_signature_duration);
|
||||
|
||||
// Wait for a buffer to be ready if there are in-flight parts that landed
|
||||
let mut buffer = if in_flight.len() >= s3_max_in_flight_parts.get() {
|
||||
let (handle, buffer) = in_flight.pop_front().expect("At least one in flight request");
|
||||
let resp = join_and_map_error(handle).await?;
|
||||
extract_and_append_etag(&bump, &mut etags, resp.headers())?;
|
||||
|
||||
let mut buffer = match buffer.try_into_mut() {
|
||||
Ok(buffer) => buffer,
|
||||
Err(_) => unreachable!("All bytes references were consumed in the task"),
|
||||
};
|
||||
buffer.clear();
|
||||
buffer
|
||||
} else {
|
||||
BytesMut::with_capacity(s3_multipart_part_size as usize)
|
||||
};
|
||||
|
||||
// If we successfully read enough bytes,
|
||||
// we can continue and send the buffer/part
|
||||
while buffer.len() < (s3_multipart_part_size as usize / 2) {
|
||||
// Wait for the pipe to be readable
|
||||
|
||||
use std::io;
|
||||
reader.readable().await?;
|
||||
|
||||
match reader.try_read_buf(&mut buffer) {
|
||||
Ok(0) => break,
|
||||
// We read some bytes but maybe not enough
|
||||
Ok(_) => continue,
|
||||
// The readiness event is a false positive.
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
if buffer.is_empty() {
|
||||
// Break the loop if the buffer is
|
||||
// empty after we tried to read bytes
|
||||
break;
|
||||
}
|
||||
|
||||
let body = buffer.freeze();
|
||||
tracing::trace!("Sending part {part_number}");
|
||||
let task = tokio::spawn({
|
||||
let client = client.clone();
|
||||
let body = body.clone();
|
||||
backoff::future::retry(retry_backoff.clone(), move || {
|
||||
let client = client.clone();
|
||||
let url = url.clone();
|
||||
let body = body.clone();
|
||||
async move {
|
||||
match client.put(url).body(body).send().await {
|
||||
Ok(resp) if resp.status().is_client_error() => {
|
||||
resp.error_for_status().map_err(backoff::Error::Permanent)
|
||||
}
|
||||
Ok(resp) => Ok(resp),
|
||||
Err(e) => Err(backoff::Error::transient(e)),
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
in_flight.push_back((task, body));
|
||||
}
|
||||
|
||||
for (handle, _buffer) in in_flight {
|
||||
let resp = join_and_map_error(handle).await?;
|
||||
extract_and_append_etag(&bump, &mut etags, resp.headers())?;
|
||||
}
|
||||
|
||||
tracing::debug!("Finalizing the multipart upload");
|
||||
|
||||
let action = bucket.complete_multipart_upload(
|
||||
Some(&credential),
|
||||
&object,
|
||||
multipart.upload_id(),
|
||||
etags.iter().map(AsRef::as_ref),
|
||||
);
|
||||
let url = action.sign(s3_signature_duration);
|
||||
let body = action.body();
|
||||
let resp = backoff::future::retry(retry_backoff, move || {
|
||||
let client = client.clone();
|
||||
let url = url.clone();
|
||||
let body = body.clone();
|
||||
async move {
|
||||
match client.post(url).body(body).send().await {
|
||||
Ok(resp) if resp.status().is_client_error() => {
|
||||
resp.error_for_status().map_err(backoff::Error::Permanent)
|
||||
}
|
||||
Ok(resp) => Ok(resp),
|
||||
Err(e) => Err(backoff::Error::transient(e)),
|
||||
}
|
||||
}
|
||||
})
|
||||
.await
|
||||
.map_err(Error::S3HttpError)?;
|
||||
|
||||
let status = resp.status();
|
||||
let body = resp.text().await.map_err(|e| Error::S3Error { status, body: e.to_string() })?;
|
||||
if status.is_success() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::S3Error { status, body })
|
||||
}
|
||||
}
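// The uploader above interleaves reading from the pipe with up to `s3_max_in_flight_parts`
// concurrent part uploads. Stripped of the pipelining, retries and stop-flag handling, the
// S3 multipart protocol it speaks boils down to three signed requests. Sketch of that
// skeleton (bucket name, region, credentials and object are placeholders):
#[allow(dead_code)]
async fn upload_in_parts(parts: Vec<Vec<u8>>) -> Result<(), Box<dyn std::error::Error>> {
    use std::time::Duration;

    use rusty_s3::actions::CreateMultipartUpload;
    use rusty_s3::{Bucket, Credentials, S3Action as _, UrlStyle};

    let bucket = Bucket::new(
        "http://localhost:9000".parse()?,
        UrlStyle::Path,
        "snapshots", // placeholder bucket name
        "us-east-1", // placeholder region
    )?;
    let credentials = Credentials::new("minioadmin", "minioadmin");
    let object = "data.ms.snapshot";
    let expiry = Duration::from_secs(3600);
    let client = reqwest::Client::new();

    // 1. Create the multipart upload and remember its id.
    let action = bucket.create_multipart_upload(Some(&credentials), object);
    let body = client.post(action.sign(expiry)).send().await?.text().await?;
    let multipart = CreateMultipartUpload::parse_response(&body)?;

    // 2. Upload each part (numbered from 1) and collect the returned ETags.
    let mut etags = Vec::new();
    for (i, part) in parts.into_iter().enumerate() {
        let action =
            bucket.upload_part(Some(&credentials), object, i as u16 + 1, multipart.upload_id());
        let resp = client.put(action.sign(expiry)).body(part).send().await?;
        // Panics if the header is missing; the real code surfaces an error instead.
        etags.push(resp.headers()[reqwest::header::ETAG].to_str()?.to_owned());
    }

    // 3. Tell S3 to stitch the parts together.
    let action = bucket.complete_multipart_upload(
        Some(&credentials),
        object,
        multipart.upload_id(),
        etags.iter().map(|etag| etag.as_str()),
    );
    client.post(action.sign(expiry)).body(action.body()).send().await?.error_for_status()?;
    Ok(())
}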
#[cfg(unix)]
async fn join_and_map_error(
    join_handle: tokio::task::JoinHandle<Result<reqwest::Response, reqwest::Error>>,
) -> Result<reqwest::Response> {
    // safety: `unwrap` only panics if the task was aborted, cancelled, or itself panicked
    let request = join_handle.await.unwrap();
    let resp = request.map_err(Error::S3HttpError)?;
    match resp.error_for_status_ref() {
        Ok(_) => Ok(resp),
        Err(_) => Err(Error::S3Error {
            status: resp.status(),
            body: resp.text().await.unwrap_or_default(),
        }),
    }
}

#[cfg(unix)]
fn extract_and_append_etag<'b>(
    bump: &'b bumpalo::Bump,
    etags: &mut Vec<&'b str>,
    headers: &reqwest::header::HeaderMap,
) -> Result<()> {
    use reqwest::header::ETAG;

    let etag = headers.get(ETAG).ok_or_else(|| Error::S3XmlError("Missing ETag header".into()))?;
    let etag = etag.to_str().map_err(|e| Error::S3XmlError(Box::new(e)))?;
    etags.push(bump.alloc_str(etag));

    Ok(())
}
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
|
||||
[timestamp] [4,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
||||
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
|
||||
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,]
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
@@ -37,7 +37,7 @@ catto [1,]
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
----------------------------------------------------------------------
|
||||
@@ -40,7 +40,7 @@ doggo [2,]
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
@@ -43,7 +43,7 @@ doggo [2,3,]
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
|
||||
@@ -742,11 +742,11 @@ fn basic_get_stats() {
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"export": 0,
|
||||
"indexCompaction": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"networkTopologyChange": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
@@ -757,7 +757,7 @@ fn basic_get_stats() {
|
||||
"###);
|
||||
|
||||
handle.advance_till([Start, BatchCreated]);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
@@ -777,7 +777,6 @@ fn basic_get_stats() {
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"export": 0,
|
||||
"indexCompaction": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
@@ -789,7 +788,7 @@ fn basic_get_stats() {
|
||||
"upgradeDatabase": 0
|
||||
}
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
|
||||
handle.advance_till([
|
||||
InsideProcessBatch,
|
||||
@@ -799,7 +798,7 @@ fn basic_get_stats() {
|
||||
Start,
|
||||
BatchCreated,
|
||||
]);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
@@ -819,7 +818,6 @@ fn basic_get_stats() {
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"export": 0,
|
||||
"indexCompaction": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
@@ -831,7 +829,7 @@ fn basic_get_stats() {
|
||||
"upgradeDatabase": 0
|
||||
}
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
|
||||
// now we make one more batch, the started_at field of the new tasks will be past `second_start_time`
|
||||
handle.advance_till([
|
||||
@@ -842,7 +840,7 @@ fn basic_get_stats() {
|
||||
Start,
|
||||
BatchCreated,
|
||||
]);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
@@ -862,7 +860,6 @@ fn basic_get_stats() {
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"export": 0,
|
||||
"indexCompaction": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
@@ -874,7 +871,7 @@ fn basic_get_stats() {
|
||||
"upgradeDatabase": 0
|
||||
}
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -121,7 +121,7 @@ fn import_vectors() {
|
||||
insta::assert_json_snapshot!(simple_hf_config.embedder_options);
|
||||
let simple_hf_name = name.clone();
|
||||
|
||||
let configs = index_scheduler.embedders("doggos".to_string(), configs).unwrap();
|
||||
let configs = index_scheduler.embedders("doggos", configs).unwrap();
|
||||
let hf_runtime = configs.get(&simple_hf_name).unwrap();
|
||||
let hf_embedder = &hf_runtime.embedder;
|
||||
let beagle_embed = hf_embedder
|
||||
|
||||
@@ -126,7 +126,7 @@ impl IndexScheduler {
|
||||
std::fs::create_dir_all(&options.auth_path).unwrap();
|
||||
let auth_env = open_auth_store_env(&options.auth_path).unwrap();
|
||||
let index_scheduler =
|
||||
Self::new_test(options, auth_env, version, None, sender, planned_failures).unwrap();
|
||||
Self::new(options, auth_env, version, None, sender, planned_failures).unwrap();
|
||||
|
||||
// To be 100% consistent between all test we're going to start the scheduler right now
|
||||
// and ensure it's in the expected starting state.
|
||||
|
||||
@@ -45,11 +45,6 @@ pub fn upgrade_index_scheduler(
|
||||
(1, 19, _) => 0,
|
||||
(1, 20, _) => 0,
|
||||
(1, 21, _) => 0,
|
||||
(1, 22, _) => 0,
|
||||
(1, 23, _) => 0,
|
||||
(1, 24, _) => 0,
|
||||
(1, 25, _) => 0,
|
||||
(1, 26, _) => 0,
|
||||
(major, minor, patch) => {
|
||||
if major > current_major
|
||||
|| (major == current_major && minor > current_minor)
|
||||
@@ -100,7 +95,6 @@ pub fn upgrade_index_scheduler(
|
||||
status: Status::Enqueued,
|
||||
kind: KindWithContent::UpgradeDatabase { from },
|
||||
network: None,
|
||||
custom_metadata: None,
|
||||
},
|
||||
)?;
|
||||
wtxn.commit()?;
|
||||
|
||||
@@ -256,15 +256,14 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
|
||||
use KindWithContent as K;
|
||||
let mut index_uids = vec![];
|
||||
match &mut task.kind {
|
||||
K::DocumentAdditionOrUpdate { index_uid, .. }
|
||||
| K::DocumentEdition { index_uid, .. }
|
||||
| K::DocumentDeletion { index_uid, .. }
|
||||
| K::DocumentDeletionByFilter { index_uid, .. }
|
||||
| K::DocumentClear { index_uid }
|
||||
| K::SettingsUpdate { index_uid, .. }
|
||||
| K::IndexDeletion { index_uid }
|
||||
| K::IndexCreation { index_uid, .. }
|
||||
| K::IndexCompaction { index_uid, .. } => index_uids.push(index_uid),
|
||||
K::DocumentAdditionOrUpdate { index_uid, .. } => index_uids.push(index_uid),
|
||||
K::DocumentEdition { index_uid, .. } => index_uids.push(index_uid),
|
||||
K::DocumentDeletion { index_uid, .. } => index_uids.push(index_uid),
|
||||
K::DocumentDeletionByFilter { index_uid, .. } => index_uids.push(index_uid),
|
||||
K::DocumentClear { index_uid } => index_uids.push(index_uid),
|
||||
K::SettingsUpdate { index_uid, .. } => index_uids.push(index_uid),
|
||||
K::IndexDeletion { index_uid } => index_uids.push(index_uid),
|
||||
K::IndexCreation { index_uid, .. } => index_uids.push(index_uid),
|
||||
K::IndexUpdate { index_uid, new_index_uid, .. } => {
|
||||
index_uids.push(index_uid);
|
||||
if let Some(new_uid) = new_index_uid {
|
||||
@@ -286,6 +285,7 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
|
||||
| K::DumpCreation { .. }
|
||||
| K::Export { .. }
|
||||
| K::UpgradeDatabase { .. }
|
||||
| K::NetworkTopologyChange { .. }
|
||||
| K::SnapshotCreation => (),
|
||||
};
|
||||
if let Some(Details::IndexSwap { swaps }) = &mut task.details {
|
||||
@@ -379,7 +379,6 @@ impl crate::IndexScheduler {
|
||||
status,
|
||||
kind,
|
||||
network: _,
|
||||
custom_metadata: _,
|
||||
} = task;
|
||||
assert_eq!(uid, task.uid);
|
||||
if task.status != Status::Enqueued {
|
||||
@@ -620,12 +619,8 @@ impl crate::IndexScheduler {
|
||||
Details::UpgradeDatabase { from: _, to: _ } => {
|
||||
assert_eq!(kind.as_kind(), Kind::UpgradeDatabase);
|
||||
}
|
||||
Details::IndexCompaction {
|
||||
index_uid: _,
|
||||
pre_compaction_size: _,
|
||||
post_compaction_size: _,
|
||||
} => {
|
||||
assert_eq!(kind.as_kind(), Kind::IndexCompaction);
|
||||
Details::NetworkTopologyChange { .. } => {
|
||||
assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,7 +109,6 @@ impl HeedAuthStore {
|
||||
Action::IndexesGet,
|
||||
Action::IndexesUpdate,
|
||||
Action::IndexesSwap,
|
||||
Action::IndexesCompact,
|
||||
]
|
||||
.iter(),
|
||||
);
|
||||
|
||||
@@ -5,31 +5,85 @@
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use milli::update::new::indexer::enterprise_edition::sharding::Shards;
|
||||
use deserr::Deserr;
|
||||
use milli::update::new::indexer::enterprise_edition::sharding::{Shard, Shards};
|
||||
use milli::update::Setting;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::deserr::DeserrJsonError;
|
||||
use crate::error::deserr_codes::{
|
||||
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
|
||||
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Network {
    #[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
    #[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
    #[serde(default)]
    pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
    #[schema(value_type = Option<String>, example = json!("ms-00"), rename = "self")]
    #[serde(default, rename = "self")]
    #[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
    pub local: Setting<String>,
    #[schema(value_type = Option<bool>, example = json!(true))]
    #[serde(default)]
    #[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
    pub sharding: Setting<bool>,
}

#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Remote {
    #[schema(value_type = Option<String>, example = json!({
        "ms-0": Remote { url: Setting::Set("http://localhost:7700".into()), search_api_key: Setting::Reset, write_api_key: Setting::Reset },
        "ms-1": Remote { url: Setting::Set("http://localhost:7701".into()), search_api_key: Setting::Set("foo".into()), write_api_key: Setting::Set("bar".into()) },
        "ms-2": Remote { url: Setting::Set("http://localhost:7702".into()), search_api_key: Setting::Set("bar".into()), write_api_key: Setting::Set("foo".into()) },
    }))]
    #[deserr(default, error = DeserrJsonError<InvalidNetworkUrl>)]
    #[serde(default)]
    pub url: Setting<String>,
    #[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
    #[deserr(default, error = DeserrJsonError<InvalidNetworkSearchApiKey>)]
    #[serde(default)]
    pub search_api_key: Setting<String>,
    #[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
    #[deserr(default, error = DeserrJsonError<InvalidNetworkWriteApiKey>)]
    #[serde(default)]
    pub write_api_key: Setting<String>,
}
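// With `deny_unknown_fields` and the camelCase renames above, a network payload accepted by
// these structs looks like the following (remote names, URLs and keys are placeholder examples):
#[allow(dead_code)]
fn example_network_payload() -> serde_json::Value {
    serde_json::json!({
        "self": "ms-00",
        "sharding": true,
        "remotes": {
            "ms-00": { "url": "http://localhost:7700" },
            "ms-01": { "url": "http://localhost:7701", "searchApiKey": "foo", "writeApiKey": "bar" }
        }
    })
}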
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Network {
|
||||
pub struct DbNetwork {
|
||||
#[serde(default, rename = "self")]
|
||||
pub local: Option<String>,
|
||||
#[serde(default)]
|
||||
pub remotes: BTreeMap<String, Remote>,
|
||||
pub remotes: BTreeMap<String, DbRemote>,
|
||||
#[serde(default)]
|
||||
pub sharding: bool,
|
||||
}
|
||||
|
||||
impl Network {
|
||||
impl DbNetwork {
|
||||
pub fn shards(&self) -> Option<Shards> {
|
||||
if self.sharding {
|
||||
let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
|
||||
let others = self
|
||||
.remotes
|
||||
.keys()
|
||||
.filter(|name| name.as_str() != this)
|
||||
.map(|name| name.to_owned())
|
||||
.collect();
|
||||
Some(Shards { own: vec![this.to_owned()], others })
|
||||
let this = self.local.as_deref();
|
||||
|
||||
Some(Shards(
|
||||
self.remotes
|
||||
.keys()
|
||||
.map(|name| Shard {
|
||||
is_own: Some(name.as_str()) == this,
|
||||
name: name.to_owned(),
|
||||
})
|
||||
.collect(),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@@ -38,7 +92,7 @@ impl Network {
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Remote {
|
||||
pub struct DbRemote {
|
||||
pub url: String,
|
||||
#[serde(default)]
|
||||
pub search_api_key: Option<String>,
|
||||
|
||||
@@ -254,12 +254,10 @@ InvalidSearchHybridQuery , InvalidRequest , BAD_REQU
|
||||
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexCustomMetadata , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchFacets , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchFacetsByIndex , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchFacetOrder , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchQueryPersonalization , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchFederated , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchFederationOptions , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchMaxValuesPerFacet , InvalidRequest , BAD_REQUEST ;
|
||||
@@ -317,8 +315,6 @@ InvalidSearchShowRankingScoreDetails , InvalidRequest , BAD_REQU
|
||||
InvalidSimilarShowRankingScoreDetails , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchSort , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchDistinct , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchPersonalize , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchPersonalizeUserContext , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchMediaAndVector , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
|
||||
@@ -394,9 +390,6 @@ TooManyVectors , InvalidRequest , BAD_REQU
|
||||
UnretrievableDocument , Internal , BAD_REQUEST ;
|
||||
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
|
||||
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
|
||||
InvalidS3SnapshotRequest , Internal , BAD_REQUEST ;
|
||||
InvalidS3SnapshotParameters , Internal , BAD_REQUEST ;
|
||||
S3SnapshotServerError , Internal , BAD_GATEWAY ;
|
||||
|
||||
// Experimental features
|
||||
VectorEmbeddingError , InvalidRequest , BAD_REQUEST ;
|
||||
@@ -536,6 +529,8 @@ impl ErrorCode for milli::Error {
|
||||
| UserError::DocumentEditionCompilationError(_) => {
|
||||
Code::EditDocumentsByFunctionError
|
||||
}
|
||||
UserError::NetworkShardingWithoutSelf => Code::InvalidNetworkSharding,
|
||||
UserError::NetworkMissingUrl(_) => Code::MissingNetworkUrl,
|
||||
UserError::CelluliteError(err) => match err {
|
||||
cellulite::Error::BuildCanceled
|
||||
| cellulite::Error::VersionMismatchOnBuild(_)
|
||||
@@ -686,18 +681,6 @@ impl fmt::Display for deserr_codes::InvalidNetworkSearchApiKey {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for deserr_codes::InvalidSearchPersonalize {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "the value of `personalize` is invalid, expected a JSON object with `userContext` string.")
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for deserr_codes::InvalidSearchPersonalizeUserContext {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "the value of `userContext` is invalid, expected a string.")
|
||||
}
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! internal_error {
|
||||
($target:ty : $($other:path), *) => {
|
||||
|
||||
@@ -380,9 +380,6 @@ pub enum Action {
|
||||
#[serde(rename = "webhooks.*")]
|
||||
#[deserr(rename = "webhooks.*")]
|
||||
WebhooksAll,
|
||||
#[serde(rename = "indexes.compact")]
|
||||
#[deserr(rename = "indexes.compact")]
|
||||
IndexesCompact,
|
||||
}
|
||||
|
||||
impl Action {
|
||||
@@ -401,7 +398,6 @@ impl Action {
|
||||
INDEXES_UPDATE => Some(Self::IndexesUpdate),
|
||||
INDEXES_DELETE => Some(Self::IndexesDelete),
|
||||
INDEXES_SWAP => Some(Self::IndexesSwap),
|
||||
INDEXES_COMPACT => Some(Self::IndexesCompact),
|
||||
TASKS_ALL => Some(Self::TasksAll),
|
||||
TASKS_CANCEL => Some(Self::TasksCancel),
|
||||
TASKS_DELETE => Some(Self::TasksDelete),
|
||||
@@ -466,7 +462,6 @@ impl Action {
|
||||
IndexesUpdate => false,
|
||||
IndexesDelete => false,
|
||||
IndexesSwap => false,
|
||||
IndexesCompact => false,
|
||||
TasksCancel => false,
|
||||
TasksDelete => false,
|
||||
TasksGet => true,
|
||||
@@ -518,7 +513,6 @@ pub mod actions {
|
||||
pub const INDEXES_UPDATE: u8 = IndexesUpdate.repr();
|
||||
pub const INDEXES_DELETE: u8 = IndexesDelete.repr();
|
||||
pub const INDEXES_SWAP: u8 = IndexesSwap.repr();
|
||||
pub const INDEXES_COMPACT: u8 = IndexesCompact.repr();
|
||||
pub const TASKS_ALL: u8 = TasksAll.repr();
|
||||
pub const TASKS_CANCEL: u8 = TasksCancel.repr();
|
||||
pub const TASKS_DELETE: u8 = TasksDelete.repr();
|
||||
@@ -620,7 +614,6 @@ pub(crate) mod test {
|
||||
assert!(WebhooksDelete.repr() == 47 && WEBHOOKS_DELETE == 47);
|
||||
assert!(WebhooksCreate.repr() == 48 && WEBHOOKS_CREATE == 48);
|
||||
assert!(WebhooksAll.repr() == 49 && WEBHOOKS_ALL == 49);
|
||||
assert!(IndexesCompact.repr() == 50 && INDEXES_COMPACT == 50);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -346,26 +346,24 @@ impl<T> Settings<T> {
|
||||
continue;
|
||||
};
|
||||
|
||||
hide_secret(api_key, 0);
|
||||
Self::hide_secret(api_key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Redact a secret string, starting from the `secret_offset`th byte.
|
||||
pub fn hide_secret(secret: &mut String, secret_offset: usize) {
|
||||
match secret.len().checked_sub(secret_offset) {
|
||||
None => (),
|
||||
Some(x) if x < 10 => {
|
||||
secret.replace_range(secret_offset.., "XXX...");
|
||||
}
|
||||
Some(x) if x < 20 => {
|
||||
secret.replace_range((secret_offset + 2).., "XXXX...");
|
||||
}
|
||||
Some(x) if x < 30 => {
|
||||
secret.replace_range((secret_offset + 3).., "XXXXX...");
|
||||
}
|
||||
Some(_x) => {
|
||||
secret.replace_range((secret_offset + 5).., "XXXXXX...");
|
||||
fn hide_secret(secret: &mut String) {
|
||||
match secret.len() {
|
||||
x if x < 10 => {
|
||||
secret.replace_range(.., "XXX...");
|
||||
}
|
||||
x if x < 20 => {
|
||||
secret.replace_range(2.., "XXXX...");
|
||||
}
|
||||
x if x < 30 => {
|
||||
secret.replace_range(3.., "XXXXX...");
|
||||
}
|
||||
_x => {
|
||||
secret.replace_range(5.., "XXXXXX...");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
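// `hide_secret` now keeps a fixed number of leading characters based only on the secret's
// length, instead of taking an offset. A standalone copy of the rule, for illustration only
// (the function above is private to the impl):
#[allow(dead_code)]
fn redact_like_hide_secret(secret: &mut String) {
    match secret.len() {
        x if x < 10 => secret.replace_range(.., "XXX..."),
        x if x < 20 => secret.replace_range(2.., "XXXX..."),
        x if x < 30 => secret.replace_range(3.., "XXXXX..."),
        _ => secret.replace_range(5.., "XXXXXX..."),
    }
    // e.g. "short" -> "XXX...", "a-medium-length-key" -> "a-XXXX...",
    // "a-considerably-longer-api-key-value" -> "a-conXXXXXX..."
}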
|
||||

@@ -7,6 +7,7 @@ use time::{Duration, OffsetDateTime};
use utoipa::ToSchema;

use crate::batches::BatchId;
use crate::enterprise_edition::network::Network;
use crate::error::ResponseError;
use crate::settings::{Settings, Unchecked};
use crate::tasks::{
@@ -55,9 +56,6 @@ pub struct TaskView {

#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<TaskNetwork>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
}

impl TaskView {
@@ -76,7 +74,6 @@ impl TaskView {
started_at: task.started_at,
finished_at: task.finished_at,
network: task.network.clone(),
custom_metadata: task.custom_metadata.clone(),
}
}
}
@@ -146,11 +143,9 @@ pub struct DetailsView {
pub old_index_uid: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub new_index_uid: Option<String>,
// index compaction
// network
#[serde(skip_serializing_if = "Option::is_none")]
pub pre_compaction_size: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub post_compaction_size: Option<String>,
pub network: Option<Network>,
}

impl DetailsView {
@@ -323,23 +318,9 @@ impl DetailsView {
// We should never be able to batch multiple renames at the same time.
(Some(left), Some(_right)) => Some(left),
},
pre_compaction_size: match (
self.pre_compaction_size.clone(),
other.pre_compaction_size.clone(),
) {
network: match (&self.network, &other.network) {
(None, None) => None,
(None, Some(size)) | (Some(size), None) => Some(size),
// We should never be able to batch multiple compactions at the same time.
(Some(left), Some(_right)) => Some(left),
},
post_compaction_size: match (
self.post_compaction_size.clone(),
other.post_compaction_size.clone(),
) {
(None, None) => None,
(None, Some(size)) | (Some(size), None) => Some(size),
// We should never be able to batch multiple compactions at the same time.
(Some(left), Some(_right)) => Some(left),
(_, Some(network)) | (Some(network), None) => Some(network.clone()),
},
}
}
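The `accumulate` logic above applies the same Option-merging rule to each details field. A reduced, hedged sketch of that rule (generic names, not the actual Meilisearch types):

```rust
// Keep whichever side is Some; if both are Some, arbitrarily keep the left
// value, since two batched changes of the same kind are not expected.
fn merge<T: Clone>(left: &Option<T>, right: &Option<T>) -> Option<T> {
    match (left, right) {
        (None, None) => None,
        (Some(v), None) | (None, Some(v)) => Some(v.clone()),
        (Some(l), Some(_r)) => Some(l.clone()),
    }
}

fn main() {
    assert_eq!(merge(&None::<u32>, &Some(2)), Some(2));
    assert_eq!(merge(&Some(1), &Some(2)), Some(1));
}
```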
@@ -442,14 +423,8 @@ impl From<Details> for DetailsView {
upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)),
..Default::default()
},
Details::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
DetailsView {
pre_compaction_size: pre_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
post_compaction_size: post_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
..Default::default()
}
Details::NetworkTopologyChange { network: new_network } => {
DetailsView { network: new_network, ..Default::default() }
}
}
}

@@ -15,6 +15,7 @@ use utoipa::{schema, ToSchema};
use uuid::Uuid;

use crate::batches::BatchId;
use crate::enterprise_edition::network::Network;
use crate::error::ResponseError;
use crate::index_uid_pattern::IndexUidPattern;
use crate::keys::Key;
@@ -45,9 +46,6 @@ pub struct Task {

#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<TaskNetwork>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
}

impl Task {
@@ -61,6 +59,7 @@ impl Task {
| TaskDeletion { .. }
| Export { .. }
| UpgradeDatabase { .. }
| NetworkTopologyChange { .. }
| IndexSwap { .. } => None,
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -70,8 +69,7 @@ impl Task {
| SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid }
| IndexCompaction { index_uid } => Some(index_uid),
| IndexDeletion { index_uid } => Some(index_uid),
}
}

@@ -99,7 +97,7 @@ impl Task {
| KindWithContent::SnapshotCreation
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::IndexCompaction { .. } => None,
| KindWithContent::NetworkTopologyChange { .. } => None,
}
}
}
@@ -175,8 +173,9 @@ pub enum KindWithContent {
UpgradeDatabase {
from: (u32, u32, u32),
},
IndexCompaction {
index_uid: String,
NetworkTopologyChange {
network: Option<Network>,
origin: Option<Origin>,
},
}

@@ -214,7 +213,7 @@ impl KindWithContent {
KindWithContent::SnapshotCreation => Kind::SnapshotCreation,
KindWithContent::Export { .. } => Kind::Export,
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
}
}

@@ -227,6 +226,7 @@ impl KindWithContent {
| TaskCancelation { .. }
| TaskDeletion { .. }
| Export { .. }
| NetworkTopologyChange { .. }
| UpgradeDatabase { .. } => vec![],
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -235,8 +235,7 @@ impl KindWithContent {
| DocumentClear { index_uid }
| SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. }
| IndexDeletion { index_uid }
| IndexCompaction { index_uid } => vec![index_uid],
| IndexDeletion { index_uid } => vec![index_uid],
IndexUpdate { index_uid, new_index_uid, .. } => {
let mut indexes = vec![index_uid.as_str()];
if let Some(new_uid) = new_index_uid {
@@ -335,11 +334,9 @@ impl KindWithContent {
versioning::VERSION_PATCH,
),
}),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
Some(Details::NetworkTopologyChange { network: new_network.clone() })
}
}
}

@@ -422,11 +419,9 @@ impl KindWithContent {
versioning::VERSION_PATCH,
),
}),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange { network: new_network, origin: _s } => {
Some(Details::NetworkTopologyChange { network: new_network.clone() })
}
}
}
}
@@ -489,11 +484,9 @@ impl From<&KindWithContent> for Option<Details> {
versioning::VERSION_PATCH,
),
}),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
Some(Details::NetworkTopologyChange { network: new_network.clone() })
}
}
}
}
@@ -604,7 +597,7 @@ pub enum Kind {
SnapshotCreation,
Export,
UpgradeDatabase,
IndexCompaction,
NetworkTopologyChange,
}

impl Kind {
@@ -616,15 +609,15 @@ impl Kind {
| Kind::SettingsUpdate
| Kind::IndexCreation
| Kind::IndexDeletion
| Kind::IndexUpdate
| Kind::IndexCompaction => true,
| Kind::IndexUpdate => true,
Kind::IndexSwap
| Kind::TaskCancelation
| Kind::TaskDeletion
| Kind::DumpCreation
| Kind::Export
| Kind::UpgradeDatabase
| Kind::SnapshotCreation => false,
| Kind::SnapshotCreation
| Kind::NetworkTopologyChange => false,
}
}
}
@@ -645,7 +638,7 @@ impl Display for Kind {
Kind::SnapshotCreation => write!(f, "snapshotCreation"),
Kind::Export => write!(f, "export"),
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
Kind::IndexCompaction => write!(f, "indexCompaction"),
Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
}
}
}
@@ -681,8 +674,8 @@ impl FromStr for Kind {
Ok(Kind::Export)
} else if kind.eq_ignore_ascii_case("upgradeDatabase") {
Ok(Kind::UpgradeDatabase)
} else if kind.eq_ignore_ascii_case("indexCompaction") {
Ok(Kind::IndexCompaction)
} else if kind.eq_ignore_ascii_case("networkTopologyChange") {
Ok(Kind::NetworkTopologyChange)
} else {
Err(ParseTaskKindError(kind.to_owned()))
}
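The `FromStr` hunk above extends the case-insensitive task-kind parser. A self-contained sketch of that parsing style, reduced to two variants (the real enum has many more, and its error type differs):

```rust
use std::str::FromStr;

#[derive(Debug, PartialEq)]
enum Kind {
    IndexCompaction,
    NetworkTopologyChange,
}

impl FromStr for Kind {
    type Err = String;
    fn from_str(kind: &str) -> Result<Self, Self::Err> {
        if kind.eq_ignore_ascii_case("indexCompaction") {
            Ok(Kind::IndexCompaction)
        } else if kind.eq_ignore_ascii_case("networkTopologyChange") {
            Ok(Kind::NetworkTopologyChange)
        } else {
            Err(format!("unknown task kind `{kind}`"))
        }
    }
}

fn main() {
    // Casing does not matter, mirroring the eq_ignore_ascii_case checks above.
    assert_eq!("networktopologychange".parse::<Kind>(), Ok(Kind::NetworkTopologyChange));
}
```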
@@ -768,10 +761,8 @@ pub enum Details {
from: (u32, u32, u32),
to: (u32, u32, u32),
},
IndexCompaction {
index_uid: String,
pre_compaction_size: Option<Byte>,
post_compaction_size: Option<Byte>,
NetworkTopologyChange {
network: Option<Network>,
},
}

@@ -835,15 +826,12 @@ impl Details {
Self::ClearAll { deleted_documents } => *deleted_documents = Some(0),
Self::TaskCancelation { canceled_tasks, .. } => *canceled_tasks = Some(0),
Self::TaskDeletion { deleted_tasks, .. } => *deleted_tasks = Some(0),
Self::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
*pre_compaction_size = None;
*post_compaction_size = None;
}
Self::SettingsUpdate { .. }
| Self::IndexInfo { .. }
| Self::Dump { .. }
| Self::Export { .. }
| Self::UpgradeDatabase { .. }
| Self::NetworkTopologyChange { .. }
| Self::IndexSwap { .. } => (),
}

@@ -11,24 +11,6 @@ pub struct Webhook {
pub headers: BTreeMap<String, String>,
}

impl Webhook {
pub fn redact_authorization_header(&mut self) {
// headers are case insensitive, so to make the redaction robust we iterate over qualifying headers
// rather than getting one canonical `Authorization` header.
for value in self
.headers
.iter_mut()
.filter_map(|(name, value)| name.eq_ignore_ascii_case("authorization").then_some(value))
{
if value.starts_with("Bearer ") {
crate::settings::hide_secret(value, "Bearer ".len());
} else {
crate::settings::hide_secret(value, 0);
}
}
}
}

#[derive(Debug, Serialize, Default, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct WebhooksView {
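The `redact_authorization_header` code in the hunk above masks credentials while keeping a recognizable `Bearer ` prefix. A standalone sketch of the same idea, with a plain masking function standing in for `hide_secret`:

```rust
use std::collections::BTreeMap;

// Headers are matched case-insensitively, so every spelling of
// "authorization" gets redacted; only the "Bearer " prefix is preserved.
fn redact_authorization(headers: &mut BTreeMap<String, String>) {
    for value in headers
        .iter_mut()
        .filter_map(|(name, value)| name.eq_ignore_ascii_case("authorization").then_some(value))
    {
        let keep = if value.starts_with("Bearer ") { "Bearer ".len() } else { 0 };
        value.replace_range(keep.., "XXX...");
    }
}

fn main() {
    let mut headers =
        BTreeMap::from([("AUTHORIZATION".to_string(), "Bearer s3cr3t".to_string())]);
    redact_authorization(&mut headers);
    assert_eq!(headers["AUTHORIZATION"], "Bearer XXX...");
}
```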
@@ -91,7 +91,7 @@ time = { version = "0.3.41", features = [
] }
tokio = { version = "1.45.1", features = ["full"] }
toml = "0.8.23"
uuid = { version = "1.18.0", features = ["serde", "v4", "v7"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }
serde_urlencoded = "0.7.1"
termcolor = "1.4.1"
url = { version = "2.5.4", features = ["serde"] }

@@ -205,10 +205,7 @@ struct Infos {
experimental_no_snapshot_compaction: bool,
experimental_no_edition_2024_for_dumps: bool,
experimental_no_edition_2024_for_settings: bool,
experimental_no_edition_2024_for_prefix_post_processing: bool,
experimental_no_edition_2024_for_facet_post_processing: bool,
experimental_vector_store_setting: bool,
experimental_personalization: bool,
gpu_enabled: bool,
db_path: bool,
import_dump: bool,
@@ -218,7 +215,6 @@ struct Infos {
import_snapshot: bool,
schedule_snapshot: Option<u64>,
snapshot_dir: bool,
uses_s3_snapshots: bool,
ignore_missing_snapshot: bool,
ignore_snapshot_if_db_exists: bool,
http_addr: bool,
@@ -287,8 +283,6 @@ impl Infos {
indexer_options,
config_file_path,
no_analytics: _,
experimental_personalization_api_key,
s3_snapshot_options,
} = options;

let schedule_snapshot = match schedule_snapshot {
@@ -302,8 +296,6 @@ impl Infos {
skip_index_budget: _,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = indexer_options;

let RuntimeTogglableFeatures {
@@ -352,7 +344,6 @@ impl Infos {
import_snapshot: import_snapshot.is_some(),
schedule_snapshot,
snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
uses_s3_snapshots: s3_snapshot_options.is_some(),
ignore_missing_snapshot,
ignore_snapshot_if_db_exists,
http_addr: http_addr != default_http_addr(),
@@ -374,9 +365,6 @@ impl Infos {
ssl_resumption,
ssl_tickets,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
experimental_personalization: experimental_personalization_api_key.is_some(),
}
}
}

@@ -38,8 +38,6 @@ pub enum MeilisearchHttpError {
PaginationInFederatedQuery(usize, &'static str),
#[error("Inside `.queries[{0}]`: Using facet options is not allowed in federated queries.\n - Hint: remove `facets` from query #{0} or remove `federation` from the request\n - Hint: pass `federation.facetsByIndex.{1}: {2:?}` for facets in federated search")]
FacetsInFederatedQuery(usize, String, Vec<String>),
#[error("Inside `.queries[{0}]`: Using `.personalize` is not allowed in federated queries.\n - Hint: remove `personalize` from query #{0} or remove `federation` from the request")]
PersonalizationInFederatedQuery(usize),
#[error("Inconsistent order for values in facet `{facet}`: index `{previous_uid}` orders {previous_facet_order}, but index `{current_uid}` orders {index_facet_order}.\n - Hint: Remove `federation.mergeFacets` or change `faceting.sortFacetValuesBy` to be consistent in settings.")]
InconsistentFacetOrder {
facet: String,
@@ -139,9 +137,6 @@ impl ErrorCode for MeilisearchHttpError {
MeilisearchHttpError::InconsistentFacetOrder { .. } => {
Code::InvalidMultiSearchFacetOrder
}
MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
Code::InvalidMultiSearchQueryPersonalization
}
MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
Code::InconsistentDocumentChangeHeaders
}

@@ -11,7 +11,6 @@ pub mod middleware;
pub mod option;
#[cfg(test)]
mod option_test;
pub mod personalization;
pub mod routes;
pub mod search;
pub mod search_queue;
@@ -59,7 +58,6 @@ use tracing::{error, info_span};
use tracing_subscriber::filter::Targets;

use crate::error::MeilisearchHttpError;
use crate::personalization::PersonalizationService;

/// Default number of simultaneously opened indexes.
///
@@ -130,8 +128,12 @@ pub type LogStderrType = tracing_subscriber::filter::Filtered<
>;

pub fn create_app(
services: ServicesData,
index_scheduler: Data<IndexScheduler>,
auth_controller: Data<AuthController>,
search_queue: Data<SearchQueue>,
opt: Opt,
logs: (LogRouteHandle, LogStderrHandle),
analytics: Data<Analytics>,
enable_dashboard: bool,
) -> actix_web::App<
impl ServiceFactory<
@@ -143,7 +145,17 @@ pub fn create_app(
>,
> {
let app = actix_web::App::new()
.configure(|s| configure_data(s, services, &opt))
.configure(|s| {
configure_data(
s,
index_scheduler.clone(),
auth_controller.clone(),
search_queue.clone(),
&opt,
logs,
analytics.clone(),
)
})
.configure(routes::configure)
.configure(|s| dashboard(s, enable_dashboard));

@@ -221,11 +233,7 @@ pub fn setup_meilisearch(
task_db_size: opt.max_task_db_size.as_u64() as usize,
index_base_map_size: opt.max_index_size.as_u64() as usize,
enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
indexer_config: Arc::new({
let s3_snapshot_options =
opt.s3_snapshot_options.clone().map(|opt| opt.try_into()).transpose()?;
IndexerConfig { s3_snapshot_options, ..(&opt.indexer_options).try_into()? }
}),
indexer_config: Arc::new((&opt.indexer_options).try_into()?),
autobatching_enabled: true,
cleanup_enabled: !opt.experimental_replication_parameters,
max_number_of_tasks: 1_000_000,
@@ -526,11 +534,7 @@ fn import_dump(
let indexer_config = if base_config.max_threads.is_none() {
let (thread_pool, _) = default_thread_pool_and_threads();

let _config = IndexerConfig {
thread_pool,
s3_snapshot_options: base_config.s3_snapshot_options.clone(),
..*base_config
};
let _config = IndexerConfig { thread_pool, ..*base_config };
backup_config = _config;
&backup_config
} else {
@@ -587,7 +591,7 @@ fn import_dump(
let reader = DocumentsBatchReader::from_reader(reader)?;

let embedder_configs = index.embedding_configs().embedding_configs(&wtxn)?;
let embedders = index_scheduler.embedders(uid.to_string(), embedder_configs)?;
let embedders = index_scheduler.embedders(&uid, embedder_configs)?;

let builder = milli::update::IndexDocuments::new(
&mut wtxn,
@@ -615,7 +619,7 @@ fn import_dump(

let mut indexer = indexer::DocumentOperation::new();
let embedders = index.embedding_configs().embedding_configs(&rtxn)?;
let embedders = index_scheduler.embedders(uid.clone(), embedders)?;
let embedders = index_scheduler.embedders(&uid, embedders)?;

let mmap = unsafe { memmap2::Mmap::map(index_reader.documents_file())? };

@@ -678,26 +682,23 @@ fn import_dump(
Ok(index_scheduler_dump.finish()?)
}

pub fn configure_data(config: &mut web::ServiceConfig, services: ServicesData, opt: &Opt) {
let ServicesData {
index_scheduler,
auth,
search_queue,
personalization_service,
logs_route_handle,
logs_stderr_handle,
analytics,
} = services;

pub fn configure_data(
config: &mut web::ServiceConfig,
index_scheduler: Data<IndexScheduler>,
auth: Data<AuthController>,
search_queue: Data<SearchQueue>,
opt: &Opt,
(logs_route, logs_stderr): (LogRouteHandle, LogStderrHandle),
analytics: Data<Analytics>,
) {
let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize;
config
.app_data(index_scheduler)
.app_data(auth)
.app_data(search_queue)
.app_data(analytics)
.app_data(personalization_service)
.app_data(logs_route_handle)
.app_data(logs_stderr_handle)
.app_data(web::Data::new(logs_route))
.app_data(web::Data::new(logs_stderr))
.app_data(web::Data::new(opt.clone()))
.app_data(
web::JsonConfig::default()
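One side of the `create_app`/`configure_data` change above passes a single `ServicesData` bundle instead of half a dozen separate handles. A hedged, framework-free sketch of that pattern (placeholder types, not the real scheduler/auth/queue handles):

```rust
use std::sync::Arc;

// A cheap-to-clone bundle of shared handles, destructured where they are
// registered, so the function signature stays stable as services are added.
#[derive(Clone)]
struct Services {
    index_scheduler: Arc<String>,
    auth: Arc<String>,
    search_queue: Arc<Vec<u32>>,
}

fn configure(registered: &mut Vec<String>, services: Services) {
    let Services { index_scheduler, auth, search_queue } = services;
    registered.push(format!("scheduler: {index_scheduler}"));
    registered.push(format!("auth: {auth}"));
    registered.push(format!("queue with {} permits", search_queue.len()));
}

fn main() {
    let services = Services {
        index_scheduler: Arc::new("scheduler".into()),
        auth: Arc::new("auth-controller".into()),
        search_queue: Arc::new(vec![1, 2, 3]),
    };
    let mut registered = Vec::new();
    // Each worker factory can own its own clone of the bundle.
    configure(&mut registered, services.clone());
    println!("{registered:?}");
}
```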
@@ -758,14 +759,3 @@ pub fn dashboard(config: &mut web::ServiceConfig, enable_frontend: bool) {
pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
config.service(web::resource("/").route(web::get().to(routes::running)));
}

#[derive(Clone)]
pub struct ServicesData {
pub index_scheduler: Data<IndexScheduler>,
pub auth: Data<AuthController>,
pub search_queue: Data<SearchQueue>,
pub personalization_service: Data<PersonalizationService>,
pub logs_route_handle: Data<LogRouteHandle>,
pub logs_stderr_handle: Data<LogStderrHandle>,
pub analytics: Data<Analytics>,
}

@@ -14,11 +14,10 @@ use index_scheduler::IndexScheduler;
use is_terminal::IsTerminal;
use meilisearch::analytics::Analytics;
use meilisearch::option::LogMode;
use meilisearch::personalization::PersonalizationService;
use meilisearch::search_queue::SearchQueue;
use meilisearch::{
analytics, create_app, setup_meilisearch, LogRouteHandle, LogRouteType, LogStderrHandle,
LogStderrType, Opt, ServicesData, SubscriberForSecondLayer,
LogStderrType, Opt, SubscriberForSecondLayer,
};
use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
@@ -153,15 +152,8 @@ async fn run_http(
let enable_dashboard = &opt.env == "development";
let opt_clone = opt.clone();
let index_scheduler = Data::from(index_scheduler);
let auth = Data::from(auth_controller);
let auth_controller = Data::from(auth_controller);
let analytics = Data::from(analytics);
// Create personalization service with API key from options
let personalization_service = Data::new(
opt.experimental_personalization_api_key
.clone()
.map(PersonalizationService::cohere)
.unwrap_or_else(PersonalizationService::disabled),
);
let search_queue = SearchQueue::new(
opt.experimental_search_queue_size,
available_parallelism()
@@ -173,25 +165,21 @@ async fn run_http(
usize::from(opt.experimental_drop_search_after) as u64
));
let search_queue = Data::new(search_queue);
let (logs_route_handle, logs_stderr_handle) = logs;
let logs_route_handle = Data::new(logs_route_handle);
let logs_stderr_handle = Data::new(logs_stderr_handle);

let services = ServicesData {
index_scheduler,
auth,
search_queue,
personalization_service,
logs_route_handle,
logs_stderr_handle,
analytics,
};

let http_server =
HttpServer::new(move || create_app(services.clone(), opt.clone(), enable_dashboard))
// Disable signals allows the server to terminate immediately when a user enter CTRL-C
.disable_signals()
.keep_alive(KeepAlive::Os);
let http_server = HttpServer::new(move || {
create_app(
index_scheduler.clone(),
auth_controller.clone(),
search_queue.clone(),
opt.clone(),
logs.clone(),
analytics.clone(),
enable_dashboard,
)
})
// Disable signals allows the server to terminate immediately when a user enter CTRL-C
.disable_signals()
.keep_alive(KeepAlive::Os);

if let Some(config) = opt_clone.get_ssl_config()? {
http_server.bind_rustls_0_23(opt_clone.http_addr, config)?.run().await?;

@@ -114,9 +114,4 @@ lazy_static! {
"Meilisearch Task Queue Size Until Stop Registering",
))
.expect("Can't create a metric");
pub static ref MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS: IntGauge = register_int_gauge!(opts!(
"meilisearch_personalized_search_requests",
"Meilisearch number of search requests with personalization"
))
.expect("Can't create a metric");
}

@@ -7,13 +7,12 @@ use std::ops::Deref;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{env, fmt, fs};

use byte_unit::{Byte, ParseError, UnitType};
use clap::Parser;
use meilisearch_types::features::InstanceTogglableFeatures;
use meilisearch_types::milli::update::{IndexerConfig, S3SnapshotOptions};
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::ThreadPoolNoAbortBuilder;
use rustls::server::{ServerSessionMemoryCache, WebPkiClientVerifier};
use rustls::RootCertStore;
@@ -56,10 +55,6 @@ const MEILI_EXPERIMENTAL_ENABLE_LOGS_ROUTE: &str = "MEILI_EXPERIMENTAL_ENABLE_LO
const MEILI_EXPERIMENTAL_CONTAINS_FILTER: &str = "MEILI_EXPERIMENTAL_CONTAINS_FILTER";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING";
const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS";
const MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE: &str = "MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE";
const MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER: &str = "MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER";
@@ -75,22 +70,6 @@ const MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES: &str =
const MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION: &str = "MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS";
const MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY: &str =
"MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY";

// Related to S3 snapshots
const MEILI_S3_BUCKET_URL: &str = "MEILI_S3_BUCKET_URL";
const MEILI_S3_BUCKET_REGION: &str = "MEILI_S3_BUCKET_REGION";
const MEILI_S3_BUCKET_NAME: &str = "MEILI_S3_BUCKET_NAME";
const MEILI_S3_SNAPSHOT_PREFIX: &str = "MEILI_S3_SNAPSHOT_PREFIX";
const MEILI_S3_ACCESS_KEY: &str = "MEILI_S3_ACCESS_KEY";
const MEILI_S3_SECRET_KEY: &str = "MEILI_S3_SECRET_KEY";
const MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS: &str = "MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS";
const MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL: &str = "MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL";
const MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS: &str =
"MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS";
const MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE: &str = "MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE";

const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
const DEFAULT_DB_PATH: &str = "./data.ms";
const DEFAULT_HTTP_ADDR: &str = "localhost:7700";
@@ -100,10 +79,6 @@ const DEFAULT_SNAPSHOT_DIR: &str = "snapshots/";
const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400;
const DEFAULT_SNAPSHOT_INTERVAL_SEC_STR: &str = "86400";
const DEFAULT_DUMP_DIR: &str = "dumps/";
const DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS: NonZeroUsize = NonZeroUsize::new(10).unwrap();
const DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL: u32 = 0;
const DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS: u64 = 8 * 3600; // 8 hours
const DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE: Byte = Byte::from_u64(375 * 1024 * 1024); // 375 MiB

const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY";
const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS";
@@ -496,20 +471,10 @@ pub struct Opt {
#[serde(default)]
pub experimental_no_snapshot_compaction: bool,

/// Experimental personalization API key feature.
///
/// Sets the API key for personalization features.
#[clap(long, env = MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY)]
pub experimental_personalization_api_key: Option<String>,

#[serde(flatten)]
#[clap(flatten)]
pub indexer_options: IndexerOpts,

#[serde(flatten)]
#[clap(flatten)]
pub s3_snapshot_options: Option<S3SnapshotOpts>,

/// Set the path to a configuration file that should be used to setup the engine.
/// Format must be TOML.
#[clap(long)]
@@ -611,8 +576,6 @@ impl Opt {
experimental_limit_batched_tasks_total_size,
experimental_embedding_cache_entries,
experimental_no_snapshot_compaction,
experimental_personalization_api_key,
s3_snapshot_options,
} = self;
export_to_env_if_not_present(MEILI_DB_PATH, db_path);
export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
@@ -713,22 +676,7 @@ impl Opt {
MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION,
experimental_no_snapshot_compaction.to_string(),
);
if let Some(experimental_personalization_api_key) = experimental_personalization_api_key {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY,
experimental_personalization_api_key,
);
}
indexer_options.export_to_env();
if let Some(s3_snapshot_options) = s3_snapshot_options {
#[cfg(not(unix))]
{
let _ = s3_snapshot_options;
panic!("S3 snapshot options are not supported on Windows");
}
#[cfg(unix)]
s3_snapshot_options.export_to_env();
}
}

pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> {
@@ -824,22 +772,6 @@ pub struct IndexerOpts {
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS)]
#[serde(default)]
pub experimental_no_edition_2024_for_dumps: bool,

/// Experimental no edition 2024 to compute prefixes. For more information,
/// see: <https://github.com/orgs/meilisearch/discussions/862>
///
/// Enables the experimental no edition 2024 to compute prefixes.
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING)]
#[serde(default)]
pub experimental_no_edition_2024_for_prefix_post_processing: bool,

/// Experimental no edition 2024 to compute facets. For more information,
/// see: <https://github.com/orgs/meilisearch/discussions/862>
///
/// Enables the experimental no edition 2024 to compute facets.
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING)]
#[serde(default)]
pub experimental_no_edition_2024_for_facet_post_processing: bool,
}

impl IndexerOpts {
@@ -851,8 +783,6 @@ impl IndexerOpts {
skip_index_budget: _,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = self;
if let Some(max_indexing_memory) = max_indexing_memory.0 {
export_to_env_if_not_present(
@@ -878,18 +808,6 @@ impl IndexerOpts {
experimental_no_edition_2024_for_dumps.to_string(),
);
}
if experimental_no_edition_2024_for_prefix_post_processing {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING,
experimental_no_edition_2024_for_prefix_post_processing.to_string(),
);
}
if experimental_no_edition_2024_for_facet_post_processing {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING,
experimental_no_edition_2024_for_facet_post_processing.to_string(),
);
}
}
}

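The options code above leans on `export_to_env_if_not_present` so that values coming from the CLI or a config file never override an environment variable the user set explicitly. A small sketch of that helper (assuming a 2021-edition build, where `std::env::set_var` is a safe function, and a single-threaded setup phase):

```rust
use std::env;

// Only write the variable when it is not already defined by the user.
fn export_to_env_if_not_present(key: &str, value: impl ToString) {
    if env::var_os(key).is_none() {
        env::set_var(key, value.to_string());
    }
}

fn main() {
    env::set_var("MEILI_EXAMPLE", "from-user");
    export_to_env_if_not_present("MEILI_EXAMPLE", "from-config"); // kept as "from-user"
    export_to_env_if_not_present("MEILI_EXAMPLE_OTHER", "from-config"); // newly set
    println!("{} / {}", env::var("MEILI_EXAMPLE").unwrap(), env::var("MEILI_EXAMPLE_OTHER").unwrap());
}
```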
@@ -897,16 +815,6 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
type Error = anyhow::Error;

fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> {
let IndexerOpts {
max_indexing_memory,
max_indexing_threads,
skip_index_budget,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = other;

let thread_pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(other.max_indexing_threads.unwrap_or_else(|| num_cpus::get() / 2))
.build()?;
@@ -914,163 +822,17 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
Ok(Self {
thread_pool,
log_every_n: Some(DEFAULT_LOG_EVERY_N),
max_memory: max_indexing_memory.map(|b| b.as_u64() as usize),
max_threads: max_indexing_threads.0,
max_memory: other.max_indexing_memory.map(|b| b.as_u64() as usize),
max_threads: *other.max_indexing_threads,
max_positions_per_attributes: None,
skip_index_budget: *skip_index_budget,
experimental_no_edition_2024_for_settings: *experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps: *experimental_no_edition_2024_for_dumps,
skip_index_budget: other.skip_index_budget,
experimental_no_edition_2024_for_settings: other
.experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps: other.experimental_no_edition_2024_for_dumps,
chunk_compression_type: Default::default(),
chunk_compression_level: Default::default(),
documents_chunk_size: Default::default(),
max_nb_chunks: Default::default(),
experimental_no_edition_2024_for_prefix_post_processing:
*experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing:
*experimental_no_edition_2024_for_facet_post_processing,
s3_snapshot_options: None,
})
}
}

#[derive(Debug, Clone, Parser, Deserialize)]
// This group is a bit tricky but makes it possible to require all listed fields if one of them
// is specified. It lets us keep an Option for the S3SnapshotOpts configuration.
// <https://github.com/clap-rs/clap/issues/5092#issuecomment-2616986075>
#[group(requires_all = ["s3_bucket_url", "s3_bucket_region", "s3_bucket_name", "s3_snapshot_prefix", "s3_access_key", "s3_secret_key"])]
pub struct S3SnapshotOpts {
/// The S3 bucket URL in the format https://s3.<region>.amazonaws.com.
#[clap(long, env = MEILI_S3_BUCKET_URL, required = false)]
#[serde(default)]
pub s3_bucket_url: String,

/// The region in the format us-east-1.
#[clap(long, env = MEILI_S3_BUCKET_REGION, required = false)]
#[serde(default)]
pub s3_bucket_region: String,

/// The bucket name.
#[clap(long, env = MEILI_S3_BUCKET_NAME, required = false)]
#[serde(default)]
pub s3_bucket_name: String,

/// The prefix path where to put the snapshot, uses normal slashes (/).
#[clap(long, env = MEILI_S3_SNAPSHOT_PREFIX, required = false)]
#[serde(default)]
pub s3_snapshot_prefix: String,

/// The S3 access key.
#[clap(long, env = MEILI_S3_ACCESS_KEY, required = false)]
#[serde(default)]
pub s3_access_key: String,

/// The S3 secret key.
#[clap(long, env = MEILI_S3_SECRET_KEY, required = false)]
#[serde(default)]
pub s3_secret_key: String,

/// The maximum number of parts that can be uploaded in parallel.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS, default_value_t = default_experimental_s3_snapshot_max_in_flight_parts())]
#[serde(default = "default_experimental_s3_snapshot_max_in_flight_parts")]
pub experimental_s3_max_in_flight_parts: NonZeroUsize,

/// The compression level. Defaults to no compression (0).
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL, default_value_t = default_experimental_s3_snapshot_compression_level())]
#[serde(default = "default_experimental_s3_snapshot_compression_level")]
pub experimental_s3_compression_level: u32,

/// The signature duration for the multipart upload.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS, default_value_t = default_experimental_s3_snapshot_signature_duration_seconds())]
#[serde(default = "default_experimental_s3_snapshot_signature_duration_seconds")]
pub experimental_s3_signature_duration_seconds: u64,

/// The size of the the multipart parts.
///
/// Must not be less than 10MiB and larger than 8GiB. Yes,
/// twice the boundaries of the AWS S3 multipart upload
/// because we use it a bit differently internally.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE, default_value_t = default_experimental_s3_snapshot_multipart_part_size())]
#[serde(default = "default_experimental_s3_snapshot_multipart_part_size")]
pub experimental_s3_multipart_part_size: Byte,
}

impl S3SnapshotOpts {
/// Exports the values to their corresponding env vars if they are not set.
pub fn export_to_env(self) {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = self;

export_to_env_if_not_present(MEILI_S3_BUCKET_URL, s3_bucket_url);
export_to_env_if_not_present(MEILI_S3_BUCKET_REGION, s3_bucket_region);
export_to_env_if_not_present(MEILI_S3_BUCKET_NAME, s3_bucket_name);
export_to_env_if_not_present(MEILI_S3_SNAPSHOT_PREFIX, s3_snapshot_prefix);
export_to_env_if_not_present(MEILI_S3_ACCESS_KEY, s3_access_key);
export_to_env_if_not_present(MEILI_S3_SECRET_KEY, s3_secret_key);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS,
experimental_s3_max_in_flight_parts.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL,
experimental_s3_compression_level.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS,
experimental_s3_signature_duration_seconds.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE,
experimental_s3_multipart_part_size.to_string(),
);
}
}

impl TryFrom<S3SnapshotOpts> for S3SnapshotOptions {
type Error = anyhow::Error;

fn try_from(other: S3SnapshotOpts) -> Result<Self, Self::Error> {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = other;

Ok(S3SnapshotOptions {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts: experimental_s3_max_in_flight_parts,
s3_compression_level: experimental_s3_compression_level,
s3_signature_duration: Duration::from_secs(experimental_s3_signature_duration_seconds),
s3_multipart_part_size: experimental_s3_multipart_part_size.as_u64(),
})
}
}
@@ -1289,22 +1051,6 @@ fn default_snapshot_interval_sec() -> &'static str {
DEFAULT_SNAPSHOT_INTERVAL_SEC_STR
}

fn default_experimental_s3_snapshot_max_in_flight_parts() -> NonZeroUsize {
DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS
}

fn default_experimental_s3_snapshot_compression_level() -> u32 {
DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL
}

fn default_experimental_s3_snapshot_signature_duration_seconds() -> u64 {
DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS
}

fn default_experimental_s3_snapshot_multipart_part_size() -> Byte {
DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE
}

fn default_dump_dir() -> PathBuf {
PathBuf::from(DEFAULT_DUMP_DIR)
}

@@ -1,366 +0,0 @@
use crate::search::{Personalize, SearchResult};
use meilisearch_types::{
error::{Code, ErrorCode, ResponseError},
milli::TimeBudget,
};
use rand::Rng;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::{debug, info, warn};

const COHERE_API_URL: &str = "https://api.cohere.ai/v1/rerank";
const MAX_RETRIES: u32 = 10;

#[derive(Debug, thiserror::Error)]
enum PersonalizationError {
#[error("Personalization service: HTTP request failed: {0}")]
Request(#[from] reqwest::Error),
#[error("Personalization service: Failed to parse response: {0}")]
Parse(String),
#[error("Personalization service: Cohere API error: {0}")]
Api(String),
#[error("Personalization service: Unauthorized: invalid API key")]
Unauthorized,
#[error("Personalization service: Rate limited: too many requests")]
RateLimited,
#[error("Personalization service: Bad request: {0}")]
BadRequest(String),
#[error("Personalization service: Internal server error: {0}")]
InternalServerError(String),
#[error("Personalization service: Network error: {0}")]
Network(String),
#[error("Personalization service: Deadline exceeded")]
DeadlineExceeded,
#[error(transparent)]
FeatureNotEnabled(#[from] index_scheduler::error::FeatureNotEnabledError),
}

impl ErrorCode for PersonalizationError {
fn error_code(&self) -> Code {
match self {
PersonalizationError::FeatureNotEnabled { .. } => Code::FeatureNotEnabled,
PersonalizationError::Unauthorized => Code::RemoteInvalidApiKey,
PersonalizationError::RateLimited => Code::TooManySearchRequests,
PersonalizationError::BadRequest(_) => Code::RemoteBadRequest,
PersonalizationError::InternalServerError(_) => Code::RemoteRemoteError,
PersonalizationError::Network(_) | PersonalizationError::Request(_) => {
Code::RemoteCouldNotSendRequest
}
PersonalizationError::Parse(_) | PersonalizationError::Api(_) => {
Code::RemoteBadResponse
}
PersonalizationError::DeadlineExceeded => Code::Internal, // should not be returned to the client
}
}
}

pub struct CohereService {
client: Client,
api_key: String,
}

impl CohereService {
pub fn new(api_key: String) -> Self {
info!("Personalization service initialized with Cohere API");
let client = Client::builder()
.timeout(Duration::from_secs(30))
.build()
.expect("Failed to create HTTP client");
Self { client, api_key }
}

pub async fn rerank_search_results(
&self,
search_result: SearchResult,
personalize: &Personalize,
query: Option<&str>,
time_budget: TimeBudget,
) -> Result<SearchResult, ResponseError> {
if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}

// Extract user context from personalization
let user_context = personalize.user_context.as_str();

// Build the prompt by merging query and user context
let prompt = match query {
Some(q) => format!("User Context: {user_context}\nQuery: {q}"),
None => format!("User Context: {user_context}"),
};

// Extract documents for reranking
let documents: Vec<String> = search_result
.hits
.iter()
.map(|hit| {
// Convert the document to a string representation for reranking
serde_json::to_string(&hit.document).unwrap_or_else(|_| "{}".to_string())
})
.collect();

if documents.is_empty() {
return Ok(search_result);
}

// Call Cohere's rerank API with retry logic
let reranked_indices =
match self.call_rerank_with_retry(&prompt, &documents, time_budget).await {
Ok(indices) => indices,
Err(PersonalizationError::DeadlineExceeded) => {
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}
Err(e) => return Err(e.into()),
};

debug!("Cohere rerank successful, reordering {} results", search_result.hits.len());

// Reorder the hits based on Cohere's reranking
let mut reranked_hits = Vec::new();
for index in reranked_indices.iter() {
if let Some(hit) = search_result.hits.get(*index) {
reranked_hits.push(hit.clone());
}
}

Ok(SearchResult { hits: reranked_hits, ..search_result })
}

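The reranking path above forwards the hit list to Cohere and then rebuilds it in the returned order. That reordering step reduces to a small function; this sketch also shows that out-of-range indices are silently dropped, as in the loop above:

```rust
fn reorder<T: Clone>(hits: &[T], ranked_indices: &[usize]) -> Vec<T> {
    // Indices come back from the reranker; anything out of range is skipped.
    ranked_indices.iter().filter_map(|&i| hits.get(i).cloned()).collect()
}

fn main() {
    let hits = vec!["a", "b", "c"];
    assert_eq!(reorder(&hits, &[2, 0, 1]), vec!["c", "a", "b"]);
    assert_eq!(reorder(&hits, &[5, 1]), vec!["b"]);
}
```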
async fn call_rerank_with_retry(
&self,
query: &str,
documents: &[String],
time_budget: TimeBudget,
) -> Result<Vec<usize>, PersonalizationError> {
let request_body = CohereRerankRequest {
query: query.to_string(),
documents: documents.to_vec(),
model: "rerank-english-v3.0".to_string(),
};

// Retry loop similar to vector extraction
for attempt in 0..MAX_RETRIES {
let response_result = self.send_rerank_request(&request_body).await;

let retry_duration = match self.handle_response(response_result).await {
Ok(indices) => return Ok(indices),
Err(retry) => {
warn!("Cohere rerank attempt #{} failed: {}", attempt, retry.error);

if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
return Err(PersonalizationError::DeadlineExceeded);
} else {
match retry.into_duration(attempt) {
Ok(d) => d,
Err(error) => return Err(error),
}
}
}
};

// randomly up to double the retry duration
let retry_duration = retry_duration
+ rand::thread_rng().gen_range(std::time::Duration::ZERO..retry_duration);

warn!("Retrying after {}ms", retry_duration.as_millis());
tokio::time::sleep(retry_duration).await;
}

// Final attempt without retry
let response_result = self.send_rerank_request(&request_body).await;

match self.handle_response(response_result).await {
Ok(indices) => Ok(indices),
Err(retry) => Err(retry.into_error()),
}
}

async fn send_rerank_request(
&self,
request_body: &CohereRerankRequest,
) -> Result<reqwest::Response, reqwest::Error> {
self.client
.post(COHERE_API_URL)
.header("Authorization", format!("Bearer {}", self.api_key))
.header("Content-Type", "application/json")
.json(request_body)
.send()
.await
}

async fn handle_response(
&self,
response_result: Result<reqwest::Response, reqwest::Error>,
) -> Result<Vec<usize>, Retry> {
let response = match response_result {
Ok(r) => r,
Err(e) if e.is_timeout() => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Request timeout: {}",
e
))));
}
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Network error: {}",
e
))));
}
};

let status = response.status();
let status_code = status.as_u16();

if status.is_success() {
let rerank_response: CohereRerankResponse = match response.json().await {
Ok(r) => r,
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Parse(format!(
"Failed to parse response: {}",
e
))));
}
};

// Extract indices from rerank results
let indices: Vec<usize> =
rerank_response.results.iter().map(|result| result.index as usize).collect();

return Ok(indices);
}

// Handle error status codes
let error_body = response.text().await.unwrap_or_else(|_| "Unknown error".to_string());

let retry = match status_code {
401 => Retry::give_up(PersonalizationError::Unauthorized),
429 => Retry::rate_limited(PersonalizationError::RateLimited),
400 => Retry::give_up(PersonalizationError::BadRequest(error_body)),
500..=599 => Retry::retry_later(PersonalizationError::InternalServerError(format!(
"Status {}: {}",
status_code, error_body
))),
402..=499 => Retry::give_up(PersonalizationError::Api(format!(
"Status {}: {}",
status_code, error_body
))),
_ => Retry::retry_later(PersonalizationError::Api(format!(
"Unexpected status {}: {}",
status_code, error_body
))),
};

Err(retry)
}
}

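The status-code handling above maps each HTTP response class to a retry decision. A reduced sketch of that mapping (enum and function names are illustrative):

```rust
#[derive(Debug, PartialEq)]
enum Strategy {
    GiveUp,
    Retry,
    RetryAfterRateLimit,
}

// 401 and 400 give up immediately, 429 backs off longer, 5xx retries,
// any other 4xx gives up, and anything else is retried.
fn classify(status: u16) -> Strategy {
    match status {
        401 | 400 => Strategy::GiveUp,
        429 => Strategy::RetryAfterRateLimit,
        500..=599 => Strategy::Retry,
        402..=499 => Strategy::GiveUp,
        _ => Strategy::Retry,
    }
}

fn main() {
    assert_eq!(classify(429), Strategy::RetryAfterRateLimit);
    assert_eq!(classify(503), Strategy::Retry);
    assert_eq!(classify(404), Strategy::GiveUp);
}
```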
#[derive(Serialize)]
struct CohereRerankRequest {
query: String,
documents: Vec<String>,
model: String,
}

#[derive(Deserialize)]
struct CohereRerankResponse {
results: Vec<CohereRerankResult>,
}

#[derive(Deserialize)]
struct CohereRerankResult {
index: u32,
}

// Retry strategy similar to vector extraction
struct Retry {
error: PersonalizationError,
strategy: RetryStrategy,
}

enum RetryStrategy {
GiveUp,
Retry,
RetryAfterRateLimit,
}

impl Retry {
fn give_up(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::GiveUp }
}

fn retry_later(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::Retry }
}

fn rate_limited(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::RetryAfterRateLimit }
}

fn into_duration(self, attempt: u32) -> Result<Duration, PersonalizationError> {
match self.strategy {
RetryStrategy::GiveUp => Err(self.error),
RetryStrategy::Retry => {
// Exponential backoff: 10^attempt milliseconds
Ok(Duration::from_millis((10u64).pow(attempt)))
}
RetryStrategy::RetryAfterRateLimit => {
// Longer backoff for rate limits: 100ms + exponential
Ok(Duration::from_millis(100 + (10u64).pow(attempt)))
}
}
}

fn into_error(self) -> PersonalizationError {
self.error
}
}

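`Retry::into_duration` above computes the wait between attempts. Isolated, the schedule looks like this (the call site additionally adds a random jitter of up to the same duration):

```rust
use std::time::Duration;

// 10^attempt milliseconds, with an extra 100 ms added when rate limited.
fn backoff(attempt: u32, rate_limited: bool) -> Duration {
    let base = Duration::from_millis(10u64.pow(attempt));
    if rate_limited {
        Duration::from_millis(100) + base
    } else {
        base
    }
}

fn main() {
    for attempt in 0..4 {
        // attempt 0: 1ms, 1: 10ms, 2: 100ms, 3: 1s
        println!("attempt {attempt}: {:?}", backoff(attempt, false));
    }
}
```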
pub enum PersonalizationService {
|
||||
Cohere(CohereService),
|
||||
Disabled,
|
||||
}
|
||||
|
||||
impl PersonalizationService {
|
||||
pub fn cohere(api_key: String) -> Self {
|
||||
// If the API key is empty, consider the personalization service as disabled
|
||||
if api_key.trim().is_empty() {
|
||||
Self::disabled()
|
||||
} else {
|
||||
Self::Cohere(CohereService::new(api_key))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn disabled() -> Self {
|
||||
debug!("Personalization service disabled");
|
||||
Self::Disabled
|
||||
}
|
||||
|
||||
pub async fn rerank_search_results(
|
||||
&self,
|
||||
search_result: SearchResult,
|
||||
personalize: &Personalize,
|
||||
query: Option<&str>,
|
||||
time_budget: TimeBudget,
|
||||
) -> Result<SearchResult, ResponseError> {
|
||||
match self {
|
||||
Self::Cohere(cohere_service) => {
|
||||
cohere_service
|
||||
.rerank_search_results(search_result, personalize, query, time_budget)
|
||||
.await
|
||||
}
|
||||
Self::Disabled => Err(PersonalizationError::FeatureNotEnabled(
|
||||
index_scheduler::error::FeatureNotEnabledError {
|
||||
disabled_action: "reranking search results",
|
||||
feature: "personalization",
|
||||
issue_link: "https://github.com/orgs/meilisearch/discussions/866",
|
||||
},
|
||||
)
|
||||
.into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -282,8 +282,7 @@ async fn process_search_request(
|
||||
if let Some(search_rules) = auth_filter.get_index_search_rules(&index_uid) {
|
||||
add_search_rules(&mut query.filter, search_rules);
|
||||
}
|
||||
let search_kind =
|
||||
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
|
||||
let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
|
||||
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let features = index_scheduler.features();
|
||||
@@ -300,7 +299,7 @@ async fn process_search_request(
|
||||
let (search, _is_finite_pagination, _max_total_hits, _offset) =
|
||||
prepare_search(&index_cloned, &rtxn, &query, &search_kind, time_budget, features)?;
|
||||
|
||||
match search_from_kind(index_uid, search_kind, search) {
|
||||
match search_from_kind(&index_uid, search_kind, search) {
|
||||
Ok((search_results, _)) => Ok((rtxn, Ok(search_results))),
|
||||
Err(MeilisearchHttpError::Milli {
|
||||
error: meilisearch_types::milli::Error::UserError(user_error),
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
use actix_web::web::{self, Data};
|
||||
use actix_web::{HttpRequest, HttpResponse};
|
||||
use index_scheduler::IndexScheduler;
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::index_uid::IndexUid;
|
||||
use meilisearch_types::keys::actions;
|
||||
use meilisearch_types::tasks::KindWithContent;
|
||||
use tracing::debug;
|
||||
use utoipa::OpenApi;
|
||||
|
||||
use super::ActionPolicy;
|
||||
use crate::analytics::Analytics;
|
||||
use crate::extractors::authentication::GuardedData;
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::SummarizedTaskView;
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
paths(compact),
|
||||
tags(
|
||||
(
|
||||
name = "Compact an index",
|
||||
description = "The /compact route uses compacts the database to reorganize and make it smaller and more efficient.",
|
||||
external_docs(url = "https://www.meilisearch.com/docs/reference/api/compact"),
|
||||
),
|
||||
),
|
||||
)]
|
||||
pub struct CompactApi;
|
||||
|
||||
pub fn configure(cfg: &mut web::ServiceConfig) {
|
||||
cfg.service(web::resource("").route(web::post().to(SeqHandler(compact))));
|
||||
}
|
||||
|
||||
/// Compact an index
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "{indexUid}/compact",
|
||||
tag = "Compact an index",
|
||||
security(("Bearer" = ["search", "*"])),
|
||||
params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),
|
||||
responses(
|
||||
(status = ACCEPTED, description = "Task successfully enqueued", body = SummarizedTaskView, content_type = "application/json", example = json!(
|
||||
{
|
||||
"taskUid": 147,
|
||||
"indexUid": null,
|
||||
"status": "enqueued",
|
||||
"type": "documentDeletion",
|
||||
"enqueuedAt": "2024-08-08T17:05:55.791772Z"
|
||||
}
|
||||
)),
|
||||
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
|
||||
{
|
||||
"message": "The Authorization header is missing. It must use the bearer authorization method.",
|
||||
"code": "missing_authorization_header",
|
||||
"type": "auth",
|
||||
"link": "https://docs.meilisearch.com/errors#missing_authorization_header"
|
||||
}
|
||||
)),
|
||||
)
|
||||
)]
|
||||
pub async fn compact(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_COMPACT }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
req: HttpRequest,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
|
||||
analytics.publish(IndexCompacted::default(), &req);
|
||||
|
||||
let task = KindWithContent::IndexCompaction { index_uid: index_uid.to_string() };
|
||||
let task =
|
||||
match tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false))
|
||||
.await?
|
||||
{
|
||||
Ok(task) => task,
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
|
||||
debug!(returns = ?task, "Compact the {index_uid} index");
|
||||
Ok(HttpResponse::Accepted().json(SummarizedTaskView::from(task)))
|
||||
}
|
||||
|
||||
crate::empty_analytics!(IndexCompacted, "Index Compacted");
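A note on the pattern used in the handler above: `index_scheduler.register(...)` performs synchronous database writes, so the route hands it to `tokio::task::spawn_blocking` rather than calling it directly on the async executor. A minimal, self-contained sketch of that pattern, assuming tokio with the `rt-multi-thread` and `macros` features and a plain function standing in for the scheduler:

use std::time::Duration;

// Stand-in for the synchronous scheduler write (e.g. registering an
// `IndexCompaction` task); it must not run on the async worker threads.
fn register_blocking(kind: &str) -> Result<u64, String> {
    std::thread::sleep(Duration::from_millis(10)); // pretend to hit the task queue
    Ok(kind.len() as u64)
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let kind = "indexCompaction".to_string();
    // `spawn_blocking` moves the closure onto the blocking thread pool; the outer
    // `?` surfaces a JoinError, the inner `?` the error returned by the closure,
    // mirroring the doubled `?`/`.await?` seen in the handlers of this diff.
    let task_uid = tokio::task::spawn_blocking(move || register_blocking(&kind)).await??;
    println!("enqueued task {task_uid}");
    Ok(())
}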
@@ -45,7 +45,7 @@ use crate::extractors::authentication::policies::*;
|
||||
use crate::extractors::authentication::GuardedData;
|
||||
use crate::extractors::payload::Payload;
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
|
||||
use crate::routes::indexes::enterprise_edition::proxy::{check_leader, proxy, Body};
|
||||
use crate::routes::indexes::search::fix_sort_query_parameters;
|
||||
use crate::routes::{
|
||||
get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
|
||||
@@ -333,15 +333,14 @@ impl Aggregate for DocumentsDeletionAggregator {
|
||||
pub async fn delete_document(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
path: web::Path<DocumentParam>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
let DocumentParam { index_uid, document_id } = path.into_inner();
|
||||
let index_uid = IndexUid::try_from(index_uid)?;
|
||||
let network = index_scheduler.network();
|
||||
let origin = check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -361,14 +360,11 @@ pub async fn delete_document(
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
|
||||
})
|
||||
.await??
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
|
||||
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -683,19 +679,6 @@ pub struct UpdateDocumentsQuery {
|
||||
#[param(value_type = char, default = ",", example = ";")]
|
||||
#[deserr(default, try_from(char) = from_char_csv_delimiter -> DeserrQueryParamError<InvalidDocumentCsvDelimiter>, error = DeserrQueryParamError<InvalidDocumentCsvDelimiter>)]
|
||||
pub csv_delimiter: Option<u8>,
|
||||
|
||||
#[param(example = "custom")]
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
|
||||
pub custom_metadata: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Deserr, IntoParams)]
|
||||
#[deserr(error = DeserrQueryParamError, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[into_params(parameter_in = Query, rename_all = "camelCase")]
|
||||
pub struct CustomMetadataQuery {
|
||||
#[param(example = "custom")]
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
|
||||
pub custom_metadata: Option<String>,
|
||||
}
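The `CustomMetadataQuery` above lets write routes accept an optional `?customMetadata=...` query parameter, which is passed to `register_with_custom_metadata` and echoed back on the summarized task view. A minimal sketch of the parameter shape, deserialized here with `serde`/`serde_urlencoded` instead of the deserr machinery used by the real route:

use serde::Deserialize;

// Hypothetical mirror of `CustomMetadataQuery`, for illustration only.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct CustomMetadataQuery {
    custom_metadata: Option<String>,
}

fn main() {
    let q: CustomMetadataQuery =
        serde_urlencoded::from_str("customMetadata=my-batch-42").unwrap();
    assert_eq!(q.custom_metadata.as_deref(), Some("my-batch-42"));
}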
|
||||
fn from_char_csv_delimiter(
|
||||
@@ -837,7 +820,6 @@ pub async fn replace_documents(
|
||||
body,
|
||||
IndexDocumentsMethod::ReplaceDocuments,
|
||||
uid,
|
||||
params.custom_metadata,
|
||||
dry_run,
|
||||
allow_index_creation,
|
||||
&req,
|
||||
@@ -940,7 +922,6 @@ pub async fn update_documents(
|
||||
body,
|
||||
IndexDocumentsMethod::UpdateDocuments,
|
||||
uid,
|
||||
params.custom_metadata,
|
||||
dry_run,
|
||||
allow_index_creation,
|
||||
&req,
|
||||
@@ -960,13 +941,13 @@ async fn document_addition(
|
||||
body: Payload,
|
||||
method: IndexDocumentsMethod,
|
||||
task_id: Option<TaskId>,
|
||||
custom_metadata: Option<String>,
|
||||
dry_run: bool,
|
||||
allow_index_creation: bool,
|
||||
req: &HttpRequest,
|
||||
) -> Result<SummarizedTaskView, MeilisearchHttpError> {
|
||||
let mime_type = extract_mime_type(req)?;
|
||||
let network = index_scheduler.network();
|
||||
let origin = check_leader(&req, &network)?;
|
||||
|
||||
let format = match (
|
||||
mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),
|
||||
@@ -1086,10 +1067,8 @@ async fn document_addition(
|
||||
};
|
||||
|
||||
let scheduler = index_scheduler.clone();
|
||||
let task = match tokio::task::spawn_blocking(move || {
|
||||
scheduler.register_with_custom_metadata(task, task_id, custom_metadata, dry_run)
|
||||
})
|
||||
.await?
|
||||
let task = match tokio::task::spawn_blocking(move || scheduler.register(task, task_id, dry_run))
|
||||
.await?
|
||||
{
|
||||
Ok(task) => task,
|
||||
Err(e) => {
|
||||
@@ -1104,6 +1083,7 @@ async fn document_addition(
|
||||
&index_scheduler,
|
||||
&index_uid,
|
||||
req,
|
||||
origin,
|
||||
network,
|
||||
Body::with_ndjson_payload(file),
|
||||
&task,
|
||||
@@ -1153,7 +1133,7 @@ async fn copy_body_to_file(
|
||||
/// Delete a set of documents based on an array of document ids.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "{indexUid}/documents/delete-batch",
|
||||
path = "{indexUid}/delete-batch",
|
||||
tag = "Documents",
|
||||
security(("Bearer" = ["documents.delete", "documents.*", "*"])),
|
||||
params(
|
||||
@@ -1184,16 +1164,14 @@ pub async fn delete_documents_batch(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
body: web::Json<Vec<Value>>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
debug!(parameters = ?body, "Delete documents by batch");
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let network = index_scheduler.network();
|
||||
let origin = check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -1216,14 +1194,12 @@ pub async fn delete_documents_batch(
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
|
||||
})
|
||||
.await??
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
|
||||
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(body), &task)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -1273,19 +1249,17 @@ pub struct DocumentDeletionByFilter {
|
||||
pub async fn delete_documents_by_filter(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
body: AwebJson<DocumentDeletionByFilter, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
debug!(parameters = ?body, "Delete documents by filter");
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let index_uid = index_uid.into_inner();
|
||||
let filter = body.into_inner();
|
||||
let network = index_scheduler.network();
|
||||
let origin = check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -1314,14 +1288,12 @@ pub async fn delete_documents_by_filter(
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
|
||||
})
|
||||
.await??
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
|
||||
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(filter), &task)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -1407,40 +1379,39 @@ impl Aggregate for EditDocumentsByFunctionAggregator {
|
||||
pub async fn edit_documents_by_function(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ALL }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
body: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
|
||||
params: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
debug!(parameters = ?body, "Edit documents by function");
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
debug!(parameters = ?params, "Edit documents by function");
|
||||
|
||||
index_scheduler
|
||||
.features()
|
||||
.check_edit_documents_by_function("Using the documents edit route")?;
|
||||
|
||||
let network = index_scheduler.network();
|
||||
let origin = check_leader(&req, &network)?;
|
||||
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let index_uid = index_uid.into_inner();
|
||||
let body = body.into_inner();
|
||||
let params = params.into_inner();
|
||||
|
||||
analytics.publish(
|
||||
EditDocumentsByFunctionAggregator {
|
||||
filtered: body.filter.is_some(),
|
||||
with_context: body.context.is_some(),
|
||||
filtered: params.filter.is_some(),
|
||||
with_context: params.context.is_some(),
|
||||
index_creation: index_scheduler.index(&index_uid).is_err(),
|
||||
},
|
||||
&req,
|
||||
);
|
||||
|
||||
let engine = milli::rhai::Engine::new();
|
||||
if let Err(e) = engine.compile(&body.function) {
|
||||
if let Err(e) = engine.compile(¶ms.function) {
|
||||
return Err(ResponseError::from_msg(e.to_string(), Code::BadRequest));
|
||||
}
|
||||
|
||||
if let Some(ref filter) = body.filter {
|
||||
if let Some(ref filter) = params.filter {
|
||||
// we ensure the filter is well formed before enqueuing it
|
||||
crate::search::parse_filter(
|
||||
filter,
|
||||
@@ -1451,8 +1422,8 @@ pub async fn edit_documents_by_function(
|
||||
}
|
||||
let task = KindWithContent::DocumentEdition {
|
||||
index_uid: index_uid.clone(),
|
||||
filter_expr: body.filter.clone(),
|
||||
context: match body.context.clone() {
|
||||
filter_expr: params.filter.clone(),
|
||||
context: match params.context.clone() {
|
||||
Some(Value::Object(m)) => Some(m),
|
||||
None => None,
|
||||
_ => {
|
||||
@@ -1462,21 +1433,19 @@ pub async fn edit_documents_by_function(
|
||||
))
|
||||
}
|
||||
},
|
||||
function: body.function.clone(),
|
||||
function: params.function.clone(),
|
||||
};
|
||||
|
||||
let uid = get_task_id(&req, &opt)?;
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
|
||||
})
|
||||
.await??
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
|
||||
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(params), &task)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -1517,14 +1486,13 @@ pub async fn edit_documents_by_function(
|
||||
pub async fn clear_all_documents(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let network = index_scheduler.network();
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
let origin = check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -1543,14 +1511,11 @@ pub async fn clear_all_documents(
|
||||
let task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
|
||||
})
|
||||
.await??
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
|
||||
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
|
||||
@@ -38,6 +38,27 @@ impl Body<()> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_leader(
|
||||
req: &HttpRequest,
|
||||
network: &meilisearch_types::enterprise_edition::network::DbNetwork,
|
||||
) -> Result<Option<Origin>, MeilisearchHttpError> {
|
||||
match origin_from_req(req)? {
|
||||
Some(origin) => Ok(Some(origin)),
|
||||
None => {
|
||||
let this = network
|
||||
.local
|
||||
.as_deref()
|
||||
.expect("inconsistent `network.sharding` and `network.self`");
|
||||
|
||||
let is_leader = this == todo!();
|
||||
if !is_leader {
|
||||
return Err(MeilisearchHttpError::NotLeader { leader: todo!() });
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// If necessary, proxies the passed request to the network and updates the task description.
///
/// This function reads the custom headers from the request to determine if it must proxy the request or if the request
|
||||
@@ -52,11 +73,12 @@ pub async fn proxy<T: serde::Serialize>(
|
||||
index_scheduler: &IndexScheduler,
|
||||
index_uid: &str,
|
||||
req: &HttpRequest,
|
||||
network: meilisearch_types::enterprise_edition::network::Network,
|
||||
origin: Option<Origin>,
|
||||
network: meilisearch_types::enterprise_edition::network::DbNetwork,
|
||||
body: Body<T>,
|
||||
task: &meilisearch_types::tasks::Task,
|
||||
) -> Result<(), MeilisearchHttpError> {
|
||||
match origin_from_req(req)? {
|
||||
match origin {
|
||||
Some(origin) => {
|
||||
index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
|
||||
}
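The hunk above changes `proxy` to receive the request `Origin` resolved once by `check_leader`, instead of re-parsing it from the headers. A standalone sketch of the intended flow, with placeholder types rather than the real Meilisearch items (note that in this draft the actual leader comparison in `check_leader` is still a `todo!()`):

#[derive(Debug, Clone)]
struct Origin { remote: String }

fn check_leader(origin_header: Option<&str>) -> Result<Option<Origin>, String> {
    match origin_header {
        // The task was proxied to us by another node: remember where it came from.
        Some(remote) => Ok(Some(Origin { remote: remote.to_string() })),
        // No origin header: this is a client request and this node acts as the leader.
        None => Ok(None),
    }
}

fn proxy(origin: Option<Origin>, remotes: &[&str], task_uid: u64) {
    match origin {
        // Proxied task: only record its origin, never fan it out a second time.
        Some(origin) => println!("task {task_uid} came from {}", origin.remote),
        // Leader: forward the task to every other remote in the network.
        None => {
            for remote in remotes {
                println!("forwarding task {task_uid} to {remote}");
            }
        }
    }
}

fn main() -> Result<(), String> {
    let origin = check_leader(None)?; // a direct client request
    let task_uid = 42;                // pretend the scheduler returned this uid
    proxy(origin, &["ms-1", "ms-2"], task_uid);
    Ok(())
}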
|
||||
@@ -260,7 +260,7 @@ pub async fn search(
|
||||
}
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
let search_kind = search_kind(&search_query, &index_scheduler, index_uid.to_string(), &index)?;
|
||||
let search_kind = search_kind(&search_query, &index_scheduler, &index_uid, &index)?;
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let search_result = tokio::task::spawn_blocking(move || {
|
||||
perform_facet_search(
|
||||
@@ -343,7 +343,6 @@ impl From<FacetSearchQuery> for SearchQuery {
|
||||
hybrid,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
personalize: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,7 +28,6 @@ use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::is_dry_run;
|
||||
use crate::Opt;
|
||||
|
||||
pub mod compact;
|
||||
pub mod documents;
|
||||
mod enterprise_edition;
|
||||
pub mod facet_search;
|
||||
@@ -50,9 +49,8 @@ pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TAS
|
||||
(path = "/", api = facet_search::FacetSearchApi),
|
||||
(path = "/", api = similar::SimilarApi),
|
||||
(path = "/", api = settings::SettingsApi),
|
||||
(path = "/", api = compact::CompactApi),
|
||||
),
|
||||
paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats, compact::compact),
|
||||
paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats),
|
||||
tags(
|
||||
(
|
||||
name = "Indexes",
|
||||
@@ -82,8 +80,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
|
||||
.service(web::scope("/search").configure(search::configure))
|
||||
.service(web::scope("/facet-search").configure(facet_search::configure))
|
||||
.service(web::scope("/similar").configure(similar::configure))
|
||||
.service(web::scope("/settings").configure(settings::configure))
|
||||
.service(web::scope("/compact").configure(compact::configure)),
|
||||
.service(web::scope("/settings").configure(settings::configure)),
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ use meilisearch_types::serde_cs::vec::CS;
|
||||
use serde_json::Value;
|
||||
use tracing::debug;
|
||||
use utoipa::{IntoParams, OpenApi};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::analytics::Analytics;
|
||||
use crate::error::MeilisearchHttpError;
|
||||
@@ -22,12 +21,11 @@ use crate::extractors::authentication::GuardedData;
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::metrics::MEILISEARCH_DEGRADED_SEARCH_REQUESTS;
|
||||
use crate::routes::indexes::search_analytics::{SearchAggregator, SearchGET, SearchPOST};
|
||||
use crate::routes::parse_include_metadata_header;
|
||||
use crate::search::{
|
||||
add_search_rules, perform_search, HybridQuery, MatchingStrategy, Personalize,
|
||||
RankingScoreThreshold, RetrieveVectors, SearchKind, SearchParams, SearchQuery, SearchResult,
|
||||
SemanticRatio, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG,
|
||||
DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO,
|
||||
add_search_rules, perform_search, HybridQuery, MatchingStrategy, RankingScoreThreshold,
|
||||
RetrieveVectors, SearchKind, SearchQuery, SearchResult, SemanticRatio, DEFAULT_CROP_LENGTH,
|
||||
DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG,
|
||||
DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO,
|
||||
};
|
||||
use crate::search_queue::SearchQueue;
|
||||
|
||||
@@ -134,8 +132,6 @@ pub struct SearchQueryGet {
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidSearchLocales>)]
|
||||
#[param(value_type = Vec<Locale>, explode = false)]
|
||||
pub locales: Option<CS<Locale>>,
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidSearchPersonalizeUserContext>)]
|
||||
pub personalize_user_context: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, deserr::Deserr)]
|
||||
@@ -207,9 +203,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
|
||||
));
|
||||
}
|
||||
|
||||
let personalize =
|
||||
other.personalize_user_context.map(|user_context| Personalize { user_context });
|
||||
|
||||
Ok(Self {
|
||||
q: other.q,
|
||||
// `media` not supported for `GET`
|
||||
@@ -239,7 +232,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
|
||||
hybrid,
|
||||
ranking_score_threshold: other.ranking_score_threshold.map(|o| o.0),
|
||||
locales: other.locales.map(|o| o.into_iter().collect()),
|
||||
personalize,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -328,14 +320,12 @@ pub fn fix_sort_query_parameters(sort_query: &str) -> Vec<String> {
|
||||
pub async fn search_with_url_query(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
|
||||
search_queue: web::Data<SearchQueue>,
|
||||
personalization_service: web::Data<crate::personalization::PersonalizationService>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebQueryParameter<SearchQueryGet, DeserrQueryParamError>,
|
||||
req: HttpRequest,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let request_uid = Uuid::now_v7();
|
||||
debug!(request_uid = ?request_uid, parameters = ?params, "Search get");
|
||||
debug!(parameters = ?params, "Search get");
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
|
||||
let mut query: SearchQuery = params.into_inner().try_into()?;
|
||||
@@ -349,56 +339,30 @@ pub async fn search_with_url_query(
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
|
||||
// Extract personalization and query string before moving query
|
||||
let personalize = query.personalize.take();
|
||||
|
||||
let search_kind =
|
||||
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
|
||||
let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
|
||||
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
// Save the query string for personalization if requested
|
||||
let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
|
||||
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let include_metadata = parse_include_metadata_header(&req);
|
||||
|
||||
let search_result = tokio::task::spawn_blocking(move || {
|
||||
perform_search(
|
||||
SearchParams {
|
||||
index_uid: index_uid.to_string(),
|
||||
query,
|
||||
search_kind,
|
||||
retrieve_vectors: retrieve_vector,
|
||||
features: index_scheduler.features(),
|
||||
request_uid,
|
||||
include_metadata,
|
||||
},
|
||||
&index_uid,
|
||||
&index,
|
||||
query,
|
||||
search_kind,
|
||||
retrieve_vector,
|
||||
index_scheduler.features(),
|
||||
)
|
||||
})
|
||||
.await;
|
||||
permit.drop().await;
|
||||
let search_result = search_result?;
|
||||
if let Ok((search_result, _)) = search_result.as_ref() {
|
||||
if let Ok(ref search_result) = search_result {
|
||||
aggregate.succeed(search_result);
|
||||
}
|
||||
analytics.publish(aggregate, &req);
|
||||
|
||||
let (mut search_result, time_budget) = search_result?;
|
||||
let search_result = search_result?;
|
||||
|
||||
// Apply personalization if requested
|
||||
if let Some(personalize) = personalize.as_ref() {
|
||||
search_result = personalization_service
|
||||
.rerank_search_results(
|
||||
search_result,
|
||||
personalize,
|
||||
personalize_query.as_deref(),
|
||||
time_budget,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
debug!(request_uid = ?request_uid, returns = ?search_result, "Search get");
|
||||
debug!(returns = ?search_result, "Search get");
|
||||
Ok(HttpResponse::Ok().json(search_result))
|
||||
}
|
||||
|
||||
@@ -461,17 +425,15 @@ pub async fn search_with_url_query(
|
||||
pub async fn search_with_post(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
|
||||
search_queue: web::Data<SearchQueue>,
|
||||
personalization_service: web::Data<crate::personalization::PersonalizationService>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebJson<SearchQuery, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let request_uid = Uuid::now_v7();
|
||||
|
||||
let mut query = params.into_inner();
|
||||
debug!(request_uid = ?request_uid, parameters = ?query, "Search post");
|
||||
debug!(parameters = ?query, "Search post");
|
||||
|
||||
// Tenant token search_rules.
|
||||
if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
|
||||
@@ -482,37 +444,24 @@ pub async fn search_with_post(
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
|
||||
// Extract personalization and query string before moving query
|
||||
let personalize = query.personalize.take();
|
||||
|
||||
let search_kind =
|
||||
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
|
||||
let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
let include_metadata = parse_include_metadata_header(&req);
|
||||
|
||||
// Save the query string for personalization if requested
|
||||
let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
|
||||
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let search_result = tokio::task::spawn_blocking(move || {
|
||||
perform_search(
|
||||
SearchParams {
|
||||
index_uid: index_uid.to_string(),
|
||||
query,
|
||||
search_kind,
|
||||
retrieve_vectors,
|
||||
features: index_scheduler.features(),
|
||||
request_uid,
|
||||
include_metadata,
|
||||
},
|
||||
&index_uid,
|
||||
&index,
|
||||
query,
|
||||
search_kind,
|
||||
retrieve_vectors,
|
||||
index_scheduler.features(),
|
||||
)
|
||||
})
|
||||
.await;
|
||||
permit.drop().await;
|
||||
let search_result = search_result?;
|
||||
if let Ok((ref search_result, _)) = search_result {
|
||||
if let Ok(ref search_result) = search_result {
|
||||
aggregate.succeed(search_result);
|
||||
if search_result.degraded {
|
||||
MEILISEARCH_DEGRADED_SEARCH_REQUESTS.inc();
|
||||
@@ -520,28 +469,16 @@ pub async fn search_with_post(
|
||||
}
|
||||
analytics.publish(aggregate, &req);
|
||||
|
||||
let (mut search_result, time_budget) = search_result?;
|
||||
let search_result = search_result?;
|
||||
|
||||
// Apply personalization if requested
|
||||
if let Some(personalize) = personalize.as_ref() {
|
||||
search_result = personalization_service
|
||||
.rerank_search_results(
|
||||
search_result,
|
||||
personalize,
|
||||
personalize_query.as_deref(),
|
||||
time_budget,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
debug!(request_uid = ?request_uid, returns = ?search_result, "Search post");
|
||||
debug!(returns = ?search_result, "Search post");
|
||||
Ok(HttpResponse::Ok().json(search_result))
|
||||
}
|
||||
|
||||
pub fn search_kind(
|
||||
query: &SearchQuery,
|
||||
index_scheduler: &IndexScheduler,
|
||||
index_uid: String,
|
||||
index_uid: &str,
|
||||
index: &milli::Index,
|
||||
) -> Result<SearchKind, ResponseError> {
|
||||
let is_placeholder_query =
|
||||
|
||||
@@ -7,7 +7,6 @@ use serde_json::{json, Value};
|
||||
|
||||
use crate::aggregate_methods;
|
||||
use crate::analytics::{Aggregate, AggregateMethod};
|
||||
use crate::metrics::MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS;
|
||||
use crate::search::{
|
||||
SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
|
||||
DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
|
||||
@@ -96,9 +95,6 @@ pub struct SearchAggregator<Method: AggregateMethod> {
|
||||
show_ranking_score_details: bool,
|
||||
ranking_score_threshold: bool,
|
||||
|
||||
// personalization
|
||||
total_personalized: usize,
|
||||
|
||||
marker: std::marker::PhantomData<Method>,
|
||||
}
|
||||
|
||||
@@ -133,7 +129,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
|
||||
hybrid,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
personalize,
|
||||
} = query;
|
||||
|
||||
let mut ret = Self::default();
|
||||
@@ -209,12 +204,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
|
||||
ret.locales = locales.iter().copied().collect();
|
||||
}
|
||||
|
||||
// personalization
|
||||
if personalize.is_some() {
|
||||
ret.total_personalized = 1;
|
||||
MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS.inc();
|
||||
}
|
||||
|
||||
ret.highlight_pre_tag = *highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG();
|
||||
ret.highlight_post_tag = *highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG();
|
||||
ret.crop_marker = *crop_marker != DEFAULT_CROP_MARKER();
|
||||
@@ -245,8 +234,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
|
||||
facet_stats: _,
|
||||
degraded,
|
||||
used_negative_operator,
|
||||
request_uid: _,
|
||||
metadata: _,
|
||||
} = result;
|
||||
|
||||
self.total_succeeded = self.total_succeeded.saturating_add(1);
|
||||
@@ -307,7 +294,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
|
||||
total_used_negative_operator,
|
||||
ranking_score_threshold,
|
||||
mut locales,
|
||||
total_personalized,
|
||||
marker: _,
|
||||
} = *new;
|
||||
|
||||
@@ -393,9 +379,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
|
||||
// locales
|
||||
self.locales.append(&mut locales);
|
||||
|
||||
// personalization
|
||||
self.total_personalized = self.total_personalized.saturating_add(total_personalized);
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
@@ -441,7 +424,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
|
||||
total_used_negative_operator,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
total_personalized,
|
||||
marker: _,
|
||||
} = *self;
|
||||
|
||||
@@ -515,9 +497,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
|
||||
"show_ranking_score_details": show_ranking_score_details,
|
||||
"ranking_score_threshold": ranking_score_threshold,
|
||||
},
|
||||
"personalization": {
|
||||
"total_personalized": total_personalized,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,7 +227,7 @@ async fn similar(
|
||||
|
||||
let (embedder_name, embedder, quantized) = SearchKind::embedder(
|
||||
&index_scheduler,
|
||||
index_uid.to_string(),
|
||||
index_uid.as_str(),
|
||||
&index,
|
||||
&query.embedder,
|
||||
None,
|
||||
|
||||
@@ -39,15 +39,11 @@ use crate::routes::features::RuntimeTogglableFeatures;
|
||||
use crate::routes::indexes::documents::{DocumentDeletionByFilter, DocumentEditionByFunction};
|
||||
use crate::routes::indexes::IndexView;
|
||||
use crate::routes::multi_search::SearchResults;
|
||||
use crate::routes::network::{Network, Remote};
|
||||
use crate::routes::swap_indexes::SwapIndexesPayload;
|
||||
use crate::routes::webhooks::{
|
||||
WebhookResults, WebhookSettings, WebhookWithMetadataRedactedAuthorization,
|
||||
};
|
||||
use crate::routes::webhooks::{WebhookResults, WebhookSettings, WebhookWithMetadata};
|
||||
use crate::search::{
|
||||
FederatedSearch, FederatedSearchResult, Federation, FederationOptions, MergeFacets,
|
||||
SearchQueryWithIndex, SearchResultWithIndex, SimilarQuery, SimilarResult,
|
||||
INCLUDE_METADATA_HEADER,
|
||||
};
|
||||
use crate::search_queue::SearchQueue;
|
||||
use crate::Opt;
|
||||
@@ -105,7 +101,7 @@ mod webhooks;
|
||||
url = "/",
|
||||
description = "Local server",
|
||||
)),
|
||||
components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadataRedactedAuthorization, meilisearch_types::milli::vector::VectorStoreBackend))
|
||||
components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, meilisearch_types::enterprise_edition::network::Network, meilisearch_types::enterprise_edition::network::Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata, meilisearch_types::milli::vector::VectorStoreBackend))
|
||||
)]
|
||||
pub struct MeilisearchApi;
|
||||
|
||||
@@ -187,18 +183,6 @@ pub fn is_dry_run(req: &HttpRequest, opt: &Opt) -> Result<bool, ResponseError> {
|
||||
.is_some_and(|s| s.to_lowercase() == "true"))
|
||||
}
|
||||
|
||||
/// Parse the `Meili-Include-Metadata` header from an HTTP request.
|
||||
///
|
||||
/// Returns `true` if the header is present and set to "true" or "1" (case-insensitive).
|
||||
/// Returns `false` if the header is not present or has any other value.
|
||||
pub fn parse_include_metadata_header(req: &HttpRequest) -> bool {
|
||||
req.headers()
|
||||
.get(INCLUDE_METADATA_HEADER)
|
||||
.and_then(|h| h.to_str().ok())
|
||||
.map(|v| matches!(v.to_lowercase().as_str(), "true" | "1"))
|
||||
.unwrap_or(false)
|
||||
}
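The removed helper above treats the `Meili-Include-Metadata` header as enabled only when its value is `true` or `1`, case-insensitively. A tiny standalone check of that rule, written as a hypothetical free function rather than the actix-based code:

fn parse_include_metadata(value: Option<&str>) -> bool {
    value
        .map(|v| matches!(v.to_lowercase().as_str(), "true" | "1"))
        .unwrap_or(false)
}

fn main() {
    assert!(parse_include_metadata(Some("TRUE")));
    assert!(parse_include_metadata(Some("1")));
    assert!(!parse_include_metadata(Some("yes")));
    assert!(!parse_include_metadata(None));
}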
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct SummarizedTaskView {
|
||||
@@ -218,8 +202,6 @@ pub struct SummarizedTaskView {
|
||||
deserialize_with = "time::serde::rfc3339::deserialize"
|
||||
)]
|
||||
enqueued_at: OffsetDateTime,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
custom_metadata: Option<String>,
|
||||
}
|
||||
|
||||
impl From<Task> for SummarizedTaskView {
|
||||
@@ -230,7 +212,6 @@ impl From<Task> for SummarizedTaskView {
|
||||
status: task.status,
|
||||
kind: task.kind.as_kind(),
|
||||
enqueued_at: task.enqueued_at,
|
||||
custom_metadata: task.custom_metadata,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ use meilisearch_types::keys::actions;
|
||||
use serde::Serialize;
|
||||
use tracing::debug;
|
||||
use utoipa::{OpenApi, ToSchema};
|
||||
use uuid::Uuid;
|
||||
|
||||
use super::multi_search_analytics::MultiSearchAggregator;
|
||||
use crate::analytics::Analytics;
|
||||
@@ -18,11 +17,10 @@ use crate::extractors::authentication::policies::ActionPolicy;
|
||||
use crate::extractors::authentication::{AuthenticationError, GuardedData};
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::indexes::search::search_kind;
|
||||
use crate::routes::parse_include_metadata_header;
|
||||
use crate::search::{
|
||||
add_search_rules, perform_federated_search, perform_search, FederatedSearch,
|
||||
FederatedSearchResult, RetrieveVectors, SearchParams, SearchQueryWithIndex,
|
||||
SearchResultWithIndex, PROXY_SEARCH_HEADER, PROXY_SEARCH_HEADER_VALUE,
|
||||
FederatedSearchResult, RetrieveVectors, SearchQueryWithIndex, SearchResultWithIndex,
|
||||
PROXY_SEARCH_HEADER, PROXY_SEARCH_HEADER_VALUE,
|
||||
};
|
||||
use crate::search_queue::SearchQueue;
|
||||
|
||||
@@ -146,7 +144,6 @@ pub struct SearchResults {
|
||||
pub async fn multi_search_with_post(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
|
||||
search_queue: Data<SearchQueue>,
|
||||
personalization_service: web::Data<crate::personalization::PersonalizationService>,
|
||||
params: AwebJson<FederatedSearch, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
analytics: web::Data<Analytics>,
|
||||
@@ -154,7 +151,6 @@ pub async fn multi_search_with_post(
|
||||
// Since we don't want to process half of the search requests and then get a permit refused
|
||||
// we're going to get one permit for the whole duration of the multi-search request.
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let request_uid = Uuid::now_v7();
|
||||
|
||||
let federated_search = params.into_inner();
|
||||
|
||||
@@ -190,31 +186,16 @@ pub async fn multi_search_with_post(
|
||||
err
|
||||
})?;
|
||||
|
||||
let include_metadata = parse_include_metadata_header(&req);
|
||||
let response = match federation {
|
||||
Some(federation) => {
|
||||
debug!(
|
||||
request_uid = ?request_uid,
|
||||
federation = ?federation,
|
||||
parameters = ?queries,
|
||||
"Federated-search"
|
||||
);
|
||||
|
||||
// check remote header
|
||||
let is_proxy = req
|
||||
.headers()
|
||||
.get(PROXY_SEARCH_HEADER)
|
||||
.is_some_and(|value| value.as_bytes() == PROXY_SEARCH_HEADER_VALUE.as_bytes());
|
||||
let search_result = perform_federated_search(
|
||||
&index_scheduler,
|
||||
queries,
|
||||
federation,
|
||||
features,
|
||||
is_proxy,
|
||||
request_uid,
|
||||
include_metadata,
|
||||
)
|
||||
.await;
|
||||
let search_result =
|
||||
perform_federated_search(&index_scheduler, queries, federation, features, is_proxy)
|
||||
.await;
|
||||
permit.drop().await;
|
||||
|
||||
if search_result.is_ok() {
|
||||
@@ -222,13 +203,6 @@ pub async fn multi_search_with_post(
|
||||
}
|
||||
|
||||
analytics.publish(multi_aggregate, &req);
|
||||
|
||||
debug!(
|
||||
request_uid = ?request_uid,
|
||||
returns = ?search_result,
|
||||
"Federated-search"
|
||||
);
|
||||
|
||||
HttpResponse::Ok().json(search_result?)
|
||||
}
|
||||
None => {
|
||||
@@ -237,17 +211,12 @@ pub async fn multi_search_with_post(
|
||||
// changes.
|
||||
let search_results: Result<_, (ResponseError, usize)> = async {
|
||||
let mut search_results = Vec::with_capacity(queries.len());
|
||||
for (query_index, (index_uid, mut query, federation_options)) in queries
|
||||
for (query_index, (index_uid, query, federation_options)) in queries
|
||||
.into_iter()
|
||||
.map(SearchQueryWithIndex::into_index_query_federation)
|
||||
.enumerate()
|
||||
{
|
||||
debug!(
|
||||
request_uid = ?request_uid,
|
||||
on_index = query_index,
|
||||
parameters = ?query,
|
||||
"Multi-search"
|
||||
);
|
||||
debug!(on_index = query_index, parameters = ?query, "Multi-search");
|
||||
|
||||
if federation_options.is_some() {
|
||||
return Err((
|
||||
@@ -270,58 +239,30 @@ pub async fn multi_search_with_post(
|
||||
})
|
||||
.with_index(query_index)?;
|
||||
|
||||
// Extract personalization and query string before moving query
|
||||
let personalize = query.personalize.take();
|
||||
|
||||
// Save the query string for personalization if requested
|
||||
let personalize_query =
|
||||
personalize.is_some().then(|| query.q.clone()).flatten();
|
||||
|
||||
let index_uid_str = index_uid.to_string();
|
||||
|
||||
let search_kind = search_kind(
|
||||
&query,
|
||||
index_scheduler.get_ref(),
|
||||
index_uid_str.clone(),
|
||||
&index,
|
||||
)
|
||||
.with_index(query_index)?;
|
||||
let search_kind =
|
||||
search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)
|
||||
.with_index(query_index)?;
|
||||
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
let (mut search_result, time_budget) = tokio::task::spawn_blocking(move || {
|
||||
perform_search(
|
||||
SearchParams {
|
||||
index_uid: index_uid_str.clone(),
|
||||
let search_result = tokio::task::spawn_blocking({
|
||||
let index_uid = index_uid.clone();
|
||||
move || {
|
||||
perform_search(
|
||||
&index_uid,
|
||||
&index,
|
||||
query,
|
||||
search_kind,
|
||||
retrieve_vectors: retrieve_vector,
|
||||
retrieve_vector,
|
||||
features,
|
||||
request_uid,
|
||||
include_metadata,
|
||||
},
|
||||
&index,
|
||||
)
|
||||
)
|
||||
}
|
||||
})
|
||||
.await
|
||||
.with_index(query_index)?
|
||||
.with_index(query_index)?;
|
||||
|
||||
// Apply personalization if requested
|
||||
if let Some(personalize) = personalize.as_ref() {
|
||||
search_result = personalization_service
|
||||
.rerank_search_results(
|
||||
search_result,
|
||||
personalize,
|
||||
personalize_query.as_deref(),
|
||||
time_budget,
|
||||
)
|
||||
.await
|
||||
.with_index(query_index)?;
|
||||
}
|
||||
|
||||
search_results.push(SearchResultWithIndex {
|
||||
index_uid: index_uid.into_inner(),
|
||||
result: search_result,
|
||||
result: search_result.with_index(query_index)?,
|
||||
});
|
||||
}
|
||||
Ok(search_results)
|
||||
@@ -342,11 +283,7 @@ pub async fn multi_search_with_post(
|
||||
err
|
||||
})?;
|
||||
|
||||
debug!(
|
||||
request_uid = ?request_uid,
|
||||
returns = ?search_results,
|
||||
"Multi-search"
|
||||
);
|
||||
debug!(returns = ?search_results, "Multi-search");
|
||||
|
||||
HttpResponse::Ok().json(SearchResults { results: search_results })
|
||||
}
|
||||
|
||||
@@ -67,7 +67,6 @@ impl MultiSearchAggregator {
|
||||
hybrid: _,
|
||||
ranking_score_threshold: _,
|
||||
locales: _,
|
||||
personalize: _,
|
||||
} in &federated_search.queries
|
||||
{
|
||||
if let Some(federation_options) = federation_options {
|
||||
|
||||
@@ -1,28 +1,21 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use actix_web::web::{self, Data};
|
||||
use actix_web::{HttpRequest, HttpResponse};
|
||||
use deserr::actix_web::AwebJson;
|
||||
use deserr::Deserr;
|
||||
use index_scheduler::IndexScheduler;
|
||||
use itertools::{EitherOrBoth, Itertools};
|
||||
use meilisearch_types::deserr::DeserrJsonError;
|
||||
use meilisearch_types::enterprise_edition::network::{Network as DbNetwork, Remote as DbRemote};
|
||||
use meilisearch_types::error::deserr_codes::{
|
||||
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
|
||||
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
|
||||
};
|
||||
use meilisearch_types::enterprise_edition::network::{Network, Remote};
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::keys::actions;
|
||||
use meilisearch_types::milli::update::Setting;
|
||||
use serde::Serialize;
|
||||
use tracing::debug;
|
||||
use utoipa::{OpenApi, ToSchema};
|
||||
use utoipa::OpenApi;
|
||||
|
||||
use crate::analytics::{Aggregate, Analytics};
|
||||
use crate::extractors::authentication::policies::ActionPolicy;
|
||||
use crate::extractors::authentication::GuardedData;
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::SummarizedTaskView;
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
@@ -31,7 +24,7 @@ use crate::extractors::sequential_extractor::SeqHandler;
|
||||
name = "Network",
|
||||
description = "The `/network` route allows you to describe the topology of a network of Meilisearch instances.
|
||||
|
||||
This route is **synchronous**. This means that no task object will be returned, and any change to the network will be made available immediately.",
|
||||
This route is **asynchronous**. A task uid will be returned, and any change to the network will be effective after the corresponding task has been processed.",
|
||||
external_docs(url = "https://www.meilisearch.com/docs/reference/api/network"),
|
||||
)),
|
||||
)]
|
||||
@@ -83,73 +76,6 @@ async fn get_network(
|
||||
Ok(HttpResponse::Ok().json(network))
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserr, ToSchema, Serialize)]
|
||||
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
pub struct Remote {
|
||||
#[schema(value_type = Option<String>, example = json!({
|
||||
"ms-0": Remote { url: Setting::Set("http://localhost:7700".into()), search_api_key: Setting::Reset, write_api_key: Setting::Reset },
|
||||
"ms-1": Remote { url: Setting::Set("http://localhost:7701".into()), search_api_key: Setting::Set("foo".into()), write_api_key: Setting::Set("bar".into()) },
|
||||
"ms-2": Remote { url: Setting::Set("http://localhost:7702".into()), search_api_key: Setting::Set("bar".into()), write_api_key: Setting::Set("foo".into()) },
|
||||
}))]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkUrl>)]
|
||||
#[serde(default)]
|
||||
pub url: Setting<String>,
|
||||
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkSearchApiKey>)]
|
||||
#[serde(default)]
|
||||
pub search_api_key: Setting<String>,
|
||||
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkWriteApiKey>)]
|
||||
#[serde(default)]
|
||||
pub write_api_key: Setting<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserr, ToSchema, Serialize)]
|
||||
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
pub struct Network {
|
||||
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
|
||||
#[serde(default)]
|
||||
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
|
||||
#[schema(value_type = Option<String>, example = json!("ms-00"), rename = "self")]
|
||||
#[serde(default, rename = "self")]
|
||||
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
|
||||
pub local: Setting<String>,
|
||||
#[schema(value_type = Option<bool>, example = json!(true))]
|
||||
#[serde(default)]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
|
||||
pub sharding: Setting<bool>,
|
||||
}
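For reference, a payload accepted by these `Network`/`Remote` definitions over `PATCH /network` looks roughly like the following. This is an illustrative example built with `serde_json` (the `local` field is exposed as `self`, the remaining keys are camelCase, and omitted remote keys fall back to their deserr defaults):

use serde_json::json;

fn main() {
    // Field names follow the struct definitions above.
    let body = json!({
        "self": "ms-0",
        "sharding": true,
        "remotes": {
            "ms-0": { "url": "http://localhost:7700" },
            "ms-1": { "url": "http://localhost:7701", "searchApiKey": "foo", "writeApiKey": "bar" }
        }
    });
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}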
|
||||
impl Remote {
|
||||
pub fn try_into_db_node(self, name: &str) -> Result<DbRemote, ResponseError> {
|
||||
Ok(DbRemote {
|
||||
url: self
|
||||
.url
|
||||
.set()
|
||||
.ok_or(ResponseError::from_msg(
|
||||
format!("Missing field `.remotes.{name}.url`"),
|
||||
meilisearch_types::error::Code::MissingNetworkUrl,
|
||||
))
|
||||
.and_then(|url| {
|
||||
if let Err(error) = url::Url::parse(&url) {
|
||||
return Err(ResponseError::from_msg(
|
||||
format!("Invalid `.remotes.{name}.url` (`{url}`): {error}"),
|
||||
meilisearch_types::error::Code::InvalidNetworkUrl,
|
||||
));
|
||||
}
|
||||
Ok(url)
|
||||
})?,
|
||||
search_api_key: self.search_api_key.set(),
|
||||
write_api_key: self.write_api_key.set(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct PatchNetworkAnalytics {
|
||||
network_size: usize,
|
||||
@@ -208,111 +134,58 @@ async fn patch_network(
|
||||
index_scheduler.features().check_network("Using the /network route")?;
|
||||
|
||||
let new_network = new_network.0;
|
||||
let old_network = index_scheduler.network();
|
||||
debug!(parameters = ?new_network, "Patch network");
|
||||
|
||||
let merged_self = match new_network.local {
|
||||
Setting::Set(new_self) => Some(new_self),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_network.local,
|
||||
};
|
||||
|
||||
let merged_sharding = match new_network.sharding {
|
||||
Setting::Set(new_sharding) => new_sharding,
|
||||
Setting::Reset => false,
|
||||
Setting::NotSet => old_network.sharding,
|
||||
};
|
||||
|
||||
if merged_sharding && merged_self.is_none() {
|
||||
return Err(ResponseError::from_msg(
|
||||
"`.sharding`: enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.".into(),
|
||||
meilisearch_types::error::Code::InvalidNetworkSharding,
|
||||
));
|
||||
}
|
||||
|
||||
let merged_remotes = match new_network.remotes {
|
||||
Setting::Set(new_remotes) => {
|
||||
let mut merged_remotes = BTreeMap::new();
|
||||
for either_or_both in old_network
|
||||
.remotes
|
||||
.into_iter()
|
||||
.merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
|
||||
{
|
||||
match either_or_both {
|
||||
EitherOrBoth::Both((key, old), (_, Some(new))) => {
|
||||
let DbRemote {
|
||||
url: old_url,
|
||||
search_api_key: old_search_api_key,
|
||||
write_api_key: old_write_api_key,
|
||||
} = old;
|
||||
|
||||
let Remote {
|
||||
url: new_url,
|
||||
search_api_key: new_search_api_key,
|
||||
write_api_key: new_write_api_key,
|
||||
} = new;
|
||||
|
||||
let merged = DbRemote {
|
||||
url: match new_url {
|
||||
Setting::Set(new_url) => {
|
||||
if let Err(error) = url::Url::parse(&new_url) {
|
||||
return Err(ResponseError::from_msg(
|
||||
format!("Invalid `.remotes.{key}.url` (`{new_url}`): {error}"),
|
||||
meilisearch_types::error::Code::InvalidNetworkUrl,
|
||||
));
|
||||
}
|
||||
new_url
|
||||
}
|
||||
Setting::Reset => {
|
||||
return Err(ResponseError::from_msg(
|
||||
format!(
|
||||
"Field `.remotes.{key}.url` cannot be set to `null`"
|
||||
),
|
||||
meilisearch_types::error::Code::InvalidNetworkUrl,
|
||||
))
|
||||
}
|
||||
Setting::NotSet => old_url,
|
||||
},
|
||||
search_api_key: match new_search_api_key {
|
||||
Setting::Set(new_search_api_key) => Some(new_search_api_key),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_search_api_key,
|
||||
},
|
||||
write_api_key: match new_write_api_key {
|
||||
Setting::Set(new_write_api_key) => Some(new_write_api_key),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_write_api_key,
|
||||
},
|
||||
};
|
||||
merged_remotes.insert(key, merged);
|
||||
}
|
||||
EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
|
||||
EitherOrBoth::Left((key, node)) => {
|
||||
merged_remotes.insert(key, node);
|
||||
}
|
||||
EitherOrBoth::Right((key, Some(node))) => {
|
||||
let node = node.try_into_db_node(&key)?;
|
||||
merged_remotes.insert(key, node);
|
||||
// check the URLs of all remotes
|
||||
if let Setting::Set(remotes) = &new_network.remotes {
|
||||
for (remote_name, remote) in remotes.iter() {
|
||||
let Some(remote) = remote else {
|
||||
continue;
|
||||
};
|
||||
match &remote.url {
|
||||
Setting::Set(new_url) => {
|
||||
if let Err(error) = url::Url::parse(&new_url) {
|
||||
return Err(ResponseError::from_msg(
|
||||
format!("Invalid `.remotes.{remote_name}.url` (`{new_url}`): {error}"),
|
||||
meilisearch_types::error::Code::InvalidNetworkUrl,
|
||||
));
|
||||
}
|
||||
}
|
||||
Setting::Reset => {
|
||||
return Err(ResponseError::from_msg(
|
||||
format!("Field `.remotes.{remote_name}.url` cannot be set to `null`"),
|
||||
meilisearch_types::error::Code::InvalidNetworkUrl,
|
||||
))
|
||||
}
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
merged_remotes
|
||||
}
|
||||
Setting::Reset => BTreeMap::new(),
|
||||
Setting::NotSet => old_network.remotes,
|
||||
};
|
||||
}
|
||||
|
||||
analytics.publish(
|
||||
PatchNetworkAnalytics {
|
||||
network_size: merged_remotes.len(),
|
||||
network_has_self: merged_self.is_some(),
|
||||
network_size: new_network
|
||||
.remotes
|
||||
.as_ref()
|
||||
.set()
|
||||
.map(|remotes| remotes.len())
|
||||
.unwrap_or_default(),
|
||||
network_has_self: new_network.local.as_ref().set().is_some(),
|
||||
},
|
||||
&req,
|
||||
);
|
||||
|
||||
let merged_network =
|
||||
DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
|
||||
index_scheduler.put_network(merged_network.clone())?;
|
||||
debug!(returns = ?merged_network, "Patch network");
|
||||
Ok(HttpResponse::Ok().json(merged_network))
|
||||
let task = index_scheduler.register(
|
||||
meilisearch_types::tasks::KindWithContent::NetworkTopologyChange {
|
||||
network: Some(new_network),
|
||||
origin: None,
|
||||
},
|
||||
None,
|
||||
false,
|
||||
)?;
|
||||
debug!(returns = ?task, "Patch network");
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
|
||||
return Ok(HttpResponse::Accepted().json(task));
|
||||
}
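With this change the route stops applying the merged network synchronously: it registers a `networkTopologyChange` task and replies `202 Accepted` with a task view, so clients that relied on the old immediate behaviour now wait on the task. A hedged sketch of that client-side adjustment (the status strings and polling interval are assumptions, not taken from this diff):

use std::{thread, time::Duration};

// Stand-in for `GET /tasks/{uid}`; a real client would perform an HTTP call here.
fn get_task_status(_uid: u64) -> String {
    "succeeded".to_string()
}

fn main() {
    let task_uid = 147; // uid returned by the now-asynchronous PATCH /network
    loop {
        let status = get_task_status(task_uid);
        if status != "enqueued" && status != "processing" {
            println!("network change finished with status `{status}`");
            break;
        }
        thread::sleep(Duration::from_millis(200));
    }
}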
|
||||
@@ -228,7 +228,7 @@ mod tests {
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
|
||||
@@ -90,7 +90,7 @@ fn deny_immutable_fields_webhook(
|
||||
#[derive(Debug, Serialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
pub(super) struct WebhookWithMetadataRedactedAuthorization {
|
||||
pub(super) struct WebhookWithMetadata {
|
||||
uuid: Uuid,
|
||||
is_editable: bool,
|
||||
#[schema(value_type = WebhookSettings)]
|
||||
@@ -98,9 +98,8 @@ pub(super) struct WebhookWithMetadataRedactedAuthorization {
|
||||
webhook: Webhook,
|
||||
}
|
||||
|
||||
impl WebhookWithMetadataRedactedAuthorization {
|
||||
pub fn from(uuid: Uuid, mut webhook: Webhook) -> Self {
|
||||
webhook.redact_authorization_header();
|
||||
impl WebhookWithMetadata {
|
||||
pub fn from(uuid: Uuid, webhook: Webhook) -> Self {
|
||||
Self { uuid, is_editable: uuid != Uuid::nil(), webhook }
|
||||
}
|
||||
}
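One side of this diff exposes webhooks as `WebhookWithMetadataRedactedAuthorization`, whose constructor calls `webhook.redact_authorization_header()` so the secret never appears in API responses; the other side keeps the plain `WebhookWithMetadata` view. A self-contained sketch of what that redaction amounts to, with illustrative types rather than the real `Webhook` struct:

use std::collections::BTreeMap;

// Illustrative stand-in for `Webhook::redact_authorization_header`: hide the
// secret before the webhook is serialized into an API response.
fn redact_authorization_header(headers: &mut BTreeMap<String, String>) {
    if let Some(value) = headers.get_mut("Authorization") {
        *value = "XXX...".to_string();
    }
}

fn main() {
    let mut headers = BTreeMap::from([
        ("Authorization".to_string(), "Bearer s3cr3t".to_string()),
        ("X-Custom".to_string(), "kept".to_string()),
    ]);
    redact_authorization_header(&mut headers);
    assert_eq!(headers["Authorization"], "XXX...");
    assert_eq!(headers["X-Custom"], "kept");
}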
@@ -108,7 +107,7 @@ impl WebhookWithMetadataRedactedAuthorization {
|
||||
#[derive(Debug, Serialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub(super) struct WebhookResults {
|
||||
results: Vec<WebhookWithMetadataRedactedAuthorization>,
|
||||
results: Vec<WebhookWithMetadata>,
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
@@ -151,7 +150,7 @@ async fn get_webhooks(
|
||||
let results = webhooks
|
||||
.webhooks
|
||||
.into_iter()
|
||||
.map(|(uuid, webhook)| WebhookWithMetadataRedactedAuthorization::from(uuid, webhook))
|
||||
.map(|(uuid, webhook)| WebhookWithMetadata::from(uuid, webhook))
|
||||
.collect::<Vec<_>>();
|
||||
let results = WebhookResults { results };
|
||||
|
||||
@@ -302,7 +301,7 @@ fn check_changed(uuid: Uuid, webhook: &Webhook) -> Result<(), WebhooksError> {
|
||||
tag = "Webhooks",
|
||||
security(("Bearer" = ["webhooks.get", "webhooks.*", "*.get", "*"])),
|
||||
responses(
|
||||
(status = 200, description = "Webhook found", body = WebhookWithMetadataRedactedAuthorization, content_type = "application/json", example = json!({
|
||||
(status = 200, description = "Webhook found", body = WebhookWithMetadata, content_type = "application/json", example = json!({
|
||||
"uuid": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"url": "https://your.site/on-tasks-completed",
|
||||
"headers": {
|
||||
@@ -325,7 +324,7 @@ async fn get_webhook(
|
||||
let mut webhooks = index_scheduler.webhooks_view();
|
||||
|
||||
let webhook = webhooks.webhooks.remove(&uuid).ok_or(WebhookNotFound(uuid))?;
|
||||
let webhook = WebhookWithMetadataRedactedAuthorization::from(uuid, webhook);
|
||||
let webhook = WebhookWithMetadata::from(uuid, webhook);
|
||||
|
||||
debug!(returns = ?webhook, "Get webhook");
|
||||
Ok(HttpResponse::Ok().json(webhook))
|
||||
@@ -338,7 +337,7 @@ async fn get_webhook(
request_body = WebhookSettings,
security(("Bearer" = ["webhooks.create", "webhooks.*", "*"])),
responses(
(status = 201, description = "Webhook created successfully", body = WebhookWithMetadataRedactedAuthorization, content_type = "application/json", example = json!({
(status = 201, description = "Webhook created successfully", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
@@ -384,7 +383,7 @@ async fn post_webhook(

analytics.publish(PostWebhooksAnalytics, &req);

let response = WebhookWithMetadataRedactedAuthorization::from(uuid, webhook);
let response = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?response, "Post webhook");
Ok(HttpResponse::Created().json(response))
}
@@ -396,7 +395,7 @@ async fn post_webhook(
request_body = WebhookSettings,
security(("Bearer" = ["webhooks.update", "webhooks.*", "*"])),
responses(
(status = 200, description = "Webhook updated successfully", body = WebhookWithMetadataRedactedAuthorization, content_type = "application/json", example = json!({
(status = 200, description = "Webhook updated successfully", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
@@ -436,7 +435,7 @@ async fn patch_webhook(

analytics.publish(PatchWebhooksAnalytics, &req);

let response = WebhookWithMetadataRedactedAuthorization::from(uuid, webhook);
let response = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?response, "Patch webhook");
Ok(HttpResponse::Ok().json(response))
}

@@ -9,7 +9,7 @@ use std::vec::{IntoIter, Vec};
use actix_http::StatusCode;
use index_scheduler::{IndexScheduler, RoFeatures};
use itertools::Itertools;
use meilisearch_types::enterprise_edition::network::{Network, Remote};
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
use meilisearch_types::error::ResponseError;
use meilisearch_types::milli::order_by_map::OrderByMap;
use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue};
@@ -17,13 +17,11 @@ use meilisearch_types::milli::vector::Embedding;
use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET};
use roaring::RoaringBitmap;
use tokio::task::JoinHandle;
use uuid::Uuid;

use super::super::ranking_rules::{self, RankingRules};
use super::super::{
compute_facet_distribution_stats, prepare_search, AttributesFormat, ComputedFacets, HitMaker,
HitsInfo, RetrieveVectors, SearchHit, SearchKind, SearchMetadata, SearchQuery,
SearchQueryWithIndex,
HitsInfo, RetrieveVectors, SearchHit, SearchKind, SearchQuery, SearchQueryWithIndex,
};
use super::proxy::{proxy_search, ProxySearchError, ProxySearchParams};
use super::types::{
@@ -41,58 +39,32 @@ pub async fn perform_federated_search(
federation: Federation,
features: RoFeatures,
is_proxy: bool,
request_uid: Uuid,
include_metadata: bool,
) -> Result<FederatedSearchResult, ResponseError> {
if is_proxy {
features.check_network("Performing a remote federated search")?;
}
let before_search = std::time::Instant::now();

let timeout = std::env::var("MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS")
.ok()
.map(|p| p.parse().unwrap())
.unwrap_or(25);

let deadline = before_search + std::time::Duration::from_secs(timeout);
let deadline = before_search + std::time::Duration::from_secs(9);

let required_hit_count = federation.limit + federation.offset;
let retrieve_vectors = queries.iter().any(|q| q.retrieve_vectors);

let network = index_scheduler.network();

// Preconstruct metadata keeping the original queries order for later metadata building
let precomputed_query_metadata: Option<Vec<_>> = include_metadata.then(|| {
queries
.iter()
.map(|q| {
(
q.index_uid.to_string(),
q.federation_options.as_ref().and_then(|o| o.remote.clone()),
)
})
.collect()
});

// this implementation partition the queries by index to guarantee an important property:
// - all the queries to a particular index use the same read transaction.
// This is an important property, otherwise we cannot guarantee the self-consistency of the results.

// 1. partition queries by host and index
let mut partitioned_queries = PartitionedQueries::new();

for (query_index, federated_query) in queries.into_iter().enumerate() {
partitioned_queries.partition(federated_query, query_index, &network, features)?
}

// 2. perform queries, merge and make hits index by index
// 2.1. start remote queries
let remote_search = RemoteSearch::start(
partitioned_queries.remote_queries_by_host,
&federation,
deadline,
include_metadata,
);
let remote_search =
RemoteSearch::start(partitioned_queries.remote_queries_by_host, &federation, deadline);

// 2.2. concurrently execute local queries
let params = SearchByIndexParams {
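One side of the hunk above derives the remote-search deadline from the `MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS` environment variable with a 25-second default, while the other hard-codes 9 seconds. As a reading aid, here is a self-contained sketch (not this crate's code) of the environment-variable variant; unlike the `.parse().unwrap()` shown in the diff, it falls back to the default instead of panicking on a non-numeric value.

use std::time::{Duration, Instant};

// Illustrative helper: compute the deadline for remote federated searches.
fn remote_search_deadline(before_search: Instant) -> Instant {
    let timeout_secs: u64 = std::env::var("MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS")
        .ok()
        .and_then(|raw| raw.parse().ok()) // ignore unparsable values rather than panic
        .unwrap_or(25);
    before_search + Duration::from_secs(timeout_secs)
}

fn main() {
    let deadline = remote_search_deadline(Instant::now());
    println!("remote searches must finish before {deadline:?}");
}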
@@ -134,25 +106,11 @@ pub async fn perform_federated_search(
let after_waiting_remote_results = std::time::Instant::now();

// 3. merge hits and metadata across indexes and hosts

// 3.1. Build metadata in the same order as the original queries
let query_metadata = precomputed_query_metadata.map(|precomputed_query_metadata| {
// If a remote is present, set the local remote name
let local_remote_name = network.local.clone().filter(|_| partitioned_queries.has_remote);

build_query_metadata(
precomputed_query_metadata,
local_remote_name,
&remote_results,
&results_by_index,
)
});

// 3.2. merge federation metadata
// 3.1. merge metadata
let (estimated_total_hits, degraded, used_negative_operator, facets, max_remote_duration) =
merge_metadata(&mut results_by_index, &remote_results);

// 3.3. merge hits
// 3.2. merge hits
let merged_hits: Vec<_> = merge_index_global_results(results_by_index, &mut remote_results)
.skip(federation.offset)
.take(federation.limit)
@@ -167,7 +125,7 @@ pub async fn perform_federated_search(
.map(|hit| hit.hit())
.collect();

// 3.4. merge query vectors
// 3.3. merge query vectors
let query_vectors = if retrieve_vectors {
for remote_results in remote_results.iter_mut() {
if let Some(remote_vectors) = remote_results.query_vectors.take() {
@@ -186,7 +144,7 @@ pub async fn perform_federated_search(
None
};

// 3.5. merge facets
// 3.4. merge facets
let (facet_distribution, facet_stats, facets_by_index) =
facet_order.merge(federation.merge_facets, remote_results, facets);

@@ -212,8 +170,6 @@ pub async fn perform_federated_search(
facet_stats,
facets_by_index,
remote_errors: partitioned_queries.has_remote.then_some(remote_errors),
request_uid: Some(request_uid),
metadata: query_metadata,
})
}

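The `.skip(federation.offset).take(federation.limit)` chain above applies federation-level pagination only after hits from every index and host have been merged. A trivial standalone illustration of that ordering (stand-in data, not the real hit type):

fn main() {
    // Already-merged, already-ranked hits from all indexes and hosts.
    let merged_hits = vec!["a", "b", "c", "d", "e"];
    let (offset, limit) = (1, 2);

    // Pagination happens on the merged stream, not per index.
    let page: Vec<_> = merged_hits.into_iter().skip(offset).take(limit).collect();
    assert_eq!(page, vec!["b", "c"]);
}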
@@ -437,7 +393,6 @@ struct SearchHitByIndex {
|
||||
|
||||
struct SearchResultByIndex {
|
||||
index: String,
|
||||
primary_key: Option<String>,
|
||||
hits: Vec<SearchHitByIndex>,
|
||||
estimated_total_hits: usize,
|
||||
degraded: bool,
|
||||
@@ -445,61 +400,6 @@ struct SearchResultByIndex {
|
||||
facets: Option<ComputedFacets>,
|
||||
}
|
||||
|
||||
/// Builds query metadata for federated search results.
|
||||
///
|
||||
/// This function creates metadata for each query in the same order as the original queries,
|
||||
/// combining information from both local and remote search results. It handles the mapping
|
||||
/// of primary keys to their respective indexes and remotes to prevent collisions when
|
||||
/// multiple remotes have the same index_uid but different primary keys.
|
||||
fn build_query_metadata(
|
||||
precomputed_query_metadata: Vec<(String, Option<String>)>,
|
||||
local_remote_name: Option<String>,
|
||||
remote_results: &[FederatedSearchResult],
|
||||
results_by_index: &[SearchResultByIndex],
|
||||
) -> Vec<SearchMetadata> {
|
||||
// Create a map of (remote, index_uid) -> primary_key for quick lookup
|
||||
// This prevents collisions when multiple remotes have the same index_uid but different primary keys
|
||||
let mut primary_key_per_index = std::collections::HashMap::new();
|
||||
|
||||
// Build metadata for remote results
|
||||
for remote_result in remote_results {
|
||||
if let Some(remote_metadata) = &remote_result.metadata {
|
||||
for remote_meta in remote_metadata {
|
||||
if let SearchMetadata {
|
||||
remote: Some(remote_name),
|
||||
index_uid,
|
||||
primary_key: Some(primary_key),
|
||||
..
|
||||
} = remote_meta
|
||||
{
|
||||
let key = (Some(remote_name), index_uid);
|
||||
primary_key_per_index.insert(key, primary_key);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Build metadata for local results
|
||||
for local_meta in results_by_index {
|
||||
if let SearchResultByIndex { index, primary_key: Some(primary_key), .. } = local_meta {
|
||||
let key = (None, index);
|
||||
primary_key_per_index.insert(key, primary_key);
|
||||
}
|
||||
}
|
||||
|
||||
// Build metadata in the same order as the original queries
|
||||
let mut query_metadata = Vec::new();
|
||||
for (index_uid, remote) in precomputed_query_metadata {
|
||||
let primary_key =
|
||||
primary_key_per_index.get(&(remote.as_ref(), &index_uid)).map(|pk| pk.to_string());
|
||||
let query_uid = Uuid::now_v7();
|
||||
// if the remote is not set, use the local remote name
|
||||
let remote = remote.or_else(|| local_remote_name.clone());
|
||||
query_metadata.push(SearchMetadata { query_uid, primary_key, index_uid, remote });
|
||||
}
|
||||
query_metadata
|
||||
}
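The doc comment above explains that `build_query_metadata` keys its primary-key lookup by `(remote, index_uid)` so that remotes sharing an index uid cannot overwrite each other's primary key. A standalone sketch of that keying, using plain `&str` stand-ins rather than the real types:

use std::collections::HashMap;

fn main() {
    // Key by (remote, index_uid) rather than index_uid alone.
    let mut primary_key_per_index: HashMap<(Option<&str>, &str), &str> = HashMap::new();

    // A local index and two remotes all expose an index named "products".
    primary_key_per_index.insert((None, "products"), "sku");
    primary_key_per_index.insert((Some("remote-a"), "products"), "id");
    primary_key_per_index.insert((Some("remote-b"), "products"), "reference");

    // Each query resolves its own primary key despite the shared index uid.
    assert_eq!(primary_key_per_index.get(&(Some("remote-a"), "products")), Some(&"id"));
    assert_eq!(primary_key_per_index.get(&(None, "products")), Some(&"sku"));
}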
|
||||
|
||||
fn merge_metadata(
|
||||
results_by_index: &mut Vec<SearchResultByIndex>,
|
||||
remote_results: &Vec<FederatedSearchResult>,
|
||||
@@ -511,7 +411,6 @@ fn merge_metadata(
|
||||
let mut max_remote_duration = Duration::ZERO;
|
||||
for SearchResultByIndex {
|
||||
index,
|
||||
primary_key: _,
|
||||
hits: _,
|
||||
estimated_total_hits: estimated_total_hits_by_index,
|
||||
facets: facets_by_index,
|
||||
@@ -540,8 +439,6 @@ fn merge_metadata(
|
||||
degraded: degraded_for_host,
|
||||
used_negative_operator: host_used_negative_operator,
|
||||
remote_errors: _,
|
||||
metadata: _,
|
||||
request_uid: _,
|
||||
} in remote_results
|
||||
{
|
||||
let this_remote_duration = Duration::from_millis(*processing_time_ms as u64);
|
||||
@@ -559,7 +456,7 @@ fn merge_metadata(
|
||||
}
|
||||
|
||||
type LocalQueriesByIndex = BTreeMap<String, Vec<QueryByIndex>>;
|
||||
type RemoteQueriesByHost = BTreeMap<String, (Remote, Vec<SearchQueryWithIndex>)>;
|
||||
type RemoteQueriesByHost = BTreeMap<String, (DbRemote, Vec<SearchQueryWithIndex>)>;
|
||||
|
||||
struct PartitionedQueries {
|
||||
local_queries_by_index: LocalQueriesByIndex,
|
||||
@@ -580,7 +477,7 @@ impl PartitionedQueries {
|
||||
&mut self,
|
||||
federated_query: SearchQueryWithIndex,
|
||||
query_index: usize,
|
||||
network: &Network,
|
||||
network: &DbNetwork,
|
||||
features: RoFeatures,
|
||||
) -> Result<(), ResponseError> {
|
||||
if let Some(pagination_field) = federated_query.has_pagination() {
|
||||
@@ -601,10 +498,6 @@ impl PartitionedQueries {
|
||||
.into());
|
||||
}
|
||||
|
||||
if federated_query.has_personalize() {
|
||||
return Err(MeilisearchHttpError::PersonalizationInFederatedQuery(query_index).into());
|
||||
}
|
||||
|
||||
let (index_uid, query, federation_options) = federated_query.into_index_query_federation();
|
||||
|
||||
let federation_options = federation_options.unwrap_or_default();
|
||||
@@ -673,12 +566,7 @@ struct RemoteSearch {
|
||||
}
|
||||
|
||||
impl RemoteSearch {
|
||||
fn start(
|
||||
queries: RemoteQueriesByHost,
|
||||
federation: &Federation,
|
||||
deadline: Instant,
|
||||
include_metadata: bool,
|
||||
) -> Self {
|
||||
fn start(queries: RemoteQueriesByHost, federation: &Federation, deadline: Instant) -> Self {
|
||||
let mut in_flight_remote_queries = BTreeMap::new();
|
||||
let client = reqwest::ClientBuilder::new()
|
||||
.connect_timeout(std::time::Duration::from_millis(200))
|
||||
@@ -698,10 +586,7 @@ impl RemoteSearch {
|
||||
// never merge distant facets
|
||||
proxy_federation.merge_facets = None;
|
||||
let params = params.clone();
|
||||
async move {
|
||||
proxy_search(&node, queries, proxy_federation, ¶ms, include_metadata)
|
||||
.await
|
||||
}
|
||||
async move { proxy_search(&node, queries, proxy_federation, ¶ms).await }
|
||||
}),
|
||||
);
|
||||
}
|
||||
@@ -745,13 +630,6 @@ impl RemoteSearch {
|
||||
continue 'remote_queries;
|
||||
}
|
||||
|
||||
// Add remote name to metadata
|
||||
if let Some(metadata) = res.metadata.as_mut() {
|
||||
for meta in metadata {
|
||||
meta.remote = Some(node_name.clone());
|
||||
}
|
||||
}
|
||||
|
||||
federation.insert(
|
||||
FEDERATION_REMOTE.to_string(),
|
||||
serde_json::Value::String(node_name.clone()),
|
||||
@@ -794,7 +672,7 @@ struct SearchByIndexParams<'a> {
|
||||
features: RoFeatures,
|
||||
is_proxy: bool,
|
||||
has_remote: bool,
|
||||
network: &'a Network,
|
||||
network: &'a DbNetwork,
|
||||
}
|
||||
|
||||
struct SearchByIndex {
|
||||
@@ -847,7 +725,6 @@ impl SearchByIndex {
|
||||
}
|
||||
};
|
||||
let rtxn = index.read_txn()?;
|
||||
let primary_key = index.primary_key(&rtxn)?.map(|pk| pk.to_string());
|
||||
let criteria = index.criteria(&rtxn)?;
|
||||
let dictionary = index.dictionary(&rtxn)?;
|
||||
let dictionary: Option<Vec<_>> =
|
||||
@@ -874,18 +751,11 @@ impl SearchByIndex {
|
||||
return Err(error);
|
||||
}
|
||||
let mut results_by_query = Vec::with_capacity(queries.len());
|
||||
|
||||
// all queries for an index share the same budget
|
||||
let time_budget = match cutoff {
|
||||
Some(cutoff) => TimeBudget::new(Duration::from_millis(cutoff)),
|
||||
None => TimeBudget::default(),
|
||||
};
|
||||
for QueryByIndex { query, weight, query_index } in queries {
|
||||
// use an immediately invoked lambda to capture the result without returning from the function
|
||||
|
||||
let res: Result<(), ResponseError> = (|| {
|
||||
let search_kind =
|
||||
search_kind(&query, params.index_scheduler, index_uid.to_string(), &index)?;
|
||||
let search_kind = search_kind(&query, params.index_scheduler, &index_uid, &index)?;
|
||||
|
||||
let canonicalization_kind = match (&search_kind, &query.q) {
|
||||
(SearchKind::SemanticOnly { .. }, _) => {
|
||||
@@ -935,11 +805,11 @@ impl SearchByIndex {
|
||||
{
|
||||
Some((previous_ranking_rules, previous_query_index, previous_index_uid))
|
||||
} else {
|
||||
Some((ranking_rules, query_index, index_uid.clone()))
|
||||
Some((ranking_rules, query_index, index_uid.to_string()))
|
||||
};
|
||||
} else {
|
||||
self.previous_query_data =
|
||||
Some((ranking_rules, query_index, index_uid.clone()));
|
||||
Some((ranking_rules, query_index, index_uid.to_string()));
|
||||
}
|
||||
|
||||
match search_kind {
|
||||
@@ -949,13 +819,17 @@ impl SearchByIndex {
|
||||
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
let time_budget = match cutoff {
|
||||
Some(cutoff) => TimeBudget::new(Duration::from_millis(cutoff)),
|
||||
None => TimeBudget::default(),
|
||||
};
|
||||
|
||||
let (mut search, _is_finite_pagination, _max_total_hits, _offset) = prepare_search(
|
||||
&index,
|
||||
&rtxn,
|
||||
&query,
|
||||
&search_kind,
|
||||
// clones of `TimeBudget` share the budget rather than restart it
|
||||
time_budget.clone(),
|
||||
time_budget,
|
||||
params.features,
|
||||
)?;
|
||||
|
||||
@@ -964,7 +838,7 @@ impl SearchByIndex {
|
||||
search.limit(params.required_hit_count);
|
||||
|
||||
let (result, _semantic_hit_count) =
|
||||
super::super::search_from_kind(index_uid.to_string(), search_kind, search)?;
|
||||
super::super::search_from_kind(&index_uid, search_kind, search)?;
|
||||
let format = AttributesFormat {
|
||||
attributes_to_retrieve: query.attributes_to_retrieve,
|
||||
retrieve_vectors,
|
||||
@@ -1102,7 +976,6 @@ impl SearchByIndex {
|
||||
})?;
|
||||
self.results_by_index.push(SearchResultByIndex {
|
||||
index: index_uid,
|
||||
primary_key,
|
||||
hits: merged_result,
|
||||
estimated_total_hits,
|
||||
degraded,
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
pub use error::ProxySearchError;
|
||||
use error::ReqwestErrorWithoutUrl;
|
||||
use meilisearch_types::enterprise_edition::network::Remote;
|
||||
use meilisearch_types::enterprise_edition::network::DbRemote;
|
||||
use rand::Rng as _;
|
||||
use reqwest::{Client, Response, StatusCode};
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::Value;
|
||||
|
||||
use super::types::{FederatedSearch, FederatedSearchResult, Federation};
|
||||
use crate::search::{SearchQueryWithIndex, INCLUDE_METADATA_HEADER};
|
||||
use crate::search::SearchQueryWithIndex;
|
||||
|
||||
pub const PROXY_SEARCH_HEADER: &str = "Meili-Proxy-Search";
|
||||
pub const PROXY_SEARCH_HEADER_VALUE: &str = "true";
|
||||
@@ -94,11 +94,10 @@ pub struct ProxySearchParams {
|
||||
|
||||
/// Performs a federated search on a remote host and returns the results
|
||||
pub async fn proxy_search(
|
||||
node: &Remote,
|
||||
node: &DbRemote,
|
||||
queries: Vec<SearchQueryWithIndex>,
|
||||
federation: Federation,
|
||||
params: &ProxySearchParams,
|
||||
include_metadata: bool,
|
||||
) -> Result<FederatedSearchResult, ProxySearchError> {
|
||||
let url = format!("{}/multi-search", node.url);
|
||||
|
||||
@@ -106,12 +105,7 @@ pub async fn proxy_search(
|
||||
|
||||
let search_api_key = node.search_api_key.as_deref();
|
||||
|
||||
let timeout = std::env::var("MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS")
|
||||
.ok()
|
||||
.map(|p| p.parse().unwrap())
|
||||
.unwrap_or(25);
|
||||
|
||||
let max_deadline = std::time::Instant::now() + std::time::Duration::from_secs(timeout);
|
||||
let max_deadline = std::time::Instant::now() + std::time::Duration::from_secs(5);
|
||||
|
||||
let deadline = if let Some(deadline) = params.deadline {
|
||||
std::time::Instant::min(deadline, max_deadline)
|
||||
@@ -120,16 +114,7 @@ pub async fn proxy_search(
|
||||
};
|
||||
|
||||
for i in 0..params.try_count {
|
||||
match try_proxy_search(
|
||||
&url,
|
||||
search_api_key,
|
||||
&federated,
|
||||
¶ms.client,
|
||||
deadline,
|
||||
include_metadata,
|
||||
)
|
||||
.await
|
||||
{
|
||||
match try_proxy_search(&url, search_api_key, &federated, ¶ms.client, deadline).await {
|
||||
Ok(response) => return Ok(response),
|
||||
Err(retry) => {
|
||||
let duration = retry.into_duration(i)?;
|
||||
@@ -137,7 +122,7 @@ pub async fn proxy_search(
|
||||
}
|
||||
}
|
||||
}
|
||||
try_proxy_search(&url, search_api_key, &federated, ¶ms.client, deadline, include_metadata)
|
||||
try_proxy_search(&url, search_api_key, &federated, ¶ms.client, deadline)
|
||||
.await
|
||||
.map_err(Retry::into_error)
|
||||
}
|
||||
@@ -148,7 +133,6 @@ async fn try_proxy_search(
|
||||
federated: &FederatedSearch,
|
||||
client: &Client,
|
||||
deadline: std::time::Instant,
|
||||
include_metadata: bool,
|
||||
) -> Result<FederatedSearchResult, Retry> {
|
||||
let timeout = deadline.saturating_duration_since(std::time::Instant::now());
|
||||
|
||||
@@ -159,8 +143,6 @@ async fn try_proxy_search(
|
||||
request
|
||||
};
|
||||
let request = request.header(PROXY_SEARCH_HEADER, PROXY_SEARCH_HEADER_VALUE);
|
||||
let request =
|
||||
if include_metadata { request.header(INCLUDE_METADATA_HEADER, "true") } else { request };
|
||||
|
||||
let response = request.send().await;
|
||||
let response = match response {
|
||||
|
||||
@@ -16,9 +16,6 @@ use meilisearch_types::milli::order_by_map::OrderByMap;
|
||||
use meilisearch_types::milli::OrderBy;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::search::SearchMetadata;
|
||||
|
||||
use super::super::{ComputedFacets, FacetStats, HitsInfo, SearchHit, SearchQueryWithIndex};
|
||||
use crate::milli::vector::Embedding;
|
||||
@@ -134,10 +131,6 @@ pub struct FederatedSearchResult {
|
||||
pub facet_stats: Option<BTreeMap<String, FacetStats>>,
|
||||
#[serde(default, skip_serializing_if = "FederatedFacets::is_empty")]
|
||||
pub facets_by_index: FederatedFacets,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub request_uid: Option<Uuid>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub metadata: Option<Vec<SearchMetadata>>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub remote_errors: Option<BTreeMap<String, ResponseError>>,
|
||||
@@ -163,8 +156,6 @@ impl fmt::Debug for FederatedSearchResult {
|
||||
facet_stats,
|
||||
facets_by_index,
|
||||
remote_errors,
|
||||
request_uid,
|
||||
metadata,
|
||||
} = self;
|
||||
|
||||
let mut debug = f.debug_struct("SearchResult");
|
||||
@@ -197,12 +188,6 @@ impl fmt::Debug for FederatedSearchResult {
|
||||
if let Some(remote_errors) = remote_errors {
|
||||
debug.field("remote_errors", &remote_errors);
|
||||
}
|
||||
if let Some(request_uid) = request_uid {
|
||||
debug.field("request_uid", &request_uid);
|
||||
}
|
||||
if let Some(metadata) = metadata {
|
||||
debug.field("metadata", &metadata);
|
||||
}
|
||||
|
||||
debug.finish()
|
||||
}
|
||||
|
||||
@@ -36,7 +36,6 @@ use serde_json::{json, Value};
|
||||
#[cfg(test)]
|
||||
mod mod_test;
|
||||
use utoipa::ToSchema;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::error::MeilisearchHttpError;
|
||||
|
||||
@@ -57,14 +56,6 @@ pub const DEFAULT_CROP_MARKER: fn() -> String = || "…".to_string();
|
||||
pub const DEFAULT_HIGHLIGHT_PRE_TAG: fn() -> String = || "<em>".to_string();
|
||||
pub const DEFAULT_HIGHLIGHT_POST_TAG: fn() -> String = || "</em>".to_string();
|
||||
pub const DEFAULT_SEMANTIC_RATIO: fn() -> SemanticRatio = || SemanticRatio(0.5);
|
||||
pub const INCLUDE_METADATA_HEADER: &str = "Meili-Include-Metadata";
|
||||
|
||||
#[derive(Clone, Default, PartialEq, Deserr, ToSchema, Debug)]
|
||||
#[deserr(error = DeserrJsonError<InvalidSearchPersonalize>, rename_all = camelCase, deny_unknown_fields)]
|
||||
pub struct Personalize {
|
||||
#[deserr(error = DeserrJsonError<InvalidSearchPersonalizeUserContext>)]
|
||||
pub user_context: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default, PartialEq, Deserr, ToSchema)]
|
||||
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
|
||||
@@ -129,8 +120,6 @@ pub struct SearchQuery {
|
||||
pub ranking_score_threshold: Option<RankingScoreThreshold>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchLocales>)]
|
||||
pub locales: Option<Vec<Locale>>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchPersonalize>, default)]
|
||||
pub personalize: Option<Personalize>,
|
||||
}
|
||||
|
||||
impl From<SearchParameters> for SearchQuery {
|
||||
@@ -178,7 +167,6 @@ impl From<SearchParameters> for SearchQuery {
|
||||
highlight_post_tag: DEFAULT_HIGHLIGHT_POST_TAG(),
|
||||
crop_marker: DEFAULT_CROP_MARKER(),
|
||||
locales: None,
|
||||
personalize: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -260,7 +248,6 @@ impl fmt::Debug for SearchQuery {
|
||||
attributes_to_search_on,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
personalize,
|
||||
} = self;
|
||||
|
||||
let mut debug = f.debug_struct("SearchQuery");
|
||||
@@ -349,10 +336,6 @@ impl fmt::Debug for SearchQuery {
|
||||
debug.field("locales", &locales);
|
||||
}
|
||||
|
||||
if let Some(personalize) = personalize {
|
||||
debug.field("personalize", &personalize);
|
||||
}
|
||||
|
||||
debug.finish()
|
||||
}
|
||||
}
|
||||
@@ -379,7 +362,7 @@ pub enum SearchKind {
|
||||
impl SearchKind {
|
||||
pub(crate) fn semantic(
|
||||
index_scheduler: &index_scheduler::IndexScheduler,
|
||||
index_uid: String,
|
||||
index_uid: &str,
|
||||
index: &Index,
|
||||
embedder_name: &str,
|
||||
vector_len: Option<usize>,
|
||||
@@ -397,7 +380,7 @@ impl SearchKind {
|
||||
|
||||
pub(crate) fn hybrid(
|
||||
index_scheduler: &index_scheduler::IndexScheduler,
|
||||
index_uid: String,
|
||||
index_uid: &str,
|
||||
index: &Index,
|
||||
embedder_name: &str,
|
||||
semantic_ratio: f32,
|
||||
@@ -416,7 +399,7 @@ impl SearchKind {
|
||||
|
||||
pub(crate) fn embedder(
|
||||
index_scheduler: &index_scheduler::IndexScheduler,
|
||||
index_uid: String,
|
||||
index_uid: &str,
|
||||
index: &Index,
|
||||
embedder_name: &str,
|
||||
vector_len: Option<usize>,
|
||||
@@ -558,9 +541,6 @@ pub struct SearchQueryWithIndex {
|
||||
pub ranking_score_threshold: Option<RankingScoreThreshold>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchLocales>, default)]
|
||||
pub locales: Option<Vec<Locale>>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchPersonalize>, default)]
|
||||
#[serde(skip)]
|
||||
pub personalize: Option<Personalize>,
|
||||
|
||||
#[deserr(default)]
|
||||
pub federation_options: Option<FederationOptions>,
|
||||
@@ -585,10 +565,6 @@ impl SearchQueryWithIndex {
|
||||
self.facets.as_deref().filter(|v| !v.is_empty())
|
||||
}
|
||||
|
||||
pub fn has_personalize(&self) -> bool {
|
||||
self.personalize.is_some()
|
||||
}
|
||||
|
||||
pub fn from_index_query_federation(
|
||||
index_uid: IndexUid,
|
||||
query: SearchQuery,
|
||||
@@ -622,7 +598,6 @@ impl SearchQueryWithIndex {
|
||||
attributes_to_search_on,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
personalize,
|
||||
} = query;
|
||||
|
||||
SearchQueryWithIndex {
|
||||
@@ -654,7 +629,6 @@ impl SearchQueryWithIndex {
|
||||
attributes_to_search_on,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
personalize,
|
||||
federation_options,
|
||||
}
|
||||
}
|
||||
@@ -690,7 +664,6 @@ impl SearchQueryWithIndex {
|
||||
hybrid,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
personalize,
|
||||
} = self;
|
||||
(
|
||||
index_uid,
|
||||
@@ -722,7 +695,6 @@ impl SearchQueryWithIndex {
|
||||
hybrid,
|
||||
ranking_score_threshold,
|
||||
locales,
|
||||
personalize,
|
||||
// do not use ..Default::default() here,
|
||||
// rather add any missing field from `SearchQuery` to `SearchQueryWithIndex`
|
||||
},
|
||||
@@ -863,18 +835,6 @@ pub struct SearchHit {
|
||||
pub ranking_score_details: Option<serde_json::Map<String, serde_json::Value>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
pub struct SearchMetadata {
|
||||
pub query_uid: Uuid,
|
||||
pub index_uid: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub primary_key: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub remote: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone, PartialEq, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
@@ -891,10 +851,6 @@ pub struct SearchResult {
|
||||
pub facet_distribution: Option<BTreeMap<String, IndexMap<String, u64>>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub facet_stats: Option<BTreeMap<String, FacetStats>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub request_uid: Option<Uuid>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub metadata: Option<SearchMetadata>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub semantic_hit_count: Option<u32>,
|
||||
@@ -916,8 +872,6 @@ impl fmt::Debug for SearchResult {
|
||||
hits_info,
|
||||
facet_distribution,
|
||||
facet_stats,
|
||||
request_uid,
|
||||
metadata,
|
||||
semantic_hit_count,
|
||||
degraded,
|
||||
used_negative_operator,
|
||||
@@ -947,12 +901,6 @@ impl fmt::Debug for SearchResult {
|
||||
if let Some(semantic_hit_count) = semantic_hit_count {
|
||||
debug.field("semantic_hit_count", &semantic_hit_count);
|
||||
}
|
||||
if let Some(request_uid) = request_uid {
|
||||
debug.field("request_uid", &request_uid);
|
||||
}
|
||||
if let Some(metadata) = metadata {
|
||||
debug.field("metadata", &metadata);
|
||||
}
|
||||
|
||||
debug.finish()
|
||||
}
|
||||
@@ -1165,31 +1113,15 @@ pub fn prepare_search<'t>(
|
||||
Ok((search, is_finite_pagination, max_total_hits, offset))
|
||||
}
|
||||
|
||||
pub struct SearchParams {
|
||||
pub index_uid: String,
|
||||
pub query: SearchQuery,
|
||||
pub search_kind: SearchKind,
|
||||
pub retrieve_vectors: RetrieveVectors,
|
||||
pub features: RoFeatures,
|
||||
pub request_uid: Uuid,
|
||||
pub include_metadata: bool,
|
||||
}
|
||||
|
||||
pub fn perform_search(
|
||||
params: SearchParams,
|
||||
index_uid: &str,
|
||||
index: &Index,
|
||||
) -> Result<(SearchResult, TimeBudget), ResponseError> {
|
||||
let SearchParams {
|
||||
index_uid,
|
||||
query,
|
||||
search_kind,
|
||||
retrieve_vectors,
|
||||
features,
|
||||
request_uid,
|
||||
include_metadata,
|
||||
} = params;
|
||||
query: SearchQuery,
|
||||
search_kind: SearchKind,
|
||||
retrieve_vectors: RetrieveVectors,
|
||||
features: RoFeatures,
|
||||
) -> Result<SearchResult, ResponseError> {
|
||||
let before_search = Instant::now();
|
||||
let index_uid_for_metadata = index_uid.clone();
|
||||
let rtxn = index.read_txn()?;
|
||||
let time_budget = match index.search_cutoff(&rtxn)? {
|
||||
Some(cutoff) => TimeBudget::new(Duration::from_millis(cutoff)),
|
||||
@@ -1197,7 +1129,7 @@ pub fn perform_search(
|
||||
};
|
||||
|
||||
let (search, is_finite_pagination, max_total_hits, offset) =
|
||||
prepare_search(index, &rtxn, &query, &search_kind, time_budget.clone(), features)?;
|
||||
prepare_search(index, &rtxn, &query, &search_kind, time_budget, features)?;
|
||||
|
||||
let (
|
||||
milli::SearchResult {
|
||||
@@ -1210,20 +1142,7 @@ pub fn perform_search(
|
||||
query_vector,
|
||||
},
|
||||
semantic_hit_count,
|
||||
) = search_from_kind(index_uid.clone(), search_kind, search)?;
|
||||
|
||||
let metadata = if include_metadata {
|
||||
let query_uid = Uuid::now_v7();
|
||||
let primary_key = index.primary_key(&rtxn)?.map(|pk| pk.to_string());
|
||||
Some(SearchMetadata {
|
||||
query_uid,
|
||||
index_uid: index_uid_for_metadata,
|
||||
primary_key,
|
||||
remote: None, // Local searches don't have a remote
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
) = search_from_kind(index_uid, search_kind, search)?;
|
||||
|
||||
let SearchQuery {
|
||||
q,
|
||||
@@ -1255,7 +1174,6 @@ pub fn perform_search(
|
||||
attributes_to_search_on: _,
|
||||
filter: _,
|
||||
distinct: _,
|
||||
personalize: _,
|
||||
} = query;
|
||||
|
||||
let format = AttributesFormat {
|
||||
@@ -1307,6 +1225,7 @@ pub fn perform_search(
|
||||
.transpose()?
|
||||
.map(|ComputedFacets { distribution, stats }| (distribution, stats))
|
||||
.unzip();
|
||||
|
||||
let result = SearchResult {
|
||||
hits: documents,
|
||||
hits_info,
|
||||
@@ -1318,10 +1237,8 @@ pub fn perform_search(
|
||||
degraded,
|
||||
used_negative_operator,
|
||||
semantic_hit_count,
|
||||
request_uid: Some(request_uid),
|
||||
metadata,
|
||||
};
|
||||
Ok((result, time_budget))
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize, ToSchema)]
|
||||
@@ -1382,27 +1299,24 @@ fn compute_facet_distribution_stats<S: AsRef<str>>(
|
||||
}
|
||||
|
||||
pub fn search_from_kind(
|
||||
index_uid: String,
|
||||
index_uid: &str,
|
||||
search_kind: SearchKind,
|
||||
search: milli::Search<'_>,
|
||||
) -> Result<(milli::SearchResult, Option<u32>), MeilisearchHttpError> {
|
||||
let err = |e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string()));
|
||||
let (milli_result, semantic_hit_count) = match &search_kind {
|
||||
SearchKind::KeywordOnly => {
|
||||
let results = search
|
||||
.execute()
|
||||
.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string())))?;
|
||||
let results = search.execute().map_err(err)?;
|
||||
(results, None)
|
||||
}
|
||||
SearchKind::SemanticOnly { .. } => {
|
||||
let results = search
|
||||
.execute()
|
||||
.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string())))?;
|
||||
let results = search.execute().map_err(err)?;
|
||||
let semantic_hit_count = results.document_scores.len() as u32;
|
||||
(results, Some(semantic_hit_count))
|
||||
}
|
||||
SearchKind::Hybrid { semantic_ratio, .. } => search
|
||||
.execute_hybrid(*semantic_ratio)
|
||||
.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid)))?,
|
||||
SearchKind::Hybrid { semantic_ratio, .. } => {
|
||||
search.execute_hybrid(*semantic_ratio).map_err(err)?
|
||||
}
|
||||
};
|
||||
Ok((milli_result, semantic_hit_count))
|
||||
}
|
||||
|
||||
@@ -419,14 +419,14 @@ async fn error_add_api_key_invalid_parameters_actions() {
|
||||
let (response, code) = server.add_api_key(content).await;
|
||||
|
||||
meili_snap::snapshot!(code, @"400 Bad Request");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r###"
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r#"
|
||||
{
|
||||
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`, `indexes.compact`",
|
||||
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
|
||||
"code": "invalid_api_key_actions",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
|
||||
@@ -91,14 +91,14 @@ async fn create_api_key_bad_actions() {
|
||||
// can't parse
|
||||
let (response, code) = server.add_api_key(json!({ "actions": ["doggo"] })).await;
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
snapshot!(json_string!(response), @r#"
|
||||
{
|
||||
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`, `indexes.compact`",
|
||||
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
|
||||
"code": "invalid_api_key_actions",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
|
||||
@@ -42,7 +42,7 @@ async fn batch_bad_types() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
|
||||
@@ -91,16 +91,7 @@ impl<'a> Index<'a, Owned> {
documents: Value,
primary_key: Option<&str>,
) -> (Value, StatusCode) {
self._add_documents(documents, primary_key, None).await
}

pub async fn add_documents_with_custom_metadata(
&self,
documents: Value,
primary_key: Option<&str>,
custom_metadata: Option<&str>,
) -> (Value, StatusCode) {
self._add_documents(documents, primary_key, custom_metadata).await
self._add_documents(documents, primary_key).await
}

pub async fn raw_add_documents(
@@ -361,25 +352,12 @@ impl<State> Index<'_, State> {
&self,
documents: Value,
primary_key: Option<&str>,
custom_metadata: Option<&str>,
) -> (Value, StatusCode) {
let url = match (primary_key, custom_metadata) {
(Some(key), Some(meta)) => {
format!(
"/indexes/{}/documents?primaryKey={key}&customMetadata={meta}",
urlencode(self.uid.as_ref()),
)
let url = match primary_key {
Some(key) => {
format!("/indexes/{}/documents?primaryKey={}", urlencode(self.uid.as_ref()), key)
}
(None, Some(meta)) => {
format!(
"/indexes/{}/documents?&customMetadata={meta}",
urlencode(self.uid.as_ref()),
)
}
(Some(key), None) => {
format!("/indexes/{}/documents?&primaryKey={key}", urlencode(self.uid.as_ref()),)
}
(None, None) => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
None => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
};
self.service.post_encoded(url, documents, self.encoder).await
}

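As a reading aid for the hunk above, here is a standalone version of the same `(primary_key, custom_metadata)` branching. `urlencode` from the test harness is omitted and the stray `?&` separators are normalized to `?`, so this is an illustration rather than the harness code itself.

// Illustrative URL builder mirroring the match in `_add_documents` above.
fn documents_url(uid: &str, primary_key: Option<&str>, custom_metadata: Option<&str>) -> String {
    match (primary_key, custom_metadata) {
        (Some(key), Some(meta)) => {
            format!("/indexes/{uid}/documents?primaryKey={key}&customMetadata={meta}")
        }
        (None, Some(meta)) => format!("/indexes/{uid}/documents?customMetadata={meta}"),
        (Some(key), None) => format!("/indexes/{uid}/documents?primaryKey={key}"),
        (None, None) => format!("/indexes/{uid}/documents"),
    }
}

fn main() {
    assert_eq!(
        documents_url("movies", Some("id"), Some("batch-42")),
        "/indexes/movies/documents?primaryKey=id&customMetadata=batch-42"
    );
    assert_eq!(documents_url("movies", None, None), "/indexes/movies/documents");
}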
@@ -538,18 +516,6 @@ impl<State> Index<'_, State> {
|
||||
self.service.post_encoded(url, query, self.encoder).await
|
||||
}
|
||||
|
||||
pub async fn search_with_headers(
|
||||
&self,
|
||||
query: Value,
|
||||
headers: Vec<(&str, &str)>,
|
||||
) -> (Value, StatusCode) {
|
||||
let url = format!("/indexes/{}/search", urlencode(self.uid.as_ref()));
|
||||
let body = serde_json::to_string(&query).unwrap();
|
||||
let mut all_headers = vec![("content-type", "application/json")];
|
||||
all_headers.extend(headers);
|
||||
self.service.post_str(url, body, all_headers).await
|
||||
}
|
||||
|
||||
pub async fn search_get(&self, query: &str) -> (Value, StatusCode) {
|
||||
let url = format!("/indexes/{}/search{}", urlencode(self.uid.as_ref()), query);
|
||||
self.service.get(url).await
|
||||
|
||||
@@ -241,7 +241,7 @@ pub async fn shared_index_with_documents() -> &'static Index<'static, Shared> {
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_DOCUMENTS").to_shared();
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (response, _code) = index._add_documents(documents, None, None).await;
|
||||
let (response, _code) = index._add_documents(documents, None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
let (response, _code) = index
|
||||
._update_settings(
|
||||
@@ -284,7 +284,7 @@ pub async fn shared_index_with_score_documents() -> &'static Index<'static, Shar
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_SCORE_DOCUMENTS").to_shared();
|
||||
let documents = SCORE_DOCUMENTS.clone();
|
||||
let (response, _code) = index._add_documents(documents, None, None).await;
|
||||
let (response, _code) = index._add_documents(documents, None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
let (response, _code) = index
|
||||
._update_settings(
|
||||
@@ -361,7 +361,7 @@ pub async fn shared_index_with_nested_documents() -> &'static Index<'static, Sha
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_NESTED_DOCUMENTS").to_shared();
|
||||
let documents = NESTED_DOCUMENTS.clone();
|
||||
let (response, _code) = index._add_documents(documents, None, None).await;
|
||||
let (response, _code) = index._add_documents(documents, None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
let (response, _code) = index
|
||||
._update_settings(
|
||||
@@ -508,7 +508,7 @@ pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared
|
||||
.get_or_init(|| async {
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_GEO_DOCUMENTS").to_shared();
|
||||
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None, None).await;
|
||||
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (response, _code) = index
|
||||
@@ -531,7 +531,7 @@ pub async fn shared_index_geojson_documents() -> &'static Index<'static, Shared>
|
||||
let index = server._index("SHARED_GEOJSON_DOCUMENTS").to_shared();
|
||||
let countries = include_str!("../documents/geojson/assets/countries.json");
|
||||
let lille = serde_json::from_str::<serde_json::Value>(countries).unwrap();
|
||||
let (response, _code) = index._add_documents(Value(lille), Some("name"), None).await;
|
||||
let (response, _code) = index._add_documents(Value(lille), Some("name")).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (response, _code) =
|
||||
|
||||
@@ -395,17 +395,6 @@ impl<State> Server<State> {
|
||||
self.service.post("/multi-search", queries).await
|
||||
}
|
||||
|
||||
pub async fn multi_search_with_headers(
|
||||
&self,
|
||||
queries: Value,
|
||||
headers: Vec<(&str, &str)>,
|
||||
) -> (Value, StatusCode) {
|
||||
let body = serde_json::to_string(&queries).unwrap();
|
||||
let mut all_headers = vec![("content-type", "application/json")];
|
||||
all_headers.extend(headers);
|
||||
self.service.post_str("/multi-search", body, all_headers).await
|
||||
}
|
||||
|
||||
pub async fn list_indexes_raw(&self, parameters: &str) -> (Value, StatusCode) {
|
||||
self.service.get(format!("/indexes{parameters}")).await
|
||||
}
|
||||
@@ -506,8 +495,6 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
|
||||
max_indexing_threads: MaxThreads::from_str("2").unwrap(),
|
||||
experimental_no_edition_2024_for_settings: false,
|
||||
experimental_no_edition_2024_for_dumps: false,
|
||||
experimental_no_edition_2024_for_prefix_post_processing: false,
|
||||
experimental_no_edition_2024_for_facet_post_processing: false,
|
||||
},
|
||||
experimental_enable_metrics: false,
|
||||
..Parser::parse_from(None as Option<&str>)
|
||||
|
||||
@@ -10,9 +10,8 @@ use actix_web::test::TestRequest;
|
||||
use actix_web::web::Data;
|
||||
use index_scheduler::IndexScheduler;
|
||||
use meilisearch::analytics::Analytics;
|
||||
use meilisearch::personalization::PersonalizationService;
|
||||
use meilisearch::search_queue::SearchQueue;
|
||||
use meilisearch::{create_app, Opt, ServicesData, SubscriberForSecondLayer};
|
||||
use meilisearch::{create_app, Opt, SubscriberForSecondLayer};
|
||||
use meilisearch_auth::AuthController;
|
||||
use tracing::level_filters::LevelFilter;
|
||||
use tracing_subscriber::Layer;
|
||||
@@ -136,24 +135,14 @@ impl Service {
|
||||
self.options.experimental_search_queue_size,
|
||||
NonZeroUsize::new(1).unwrap(),
|
||||
);
|
||||
let personalization_service = self
|
||||
.options
|
||||
.experimental_personalization_api_key
|
||||
.clone()
|
||||
.map(PersonalizationService::cohere)
|
||||
.unwrap_or_else(PersonalizationService::disabled);
|
||||
|
||||
actix_web::test::init_service(create_app(
|
||||
ServicesData {
|
||||
index_scheduler: self.index_scheduler.clone().into(),
|
||||
auth: self.auth.clone().into(),
|
||||
search_queue: Data::new(search_queue),
|
||||
personalization_service: Data::new(personalization_service),
|
||||
logs_route_handle: Data::new(route_layer_handle),
|
||||
logs_stderr_handle: Data::new(stderr_layer_handle),
|
||||
analytics: Data::new(Analytics::no_analytics()),
|
||||
},
|
||||
self.index_scheduler.clone().into(),
|
||||
self.auth.clone().into(),
|
||||
Data::new(search_queue),
|
||||
self.options.clone(),
|
||||
(route_layer_handle, stderr_layer_handle),
|
||||
Data::new(Analytics::no_analytics()),
|
||||
true,
|
||||
))
|
||||
.await
|
||||
|
||||
@@ -1853,7 +1853,7 @@ async fn add_documents_with_geo_field() {
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
// we are expecting docs 4 and 3 first as they have geo
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
|
||||
@r###"
|
||||
{
|
||||
"hits": [
|
||||
@@ -1885,8 +1885,7 @@ async fn add_documents_with_geo_field() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 4,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 4
|
||||
}
|
||||
"###);
|
||||
}
|
||||
@@ -1941,7 +1940,7 @@ async fn update_documents_with_geo_field() {
|
||||
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
// we are expecting docs 4 and 3 first as they have geo
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
|
||||
@r###"
|
||||
{
|
||||
"hits": [
|
||||
@@ -1973,8 +1972,7 @@ async fn update_documents_with_geo_field() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 4,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 4
|
||||
}
|
||||
"###);
|
||||
|
||||
@@ -2046,7 +2044,7 @@ async fn update_documents_with_geo_field() {
|
||||
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
// the search response should not have changed: we are expecting docs 4 and 3 first as they have geo
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
|
||||
@r###"
|
||||
{
|
||||
"hits": [
|
||||
@@ -2079,8 +2077,7 @@ async fn update_documents_with_geo_field() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 4,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 4
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
@@ -14,18 +14,17 @@ async fn basic_add_settings_and_geojson_documents() {
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
let (response, _) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
|
||||
@r###"
|
||||
snapshot!(response,
|
||||
@r#"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
|
||||
let lille: serde_json::Value = serde_json::from_str(LILLE).unwrap();
|
||||
let documents = json!([
|
||||
@@ -93,8 +92,8 @@ async fn basic_add_settings_and_geojson_documents() {
|
||||
"#);
|
||||
|
||||
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
|
||||
@r###"
|
||||
snapshot!(response,
|
||||
@r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -112,10 +111,9 @@ async fn basic_add_settings_and_geojson_documents() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -176,8 +174,8 @@ async fn basic_add_geojson_documents_and_settings() {
|
||||
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
|
||||
@r###"
|
||||
snapshot!(response,
|
||||
@r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -195,10 +193,9 @@ async fn basic_add_geojson_documents_and_settings() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -295,7 +292,7 @@ async fn geo_bounding_box() {
|
||||
let (response, code) =
|
||||
index.search_get("?filter=_geoBoundingBox([50.53987503447863,21.43443989912143],[43.76393151539099,0.54979129195425])&attributesToRetrieve=name").await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -354,17 +351,16 @@ async fn geo_bounding_box() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 17,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 17
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
|
||||
// Between Russia and Alaska
|
||||
let (response, code) = index
|
||||
.search_get("?filter=_geoBoundingBox([70,-148],[63,152])&attributesToRetrieve=name")
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -381,41 +377,44 @@ async fn geo_bounding_box() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn bug_5904() {
|
||||
// https://github.com/meilisearch/meilisearch/issues/5904
|
||||
async fn geo_radius() {
|
||||
let index = shared_index_geojson_documents().await;
|
||||
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
let (response, _code) =
|
||||
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let geojson = json!({
|
||||
"id": 1,
|
||||
"_geojson": {
|
||||
"type": "FeatureCollection",
|
||||
"features": [
|
||||
{
|
||||
"type": "Feature",
|
||||
"geometry": {
|
||||
"type": "Point",
|
||||
"coordinates": [
|
||||
4.23914,
|
||||
48.382893
|
||||
]
|
||||
},
|
||||
"properties": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
});
|
||||
let (response, _code) = index.add_documents(geojson, Some("id")).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
// 200km around Luxembourg
|
||||
let (response, code) = index
|
||||
.search_get("?filter=_geoRadius(49.4369862,6.5576591,200000)&attributesToRetrieve=name")
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"name": "Belgium"
|
||||
},
|
||||
{
|
||||
"name": "Germany"
|
||||
},
|
||||
{
|
||||
"name": "France"
|
||||
},
|
||||
{
|
||||
"name": "Luxembourg"
|
||||
},
|
||||
{
|
||||
"name": "Netherlands"
|
||||
}
|
||||
],
|
||||
"query": "",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 5
|
||||
}
|
||||
"#);
|
||||
}
|
||||
|
||||
@@ -207,118 +207,3 @@ async fn errors() {
}
"###);
}

#[actix_rt::test]
async fn search_with_personalization_without_enabling_the_feature() {
let server = Server::new().await;
let index = server.unique_index();

// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();

let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();

// Try to search with personalization - should return feature_not_enabled error
let (response, code) = index
.search_post(json!({
"q": "movie",
"personalize": {
"userContext": "I love science fiction movies"
}
}))
.await;

meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
{
"message": "reranking search results requires enabling the `personalization` experimental feature. See https://github.com/orgs/meilisearch/discussions/866",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
"###);
}

#[actix_rt::test]
async fn multi_search_with_personalization_without_enabling_the_feature() {
let server = Server::new().await;
let index = server.unique_index();

// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();

let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();

// Try to multi-search with personalization - should return feature_not_enabled error
let (response, code) = server
.multi_search(json!({
"queries": [
{
"indexUid": index.uid,
"q": "movie",
"personalize": {
"userContext": "I love science fiction movies"
}
}
]
}))
.await;

meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
{
"message": "Inside `.queries[0]`: reranking search results requires enabling the `personalization` experimental feature. See https://github.com/orgs/meilisearch/discussions/866",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
"###);

// Try to federated search with personalization - should return feature_not_enabled error
let (response, code) = server
.multi_search(json!({
"federation": {},
"queries": [
{
"indexUid": index.uid,
"q": "movie",
"personalize": {
"userContext": "I love science fiction movies"
}
}
]
}))
.await;

meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
{
"message": "Inside `.queries[0]`: Using `.personalize` is not allowed in federated queries.\n - Hint: remove `personalize` from query #0 or remove `federation` from the request",
"code": "invalid_multi_search_query_personalization",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_multi_search_query_personalization"
}
"###);
}

@@ -8,9 +8,8 @@ use actix_web::http::header::ContentType;
use actix_web::web::Data;
use meili_snap::snapshot;
use meilisearch::analytics::Analytics;
use meilisearch::personalization::PersonalizationService;
use meilisearch::search_queue::SearchQueue;
use meilisearch::{create_app, Opt, ServicesData, SubscriberForSecondLayer};
use meilisearch::{create_app, Opt, SubscriberForSecondLayer};
use tracing::level_filters::LevelFilter;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::Layer;
@@ -51,16 +50,12 @@ async fn basic_test_log_stream_route() {
);

let app = actix_web::test::init_service(create_app(
ServicesData {
index_scheduler: server.service.index_scheduler.clone().into(),
auth: server.service.auth.clone().into(),
search_queue: Data::new(search_queue),
personalization_service: Data::new(PersonalizationService::disabled()),
logs_route_handle: Data::new(route_layer_handle),
logs_stderr_handle: Data::new(stderr_layer_handle),
analytics: Data::new(Analytics::no_analytics()),
},
server.service.index_scheduler.clone().into(),
server.service.auth.clone().into(),
Data::new(search_queue),
server.service.options.clone(),
(route_layer_handle, stderr_layer_handle),
Data::new(Analytics::no_analytics()),
true,
))
.await;

@@ -1,11 +1,7 @@
use meili_snap::*;
use meilisearch::Opt;
use tempfile::TempDir;

use super::test_settings_documents_indexing_swapping_and_search;
use crate::common::{
default_settings, shared_does_not_exists_index, Server, DOCUMENTS, NESTED_DOCUMENTS,
};
use crate::common::{shared_does_not_exists_index, Server, DOCUMENTS, NESTED_DOCUMENTS};
use crate::json;

#[actix_rt::test]
@@ -1324,98 +1320,3 @@ async fn search_with_contains_without_enabling_the_feature() {
}
"#);
}

#[actix_rt::test]
#[ignore]
async fn search_with_personalization_invalid_api_key() {
// Create a server with a fake personalization API key
let dir = TempDir::new().unwrap();
let options = Opt {
experimental_personalization_api_key: Some("fake-api-key-12345".to_string()),
..default_settings(dir.path())
};
let server = Server::new_with_options(options).await.unwrap();
let index = server.unique_index();

// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();

let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();

// Try to search with personalization - should return remote_invalid_api_key error
let (response, code) = index
.search_post(json!({
"q": "the",
"personalize": {
"userContext": "I love science fiction movies"
}
}))
.await;

snapshot!(code, @"403 Forbidden");
snapshot!(json_string!(response), @r#"
{
"message": "Personalization service: Unauthorized: invalid API key",
"code": "remote_invalid_api_key",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#remote_invalid_api_key"
}
"#);
}

#[actix_rt::test]
async fn search_with_personalization_no_user_context() {
// Create a server with a fake personalization API key
let dir = TempDir::new().unwrap();
let options = Opt {
experimental_personalization_api_key: Some("fake-api-key-12345".to_string()),
..default_settings(dir.path())
};
let server = Server::new_with_options(options).await.unwrap();
let index = server.unique_index();

// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();

let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();

// Try to search with personalization - should return remote_invalid_api_key error
let (response, code) = index
.search_post(json!({
"q": "the",
"personalize": {}
}))
.await;

snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Missing field `userContext` inside `.personalize`",
"code": "invalid_search_personalize",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_personalize"
}
"###);
}

@@ -742,7 +742,7 @@ async fn vector_filter_all_embedders() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -762,10 +762,9 @@ async fn vector_filter_all_embedders() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 4,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 4
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -840,7 +839,7 @@ async fn vector_filter_specific_embedder() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -860,10 +859,9 @@ async fn vector_filter_specific_embedder() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 4,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 4
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -876,7 +874,7 @@ async fn vector_filter_user_provided() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -887,10 +885,9 @@ async fn vector_filter_user_provided() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -903,7 +900,7 @@ async fn vector_filter_specific_fragment() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -917,10 +914,9 @@ async fn vector_filter_specific_fragment() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
|
||||
let (value, _code) = index
|
||||
.search_post(json!({
|
||||
@@ -928,7 +924,7 @@ async fn vector_filter_specific_fragment() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -945,10 +941,9 @@ async fn vector_filter_specific_fragment() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -981,17 +976,16 @@ async fn vector_filter_document_template_but_fragments_used() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -1029,7 +1023,7 @@ async fn vector_filter_document_template() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1046,10 +1040,9 @@ async fn vector_filter_document_template() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -1082,7 +1075,7 @@ async fn vector_filter_negation() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1099,10 +1092,9 @@ async fn vector_filter_negation() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -1115,7 +1107,7 @@ async fn vector_filter_or_combination() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1132,10 +1124,9 @@ async fn vector_filter_or_combination() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -1148,7 +1139,7 @@ async fn vector_filter_regenerate() {
|
||||
"attributesToRetrieve": ["name"]
|
||||
}))
|
||||
.await;
|
||||
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(value, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1165,8 +1156,7 @@ async fn vector_filter_regenerate() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ async fn geo_bounding_box_with_string_and_number() {
|
||||
}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -63,8 +63,7 @@ async fn geo_bounding_box_with_string_and_number() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
},
|
||||
@@ -85,7 +84,7 @@ async fn bug_4640() {
|
||||
}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -124,8 +123,7 @@ async fn bug_4640() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
},
|
||||
@@ -149,7 +147,7 @@ async fn geo_asc_with_words() {
|
||||
&json!({"q": "jean"}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -181,8 +179,7 @@ async fn geo_asc_with_words() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
},
|
||||
@@ -195,7 +192,7 @@ async fn geo_asc_with_words() {
|
||||
&json!({"q": "bob"}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -219,8 +216,7 @@ async fn geo_asc_with_words() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
},
|
||||
@@ -233,7 +229,7 @@ async fn geo_asc_with_words() {
|
||||
&json!({"q": "intel"}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -249,8 +245,7 @@ async fn geo_asc_with_words() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
},
|
||||
@@ -274,7 +269,7 @@ async fn geo_sort_with_words() {
|
||||
&json!({"q": "jean", "sort": ["_geoPoint(0.0, 0.0):asc"]}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -309,8 +304,7 @@ async fn geo_sort_with_words() {
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
},
|
||||
|
||||
@@ -1,4 +1,4 @@
use meili_snap::{json_string, snapshot};
use meili_snap::snapshot;
use once_cell::sync::Lazy;

use crate::common::index::Index;
@@ -148,7 +148,7 @@ async fn simple_search() {
|
||||
)
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -209,10 +209,9 @@ async fn simple_search() {
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]",
|
||||
"semanticHitCount": 0
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
snapshot!(response["semanticHitCount"], @"0");
|
||||
|
||||
let (response, code) = index
|
||||
@@ -221,7 +220,7 @@ async fn simple_search() {
|
||||
)
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -285,10 +284,9 @@ async fn simple_search() {
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]",
|
||||
"semanticHitCount": 2
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
snapshot!(response["semanticHitCount"], @"2");
|
||||
|
||||
let (response, code) = index
|
||||
@@ -297,7 +295,7 @@ async fn simple_search() {
|
||||
)
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -361,10 +359,9 @@ async fn simple_search() {
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]",
|
||||
"semanticHitCount": 3
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
snapshot!(response["semanticHitCount"], @"3");
|
||||
}
|
||||
|
||||
|
||||
@@ -104,7 +104,7 @@ async fn simple_search() {
|
||||
// english
|
||||
index
|
||||
.search(json!({"q": "Atta", "attributesToRetrieve": ["id"]}), |response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -115,8 +115,7 @@ async fn simple_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -126,7 +125,7 @@ async fn simple_search() {
|
||||
// japanese
|
||||
index
|
||||
.search(json!({"q": "進撃", "attributesToRetrieve": ["id"]}), |response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -137,8 +136,7 @@ async fn simple_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -149,7 +147,7 @@ async fn simple_search() {
|
||||
.search(
|
||||
json!({"q": "進撃", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -160,10 +158,9 @@ async fn simple_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
snapshot!(code, @"200 OK");
|
||||
},
|
||||
)
|
||||
@@ -172,7 +169,7 @@ async fn simple_search() {
|
||||
// chinese
|
||||
index
|
||||
.search(json!({"q": "进击", "attributesToRetrieve": ["id"]}), |response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -183,10 +180,9 @@ async fn simple_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
snapshot!(code, @"200 OK");
|
||||
})
|
||||
.await;
|
||||
@@ -226,7 +222,7 @@ async fn force_locales() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -237,8 +233,7 @@ async fn force_locales() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -251,7 +246,7 @@ async fn force_locales() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -262,8 +257,7 @@ async fn force_locales() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -306,7 +300,7 @@ async fn force_locales_with_pattern() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -317,8 +311,7 @@ async fn force_locales_with_pattern() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -331,7 +324,7 @@ async fn force_locales_with_pattern() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -342,8 +335,7 @@ async fn force_locales_with_pattern() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -384,15 +376,14 @@ async fn force_locales_with_pattern_nested() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -405,7 +396,7 @@ async fn force_locales_with_pattern_nested() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -416,8 +407,7 @@ async fn force_locales_with_pattern_nested() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -461,15 +451,14 @@ async fn force_different_locales_with_pattern() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -482,7 +471,7 @@ async fn force_different_locales_with_pattern() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -493,8 +482,7 @@ async fn force_different_locales_with_pattern() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -541,15 +529,14 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -562,7 +549,7 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"], "attributesToSearchOn": ["name_zh", "description_zh"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -573,8 +560,7 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -616,7 +602,7 @@ async fn auto_infer_locales_at_search() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -627,8 +613,7 @@ async fn auto_infer_locales_at_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -640,21 +625,20 @@ async fn auto_infer_locales_at_search() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"id": 853
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(response, @r###"
{
"hits": [
{
"id": 853
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
|
||||
},
|
||||
)
|
||||
@@ -664,7 +648,7 @@ async fn auto_infer_locales_at_search() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -675,8 +659,7 @@ async fn auto_infer_locales_at_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -719,15 +702,14 @@ async fn force_different_locales_with_pattern_nested() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -740,21 +722,20 @@ async fn force_different_locales_with_pattern_nested() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"id": 852
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(response, @r###"
{
"hits": [
{
"id": 852
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
|
||||
},
|
||||
)
|
||||
@@ -765,7 +746,7 @@ async fn force_different_locales_with_pattern_nested() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["ja"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -776,8 +757,7 @@ async fn force_different_locales_with_pattern_nested() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -819,15 +799,14 @@ async fn settings_change() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -840,15 +819,14 @@ async fn settings_change() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -884,15 +862,14 @@ async fn settings_change() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -905,15 +882,14 @@ async fn settings_change() {
|
||||
.search(
|
||||
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [],
|
||||
"query": "\"进击的巨人\"",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 0,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 0
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -1188,7 +1164,7 @@ async fn swedish_search() {
|
||||
// infer swedish
|
||||
index
|
||||
.search(json!({"q": "trä", "attributesToRetrieve": ["product"]}), |response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1202,8 +1178,7 @@ async fn swedish_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -1212,7 +1187,7 @@ async fn swedish_search() {
|
||||
|
||||
index
|
||||
.search(json!({"q": "tra", "attributesToRetrieve": ["product"]}), |response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1226,8 +1201,7 @@ async fn swedish_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -1239,7 +1213,7 @@ async fn swedish_search() {
|
||||
.search(
|
||||
json!({"q": "trä", "locales": ["swe"], "attributesToRetrieve": ["product"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1253,8 +1227,7 @@ async fn swedish_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -1265,7 +1238,7 @@ async fn swedish_search() {
|
||||
.search(
|
||||
json!({"q": "tra", "locales": ["swe"], "attributesToRetrieve": ["product"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1279,8 +1252,7 @@ async fn swedish_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
@@ -1315,21 +1287,20 @@ async fn german_search() {
|
||||
.search(
|
||||
json!({"q": "kulturalität", "attributesToRetrieve": ["product"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"product": "Interkulturalität"
}
],
"query": "kulturalität",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(response, @r###"
{
"hits": [
{
"product": "Interkulturalität"
}
],
"query": "kulturalität",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
|
||||
},
|
||||
)
|
||||
@@ -1339,7 +1310,7 @@ async fn german_search() {
|
||||
.search(
|
||||
json!({"q": "organisation", "attributesToRetrieve": ["product"]}),
|
||||
|response, code| {
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1350,8 +1321,7 @@ async fn german_search() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"200 OK");
|
||||
|
||||
@@ -1,387 +0,0 @@
|
||||
use meili_snap::{json_string, snapshot};
|
||||
|
||||
use crate::common::{shared_index_with_documents, Server, DOCUMENTS};
|
||||
use crate::json;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn search_without_metadata_header() {
|
||||
let index = shared_index_with_documents().await;
|
||||
|
||||
// Test that metadata is not included by default
|
||||
index
|
||||
.search(json!({"q": "glass"}), |response, code| {
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"title": "Gläss",
|
||||
"id": "450465",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
}
|
||||
"###);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn search_with_metadata_header() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (task, _code) = index.add_documents(documents, None).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Test with Meili-Include-Metadata header
|
||||
let (response, code) = index
|
||||
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"title": "Gläss",
|
||||
"id": "450465",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]",
|
||||
"metadata": {
|
||||
"queryUid": "[uuid]",
|
||||
"indexUid": "[uuid]",
|
||||
"primaryKey": "id"
|
||||
}
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn search_with_metadata_header_and_primary_key() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (task, _code) = index.add_documents(documents, Some("id")).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Test with Meili-Include-Metadata header
|
||||
let (response, code) = index
|
||||
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"id": "450465",
|
||||
"title": "Gläss",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]",
|
||||
"metadata": {
|
||||
"queryUid": "[uuid]",
|
||||
"indexUid": "[uuid]",
|
||||
"primaryKey": "id"
|
||||
}
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn multi_search_without_metadata_header() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (task, _code) = index.add_documents(documents, None).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Test multi-search without metadata header
|
||||
let (response, code) = server
|
||||
.multi_search(json!({
|
||||
"queries": [
|
||||
{"indexUid": index.uid, "q": "glass"},
|
||||
{"indexUid": index.uid, "q": "dragon"}
|
||||
]
|
||||
}))
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".results[0].processingTimeMs" => "[duration]", ".results[0].requestUid" => "[uuid]", ".results[1].processingTimeMs" => "[duration]", ".results[1].requestUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"indexUid": "[uuid]",
|
||||
"hits": [
|
||||
{
|
||||
"title": "Gläss",
|
||||
"id": "450465",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
},
|
||||
{
|
||||
"indexUid": "[uuid]",
|
||||
"hits": [
|
||||
{
|
||||
"title": "How to Train Your Dragon: The Hidden World",
|
||||
"id": "166428",
|
||||
"color": [
|
||||
"green",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "dragon",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
}
|
||||
]
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn multi_search_with_metadata_header() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (task, _code) = index.add_documents(documents, Some("id")).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Test multi-search with metadata header
|
||||
let (response, code) = server
|
||||
.multi_search_with_headers(
|
||||
json!({
|
||||
"queries": [
|
||||
{"indexUid": index.uid, "q": "glass"},
|
||||
{"indexUid": index.uid, "q": "dragon"}
|
||||
]
|
||||
}),
|
||||
vec![("Meili-Include-Metadata", "true")],
|
||||
)
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".results[0].processingTimeMs" => "[duration]", ".results[0].requestUid" => "[uuid]", ".results[0].metadata.queryUid" => "[uuid]", ".results[1].processingTimeMs" => "[duration]", ".results[1].requestUid" => "[uuid]", ".results[1].metadata.queryUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"indexUid": "[uuid]",
|
||||
"hits": [
|
||||
{
|
||||
"id": "450465",
|
||||
"title": "Gläss",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]",
|
||||
"metadata": {
|
||||
"queryUid": "[uuid]",
|
||||
"indexUid": "[uuid]",
|
||||
"primaryKey": "id"
|
||||
}
|
||||
},
|
||||
{
|
||||
"indexUid": "[uuid]",
|
||||
"hits": [
|
||||
{
|
||||
"id": "166428",
|
||||
"title": "How to Train Your Dragon: The Hidden World",
|
||||
"color": [
|
||||
"green",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "dragon",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]",
|
||||
"metadata": {
|
||||
"queryUid": "[uuid]",
|
||||
"indexUid": "[uuid]",
|
||||
"primaryKey": "id"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn search_metadata_header_false_value() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (task, _code) = index.add_documents(documents, None).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Test with header set to false
|
||||
let (response, code) = index
|
||||
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "false")])
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"title": "Gläss",
|
||||
"id": "450465",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn search_metadata_uuid_format() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (task, _code) = index.add_documents(documents, None).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
let (response, code) = index
|
||||
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"title": "Gläss",
|
||||
"id": "450465",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]",
|
||||
"metadata": {
|
||||
"queryUid": "[uuid]",
|
||||
"indexUid": "[uuid]",
|
||||
"primaryKey": "id"
|
||||
}
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn search_metadata_consistency_across_requests() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (task, _code) = index.add_documents(documents, Some("id")).await;
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Make multiple requests and check that metadata is consistent
|
||||
for _i in 0..3 {
|
||||
let (response, code) = index
|
||||
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"id": "450465",
|
||||
"title": "Gläss",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]",
|
||||
"metadata": {
|
||||
"queryUid": "[uuid]",
|
||||
"indexUid": "[uuid]",
|
||||
"primaryKey": "id"
|
||||
}
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
@@ -11,7 +11,6 @@ mod hybrid;
#[cfg(not(feature = "chinese-pinyin"))]
mod locales;
mod matching_strategy;
mod metadata;
mod multi;
mod pagination;
mod restrict_searchable;
@@ -1045,7 +1044,7 @@ async fn test_degraded_score_details() {
|
||||
}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -1104,8 +1103,7 @@ async fn test_degraded_score_details() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
},
|
||||
@@ -2129,102 +2127,3 @@ async fn simple_search_changing_unrelated_settings() {
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn ranking_score_bug_with_sort() {
|
||||
let server = Server::new_shared();
|
||||
let index = server.unique_index();
|
||||
|
||||
// Create documents with a "created" field for sorting
|
||||
let documents = json!([
|
||||
{
|
||||
"id": "1",
|
||||
"title": "Coffee Mug",
|
||||
"created": "2023-01-01T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"title": "Water Bottle",
|
||||
"created": "2023-01-02T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"title": "Tumbler Cup",
|
||||
"created": "2023-01-03T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"id": "4",
|
||||
"title": "Stainless Steel Tumbler",
|
||||
"created": "2023-01-04T00:00:00Z"
|
||||
}
|
||||
]);
|
||||
|
||||
// Add documents
|
||||
let (task, code) = index.add_documents(documents, None).await;
|
||||
assert_eq!(code, 202, "{task}");
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Configure sortable attributes
|
||||
let (task, code) = index
|
||||
.update_settings(json!({
|
||||
"sortableAttributes": ["created"]
|
||||
}))
|
||||
.await;
|
||||
assert_eq!(code, 202, "{task}");
|
||||
server.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
// Test 1: Search without sort - should have proper ranking scores
|
||||
index
|
||||
.search(
|
||||
json!({
|
||||
"q": "tumbler",
|
||||
"showRankingScore": true,
|
||||
"rankingScoreThreshold": 0.0,
|
||||
"attributesToRetrieve": ["title"]
|
||||
}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response["hits"]), @r###"
|
||||
[
|
||||
{
|
||||
"title": "Tumbler Cup",
|
||||
"_rankingScore": 0.9848484848484848
|
||||
},
|
||||
{
|
||||
"title": "Stainless Steel Tumbler",
|
||||
"_rankingScore": 0.8787878787878788
|
||||
}
|
||||
]
|
||||
"###);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
||||
// Test 2: Search with sort - this is where the bug occurs
|
||||
index
|
||||
.search(
|
||||
json!({
|
||||
"q": "tumbler",
|
||||
"showRankingScore": true,
|
||||
"rankingScoreThreshold": 0.0,
|
||||
"sort": ["created:desc"],
|
||||
"attributesToRetrieve": ["title"]
|
||||
}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{response}");
|
||||
snapshot!(json_string!(response["hits"]), @r###"
|
||||
[
|
||||
{
|
||||
"title": "Tumbler Cup",
|
||||
"_rankingScore": 0.9848484848484848
|
||||
},
|
||||
{
|
||||
"title": "Stainless Steel Tumbler",
|
||||
"_rankingScore": 0.8787878787878788
|
||||
}
|
||||
]
|
||||
"###);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -93,14 +93,13 @@ async fn federation_empty_list() {

let (response, code) = server.multi_search(json!({"federation": {}, "queries": []})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
}
@@ -165,7 +164,7 @@ async fn simple_search_single_index() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
|
||||
[
|
||||
{
|
||||
"indexUid": "SHARED_DOCUMENTS",
|
||||
@@ -183,8 +182,7 @@ async fn simple_search_single_index() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
},
|
||||
{
|
||||
"indexUid": "SHARED_DOCUMENTS",
|
||||
@@ -202,8 +200,7 @@ async fn simple_search_single_index() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
]
|
||||
"###);
|
||||
@@ -220,7 +217,7 @@ async fn federation_single_search_single_index() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -240,8 +237,7 @@ async fn federation_single_search_single_index() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
}
|
||||
"###);
|
||||
}
|
||||
@@ -260,7 +256,7 @@ async fn federation_multiple_search_single_index() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -312,8 +308,7 @@ async fn federation_multiple_search_single_index() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 5,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 5
|
||||
}
|
||||
"###);
|
||||
}
|
||||
@@ -330,7 +325,7 @@ async fn federation_two_search_single_index() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -363,8 +358,7 @@ async fn federation_two_search_single_index() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
"###);
|
||||
}
|
||||
@@ -463,7 +457,7 @@ async fn simple_search_two_indexes() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
|
||||
[
|
||||
{
|
||||
"indexUid": "SHARED_DOCUMENTS",
|
||||
@@ -481,8 +475,7 @@ async fn simple_search_two_indexes() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 1,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 1
|
||||
},
|
||||
{
|
||||
"indexUid": "SHARED_NESTED_DOCUMENTS",
|
||||
@@ -523,8 +516,7 @@ async fn simple_search_two_indexes() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 2,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 2
|
||||
}
|
||||
]
|
||||
"###);
|
||||
@@ -543,7 +535,7 @@ async fn federation_two_search_two_indexes() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -604,8 +596,7 @@ async fn federation_two_search_two_indexes() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 3,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 3
|
||||
}
|
||||
"###);
|
||||
}
|
||||
@@ -635,7 +626,7 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@@ -804,8 +795,7 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
"processingTimeMs": "[duration]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 12,
|
||||
"requestUid": "[uuid]"
|
||||
"estimatedTotalHits": 12
|
||||
}
|
||||
"###);
|
||||
}
|
||||
@@ -1111,7 +1101,7 @@ async fn federation_filter() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -1150,8 +1140,7 @@ async fn federation_filter() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -1188,7 +1177,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1277,8 +1266,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);

@@ -1290,7 +1278,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1365,8 +1353,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -1462,7 +1449,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1551,8 +1538,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);

@@ -1565,7 +1551,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1640,8 +1626,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -1719,7 +1704,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1846,8 +1831,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
"estimatedTotalHits": 10
}
"###);

@@ -1860,7 +1844,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1931,8 +1915,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 6,
"requestUid": "[uuid]"
"estimatedTotalHits": 6
}
"###);
}
@@ -1953,7 +1936,7 @@ async fn federation_sort_different_ranking_rules() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -2080,8 +2063,7 @@ async fn federation_sort_different_ranking_rules() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
"estimatedTotalHits": 10
}
"###);

@@ -2160,7 +2142,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -2287,8 +2269,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
"estimatedTotalHits": 10
}
"###);

@@ -2301,7 +2282,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -2372,8 +2353,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 6,
"requestUid": "[uuid]"
"estimatedTotalHits": 6
}
"###);
}
@@ -2444,7 +2424,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2547,8 +2527,7 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2570,7 +2549,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2585,8 +2564,7 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 1,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2608,7 +2586,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2695,8 +2673,7 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 2,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2718,14 +2695,13 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 12,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2755,7 +2731,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2885,8 +2861,7 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2908,7 +2883,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2923,8 +2898,7 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 1,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2946,7 +2920,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3033,8 +3007,7 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 2,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -3056,14 +3029,13 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 12,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -3126,7 +3098,7 @@ async fn federation_null_weight() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -3165,8 +3137,7 @@ async fn federation_null_weight() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -3273,7 +3244,7 @@ async fn federation_federated_contains_facets() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -3309,8 +3280,7 @@ async fn federation_federated_contains_facets() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);

@@ -3518,7 +3488,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3562,8 +3532,7 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 4,
"requestUid": "[uuid]"
"semanticHitCount": 4
}
"###);

@@ -3576,7 +3545,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3620,8 +3589,7 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 4,
"requestUid": "[uuid]"
"semanticHitCount": 4
}
"###);

@@ -3635,7 +3603,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3683,8 +3651,7 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 3,
"requestUid": "[uuid]"
"semanticHitCount": 3
}
"###);
}
@@ -3736,7 +3703,7 @@ async fn federation_vector_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r#"
{
"hits": [
{
@@ -3955,10 +3922,9 @@ async fn federation_vector_two_indexes() {
0.6
]
},
"semanticHitCount": 6,
"requestUid": "[uuid]"
"semanticHitCount": 6
}
"###);
"#);

// hybrid search, distinct embedder
let (response, code) = server
@@ -3968,7 +3934,7 @@ async fn federation_vector_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r#"
{
"hits": [
{
@@ -4195,8 +4161,7 @@ async fn federation_vector_two_indexes() {
0.6
]
},
"semanticHitCount": 8,
"requestUid": "[uuid]"
"semanticHitCount": 8
}
"#);
}
@@ -4244,7 +4209,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4415,8 +4380,7 @@ async fn federation_facets_different_indexes_same_facet() {
},
"stats": {}
}
},
"requestUid": "[uuid]"
}
}
"###);

@@ -4435,7 +4399,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4577,8 +4541,7 @@ async fn federation_facets_different_indexes_same_facet() {
"Shazam!": 1
}
},
"facetStats": {},
"requestUid": "[uuid]"
"facetStats": {}
}
"###);

@@ -4598,7 +4561,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4723,8 +4686,7 @@ async fn federation_facets_different_indexes_same_facet() {
"distribution": {},
"stats": {}
}
},
"requestUid": "[uuid]"
}
}
"###);
}
@@ -4786,7 +4748,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4844,8 +4806,7 @@ async fn federation_facets_same_indexes() {
}
}
}
},
"requestUid": "[uuid]"
}
}
"###);

@@ -4861,7 +4822,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4947,8 +4908,7 @@ async fn federation_facets_same_indexes() {
}
}
}
},
"requestUid": "[uuid]"
}
}
"###);

@@ -4965,7 +4925,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -5027,8 +4987,7 @@ async fn federation_facets_same_indexes() {
"min": 2.0,
"max": 6.0
}
},
"requestUid": "[uuid]"
}
}
"###);
}
@@ -5081,7 +5040,7 @@ async fn federation_inconsistent_merge_order() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -5258,8 +5217,7 @@ async fn federation_inconsistent_merge_order() {
},
"stats": {}
}
},
"requestUid": "[uuid]"
}
}
"###);

@@ -5306,7 +5264,7 @@ async fn federation_inconsistent_merge_order() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -5446,8 +5404,7 @@ async fn federation_inconsistent_merge_order() {
"Batman Returns": 1
}
},
"facetStats": {},
"requestUid": "[uuid]"
"facetStats": {}
}
"###);
}