Compare commits


49 Commits

Author SHA1 Message Date
Clément Renault
349f123642 WIP 2025-10-07 16:04:01 +02:00
Clément Renault
f9b77b35f0 Improve the pre-compaction size information 2025-10-07 15:25:10 +02:00
Clément Renault
d9e77e1950 Make the tests to pass 2025-10-07 15:25:10 +02:00
Clément Renault
657fbec367 Make Clippy happy 2025-10-07 14:37:38 +02:00
Clément Renault
c1ecaba168 Introduce a function to effectively close an index 2025-10-07 14:33:54 +02:00
Clément Renault
9530e72d04 Expose the env closing event so we can wait for the index to close 2025-10-07 14:33:54 +02:00
Kerollmops
8fb8f389ae Implement the index compaction task 2025-10-07 14:33:54 +02:00
Kerollmops
fbe6e0b2df Rename operation to IndexCompaction 2025-10-06 16:19:04 +02:00
Clément Renault
4a84f1cd1a Add the necessary batches and tasks in the process 2025-10-02 17:17:32 +02:00
Clément Renault
511cb0ff82 Add a new CompactIndex action 2025-10-02 17:14:50 +02:00
Clément Renault
55d6a81a75 Introduce a new /indexes/{indexUid}/compact route 2025-10-02 17:12:10 +02:00
Many the fish
b98e2cef81 Merge pull request #5863 from meilisearch/add-request-uid-to-search-routes
Add request uid to search routes
2025-10-02 10:09:31 +00:00
ManyTheFish
f97384da6c Fix geo_json snapshots 2025-09-30 17:03:21 +02:00
ManyTheFish
6ea76f2771 Add uuid v7 feature 2025-09-30 15:42:03 +02:00
ManyTheFish
715b255371 fix tests 2025-09-30 15:42:03 +02:00
ManyTheFish
db094d3923 Add requestUid field in search response and add debug logs with requestUid 2025-09-30 15:42:03 +02:00
Many the fish
c29bdcae23 Merge pull request #5913 from meilisearch/dependabot/github_actions/actions/setup-python-6
Bump actions/setup-python from 5 to 6
2025-09-29 14:58:45 +00:00
Many the fish
75219181a3 Merge pull request #5834 from meilisearch/fix-openapi-ci
Minor improvement in OpenAPI CI
2025-09-29 13:55:12 +00:00
Many the fish
a5b5cf7cd1 Merge pull request #5916 from meilisearch/dependabot/github_actions/sigstore/cosign-installer-3.10.0
Bump sigstore/cosign-installer from 3.9.2 to 3.10.0
2025-09-29 13:52:31 +00:00
Many the fish
142ba8ea00 Merge pull request #5915 from meilisearch/dependabot/github_actions/actions/setup-node-5
Bump actions/setup-node from 4 to 5
2025-09-29 13:52:28 +00:00
Many the fish
4bc823e07c Merge pull request #5914 from meilisearch/dependabot/github_actions/actions/setup-dotnet-5
Bump actions/setup-dotnet from 4 to 5
2025-09-29 13:52:10 +00:00
Many the fish
db06ca7138 Merge pull request #5912 from meilisearch/dependabot/github_actions/actions/setup-go-6
Bump actions/setup-go from 5 to 6
2025-09-29 13:52:06 +00:00
Clément Renault
95595a768e Merge pull request #5911 from EazyAl/main
Update README.md to fix newsletter link
2025-09-29 13:10:16 +00:00
dependabot[bot]
36f649768e Bump sigstore/cosign-installer from 3.9.2 to 3.10.0
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.9.2 to 3.10.0.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](d58896d6a1...d7543c93d8)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-version: 3.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:14 +00:00
dependabot[bot]
0c6fc243f2 Bump actions/setup-node from 4 to 5
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4 to 5.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-node
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:11 +00:00
dependabot[bot]
dfc46d5627 Bump actions/setup-dotnet from 4 to 5
Bumps [actions/setup-dotnet](https://github.com/actions/setup-dotnet) from 4 to 5.
- [Release notes](https://github.com/actions/setup-dotnet/releases)
- [Commits](https://github.com/actions/setup-dotnet/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-dotnet
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:08 +00:00
dependabot[bot]
11d55f2121 Bump actions/setup-python from 5 to 6
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5 to 6.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:03 +00:00
dependabot[bot]
014da57cf6 Bump actions/setup-go from 5 to 6
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5 to 6.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:00 +00:00
Clément Renault
70a0ff4a8f Merge pull request #5900 from meilisearch/show-dependencies
Show Dependabot dependency upgrade in the changelog
2025-09-25 16:04:03 +00:00
Clément Renault
dd0d5e4b90 Merge pull request #5910 from meilisearch/curquiza-patch-1
Change Java version in SDK CI
2025-09-25 14:32:16 +00:00
Ali Imran
15b3bb1700 Update README.md to fix newsletter link 2025-09-25 16:07:08 +02:00
Louis Dureuil
077ec2ab11 Merge pull request #5908 from meilisearch/update-version
Update version
2025-09-25 13:10:34 +00:00
Clémentine
f25db0795e Change Java version in SDK CI
Updated Java version and distribution in workflow.
2025-09-25 15:03:50 +02:00
Tamo
c50a337c29 bump version for 1.22.1 2025-09-25 13:44:44 +02:00
Tamo
efeae09ce1 Merge pull request #5906 from meilisearch/task-deletion-strategy
Delete oldest tasks first
2025-09-25 10:11:33 +00:00
Tamo
ad55b48664 Merge pull request #5907 from meilisearch/fix-geojson-bug
use the latest version of zerometry that supports collection, lines and multi-lines
2025-09-25 09:56:01 +00:00
Tamo
94eabd34e6 fmt 2025-09-25 11:01:53 +02:00
Tamo
6935589f74 use the latest version of zerometry that supports collection, lines and multi-lines 2025-09-25 10:31:07 +02:00
Louis Dureuil
4beb452027 Optimize by using from_sorted_iter
Co-authored-by: Tamo <tamo@meilisearch.com>
2025-09-25 10:16:30 +02:00
Louis Dureuil
b722da303a Do not start from the end of the finished tasks when selecting the tasks to delete 2025-09-25 09:54:58 +02:00
Louis Dureuil
ad39263b94 Merge pull request #5902 from meilisearch/bump-version
bump the version of meilisearch
2025-09-24 07:23:39 +00:00
Tamo
0ffb08b112 bump the version of meilisearch 2025-09-23 17:37:31 +02:00
Clément Renault
ff80b4d0ff Merge pull request #5891 from nnethercott/fix-hannoy-arroy-conversion
Bump `hannoy` to v0.0.8
2025-09-23 13:26:54 +00:00
curquiza
6f0d26c22c Show dependency upgrade in the changelog for full transparency 2025-09-22 18:30:34 +02:00
nnethercott
7a6cf30cb2 bump hannoy to 0.0.8 2025-09-18 11:23:57 +02:00
nnethercott
f9ffb8ada5 bump from hannoy 0.0.6 to 0.0.7 2025-09-16 12:00:36 +02:00
nnethercott
a47888f02c bump hannoy to 0.6 2025-09-16 11:02:46 +02:00
nnethercott
5bef2f4d86 Update arroy-hannoy conversion internals 2025-09-15 16:10:56 +02:00
curquiza
d52c7dcc94 Add needs: check-version 2025-08-12 20:47:43 +02:00
100 changed files with 1619 additions and 1543 deletions
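
The headline change in this range is index compaction: a new `/indexes/{indexUid}/compact` route (55d6a81a75), a `CompactIndex` action (511cb0ff82) and the task that performs the compaction (8fb8f389ae). As a rough sketch of how the feature would be driven from a client, the snippet below posts to the new route using `ureq`, which the codebase already depends on; the host, index uid, API key and exact response shape are assumptions, only the route path and the task kind come from the diffs below.

```rust
// Hypothetical client call to the new compaction route; requires `ureq = "2"`.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // POST /indexes/{indexUid}/compact should enqueue an IndexCompaction task.
    let response = ureq::post("http://localhost:7700/indexes/movies/compact")
        .set("Authorization", "Bearer MASTER_KEY")
        .call()?;

    // Like other asynchronous routes, this is expected to answer with a summarized
    // task whose details later expose the pre/post compaction sizes.
    println!("{}", response.into_string()?);
    Ok(())
}
```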

View File

@@ -7,6 +7,5 @@ updates:
schedule:
interval: "monthly"
labels:
- 'skip changelog'
- 'dependencies'
rebase-strategy: disabled

View File

@@ -18,6 +18,7 @@ categories:
label: 'security'
- title: '⚙️ Maintenance/misc'
label:
- 'dependencies'
- 'maintenance'
- 'documentation'
template: |
@@ -26,8 +27,3 @@ template: |
❤️ Huge thanks to our contributors: $CONTRIBUTORS.
no-changes-template: 'Changes are coming soon 😎'
sort-direction: 'ascending'
replacers:
- search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
replace: ''
- search: '/(?:and )?@dependabot(?:\[bot\])?,?/g'
replace: ''

View File

@@ -65,7 +65,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Install cosign
uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # tag=v3.9.2
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
- name: Login to Docker Hub
uses: docker/login-action@v3

View File

@@ -11,7 +11,7 @@ jobs:
check-version:
name: Check the version validity
runs-on: ubuntu-latest
# No need to check the version for dry run (cron)
# No need to check the version for dry run (cron or workflow_dispatch)
steps:
- uses: actions/checkout@v5
# Check if the tag has the v<number>.<number>.<number> format.
@@ -48,7 +48,7 @@ jobs:
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron)
# No need to upload binaries for dry run (cron or workflow_dispatch)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
@@ -78,7 +78,7 @@ jobs:
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron)
# No need to upload binaries for dry run (cron or workflow_dispatch)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
@@ -111,7 +111,7 @@ jobs:
command: build
args: --release --target ${{ matrix.target }}
- name: Upload the binary to release
# No need to upload binaries for dry run (cron)
# No need to upload binaries for dry run (cron or workflow_dispatch)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
@@ -176,7 +176,7 @@ jobs:
- name: List target output files
run: ls -lR ./target
- name: Upload the binary to release
# No need to upload binaries for dry run (cron)
# No need to upload binaries for dry run (cron or workflow_dispatch)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
@@ -187,6 +187,7 @@ jobs:
publish-openapi-file:
name: Publish OpenAPI file
needs: check-version
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -201,7 +202,7 @@ jobs:
cd crates/openapi-generator
cargo run --release -- --pretty --output ../../meilisearch.json
- name: Upload OpenAPI to Release
# No need to upload for dry run (cron)
# No need to upload for dry run (cron or workflow_dispatch)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:

View File

@@ -50,7 +50,7 @@ jobs:
with:
repository: meilisearch/meilisearch-dotnet
- name: Setup .NET Core
uses: actions/setup-dotnet@v4
uses: actions/setup-dotnet@v5
with:
dotnet-version: "8.0.x"
- name: Install dependencies
@@ -100,7 +100,7 @@ jobs:
- '7700:7700'
steps:
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: stable
- uses: actions/checkout@v5
@@ -135,13 +135,13 @@ jobs:
- name: Set up Java
uses: actions/setup-java@v5
with:
java-version: 8
distribution: 'zulu'
java-version: 17
distribution: 'temurin'
cache: gradle
- name: Grant execute permission for gradlew
run: chmod +x gradlew
- name: Build and run unit and integration tests
run: ./gradlew build integrationTest
run: ./gradlew build integrationTest --info
meilisearch-js-tests:
needs: define-docker-image
@@ -160,7 +160,7 @@ jobs:
with:
repository: meilisearch/meilisearch-js
- name: Setup node
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
cache: 'yarn'
- name: Install dependencies
@@ -224,7 +224,7 @@ jobs:
with:
repository: meilisearch/meilisearch-python
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
- name: Install pipenv
uses: dschep/install-pipenv-action@v1
- name: Install dependencies
@@ -318,7 +318,7 @@ jobs:
with:
repository: meilisearch/meilisearch-js-plugins
- name: Setup node
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
cache: yarn
- name: Install dependencies

Cargo.lock generated
View File

@@ -589,7 +589,7 @@ source = "git+https://github.com/meilisearch/bbqueue#cbb87cc707b5af415ef203bdaf2
[[package]]
name = "benchmarks"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"anyhow",
"bumpalo",
@@ -799,7 +799,7 @@ dependencies = [
[[package]]
name = "build-info"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"anyhow",
"time",
@@ -1092,7 +1092,7 @@ dependencies = [
"steppe",
"thiserror 2.0.16",
"thread_local",
"zerometry 0.3.0",
"zerometry",
]
[[package]]
@@ -1829,7 +1829,7 @@ dependencies = [
[[package]]
name = "dump"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"anyhow",
"big_s",
@@ -2072,7 +2072,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "file-store"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"tempfile",
"thiserror 2.0.16",
@@ -2094,7 +2094,7 @@ dependencies = [
[[package]]
name = "filter-parser"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"insta",
"levenshtein_automata",
@@ -2122,7 +2122,7 @@ dependencies = [
[[package]]
name = "flatten-serde-json"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"criterion",
"serde_json",
@@ -2279,7 +2279,7 @@ dependencies = [
[[package]]
name = "fuzzers"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"arbitrary",
"bumpalo",
@@ -2577,7 +2577,6 @@ dependencies = [
"num-traits",
"robust",
"rstar",
"spade",
]
[[package]]
@@ -2588,7 +2587,6 @@ checksum = "75a4dcd69d35b2c87a7c83bce9af69fd65c9d68d3833a0ded568983928f3fc99"
dependencies = [
"approx",
"num-traits",
"rayon",
"rstar",
"serde",
]
@@ -3060,7 +3058,6 @@ dependencies = [
"i_key_sort",
"i_shape",
"i_tree",
"rayon",
]
[[package]]
@@ -3237,7 +3234,7 @@ dependencies = [
[[package]]
name = "index-scheduler"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"anyhow",
"backoff",
@@ -3254,10 +3251,8 @@ dependencies = [
"enum-iterator",
"file-store",
"flate2",
"hashbrown 0.15.5",
"indexmap",
"insta",
"itertools 0.14.0",
"maplit",
"meili-snap",
"meilisearch-auth",
@@ -3272,7 +3267,6 @@ dependencies = [
"tempfile",
"thiserror 2.0.16",
"time",
"tokio",
"tracing",
"ureq",
"uuid",
@@ -3494,7 +3488,7 @@ dependencies = [
[[package]]
name = "json-depth-checker"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"criterion",
"serde_json",
@@ -4003,7 +3997,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "meili-snap"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"insta",
"md5",
@@ -4014,7 +4008,7 @@ dependencies = [
[[package]]
name = "meilisearch"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"actix-cors",
"actix-http",
@@ -4111,7 +4105,7 @@ dependencies = [
[[package]]
name = "meilisearch-auth"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"base64 0.22.1",
"enum-iterator",
@@ -4130,7 +4124,7 @@ dependencies = [
[[package]]
name = "meilisearch-types"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"actix-web",
"anyhow",
@@ -4165,7 +4159,7 @@ dependencies = [
[[package]]
name = "meilitool"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"anyhow",
"clap",
@@ -4199,7 +4193,7 @@ dependencies = [
[[package]]
name = "milli"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"allocator-api2 0.3.1",
"arroy",
@@ -4278,7 +4272,7 @@ dependencies = [
"url",
"utoipa",
"uuid",
"zerometry 0.1.0",
"zerometry",
]
[[package]]
@@ -4780,7 +4774,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "permissive-json-pointer"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"big_s",
"serde_json",
@@ -6148,18 +6142,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "spade"
version = "2.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb313e1c8afee5b5647e00ee0fe6855e3d529eb863a0fdae1d60006c4d1e9990"
dependencies = [
"hashbrown 0.15.5",
"num-traits",
"robust",
"smallvec",
]
[[package]]
name = "spin"
version = "0.5.2"
@@ -7839,7 +7821,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "1.21.0"
version = "1.22.1"
dependencies = [
"anyhow",
"build-info",
@@ -7986,18 +7968,6 @@ dependencies = [
"syn 2.0.106",
]
[[package]]
name = "zerometry"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "681f08f3f4ef27d3021a128eb6d8df1cd781e4c9c797c3971c1f85316374f977"
dependencies = [
"bytemuck",
"byteorder",
"geo",
"geo-types",
]
[[package]]
name = "zerometry"
version = "0.3.0"

View File

@@ -23,7 +23,7 @@ members = [
]
[workspace.package]
version = "1.21.0"
version = "1.22.1"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",

View File

@@ -121,7 +121,7 @@ If you want to know more about the kind of data we collect and what we use it fo
Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
🗞 [Subscribe to our newsletter](https://share-eu1.hsforms.com/1LN5N0x_GQgq7ss7tXmSykwfg3aq) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:

View File

@@ -158,9 +158,8 @@ pub enum KindDump {
UpgradeDatabase {
from: (u32, u32, u32),
},
NetworkTopologyChange {
network: Option<meilisearch_types::enterprise_edition::network::Network>,
origin: Option<meilisearch_types::tasks::Origin>,
IndexCompaction {
index_uid: String,
},
}
@@ -244,8 +243,8 @@ impl From<KindWithContent> for KindDump {
KindWithContent::UpgradeDatabase { from: version } => {
KindDump::UpgradeDatabase { from: version }
}
KindWithContent::NetworkTopologyChange { network, origin } => {
KindDump::NetworkTopologyChange { network, origin }
KindWithContent::IndexCompaction { index_uid } => {
KindDump::IndexCompaction { index_uid }
}
}
}
@@ -260,7 +259,7 @@ pub(crate) mod test {
use big_s::S;
use maplit::{btreemap, btreeset};
use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
use meilisearch_types::enterprise_edition::network::{Network, Remote};
use meilisearch_types::facet_values_sort::FacetValuesSort;
use meilisearch_types::features::RuntimeTogglableFeatures;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
@@ -551,10 +550,10 @@ pub(crate) mod test {
RuntimeTogglableFeatures::default()
}
fn create_test_network() -> DbNetwork {
DbNetwork {
fn create_test_network() -> Network {
Network {
local: Some("myself".to_string()),
remotes: maplit::btreemap! {"other".to_string() => DbRemote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
sharding: false,
}
}

View File

@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
pub type Key = meilisearch_types::keys::Key;
pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
pub type Network = meilisearch_types::enterprise_edition::network::DbNetwork;
pub type Network = meilisearch_types::enterprise_edition::network::Network;
pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;
// ===== Other types to clarify the code of the compat module

View File

@@ -5,7 +5,7 @@ use std::path::PathBuf;
use flate2::write::GzEncoder;
use flate2::Compression;
use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::DbNetwork;
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
use meilisearch_types::keys::Key;
use meilisearch_types::settings::{Checked, Settings};
@@ -72,7 +72,7 @@ impl DumpWriter {
)?)
}
pub fn create_network(&self, network: DbNetwork) -> Result<()> {
pub fn create_network(&self, network: Network) -> Result<()> {
Ok(std::fs::write(self.dir.path().join("network.json"), serde_json::to_string(&network)?)?)
}

View File

@@ -23,7 +23,6 @@ dump = { path = "../dump" }
enum-iterator = "2.1.0"
file-store = { path = "../file-store" }
flate2 = "1.1.2"
hashbrown = "0.15.4"
indexmap = "2.9.0"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
@@ -46,8 +45,6 @@ tracing = "0.1.41"
ureq = "2.12.1"
uuid = { version = "1.17.0", features = ["serde", "v4"] }
backoff = "0.4.0"
itertools = "0.14.0"
tokio = { version = "1.47.1", features = ["full"] }
[dev-dependencies]
big_s = "1.0.2"

View File

@@ -234,8 +234,8 @@ impl<'a> Dump<'a> {
}
}
KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
KindDump::NetworkTopologyChange { network: new_network, origin } => {
KindWithContent::NetworkTopologyChange { network: new_network, origin }
KindDump::IndexCompaction { index_uid } => {
KindWithContent::IndexCompaction { index_uid }
}
},
};

View File

@@ -1,6 +1,6 @@
use std::sync::{Arc, RwLock};
use meilisearch_types::enterprise_edition::network::DbNetwork;
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
@@ -24,7 +24,7 @@ mod db_keys {
pub(crate) struct FeatureData {
persisted: Database<Str, SerdeJson<RuntimeTogglableFeatures>>,
runtime: Arc<RwLock<RuntimeTogglableFeatures>>,
network: Arc<RwLock<DbNetwork>>,
network: Arc<RwLock<Network>>,
}
#[derive(Debug, Clone, Copy)]
@@ -197,8 +197,8 @@ impl FeatureData {
}));
// Once this is stabilized, network should be stored along with webhooks in index-scheduler's persisted database
let network_db = runtime_features_db.remap_data_type::<SerdeJson<DbNetwork>>();
let network: DbNetwork = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();
let network_db = runtime_features_db.remap_data_type::<SerdeJson<Network>>();
let network: Network = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();
Ok(Self {
persisted: runtime_features_db,
@@ -234,8 +234,8 @@ impl FeatureData {
RoFeatures::new(self)
}
pub fn put_network(&self, mut wtxn: RwTxn, new_network: DbNetwork) -> Result<()> {
self.persisted.remap_data_type::<SerdeJson<DbNetwork>>().put(
pub fn put_network(&self, mut wtxn: RwTxn, new_network: Network) -> Result<()> {
self.persisted.remap_data_type::<SerdeJson<Network>>().put(
&mut wtxn,
db_keys::NETWORK,
&new_network,
@@ -247,7 +247,7 @@ impl FeatureData {
Ok(())
}
pub fn network(&self) -> DbNetwork {
DbNetwork::clone(&*self.network.read().unwrap())
pub fn network(&self) -> Network {
Network::clone(&*self.network.read().unwrap())
}
}

View File

@@ -220,11 +220,9 @@ impl IndexMap {
uuid: &Uuid,
enable_mdb_writemap: bool,
map_size_growth: usize,
) {
let Some(index) = self.available.remove(uuid) else {
return;
};
self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
) -> Option<EnvClosingEvent> {
let index = self.available.remove(uuid)?;
Some(self.close(*uuid, index, enable_mdb_writemap, map_size_growth))
}
fn close(
@@ -233,14 +231,21 @@ impl IndexMap {
index: Index,
enable_mdb_writemap: bool,
map_size_growth: usize,
) {
) -> EnvClosingEvent {
let map_size = index.map_size() + map_size_growth;
let closing_event = index.prepare_for_closing();
let generation = self.next_generation();
self.unavailable.insert(
uuid,
Some(ClosingIndex { uuid, closing_event, enable_mdb_writemap, map_size, generation }),
Some(ClosingIndex {
uuid,
closing_event: closing_event.clone(),
enable_mdb_writemap,
map_size,
generation,
}),
);
closing_event
}
/// Attempts to delete an index.

View File

@@ -4,7 +4,7 @@ use std::time::Duration;
use std::{fs, thread};
use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
use meilisearch_types::heed::{Database, Env, EnvClosingEvent, RoTxn, RwTxn, WithoutTls};
use meilisearch_types::milli;
use meilisearch_types::milli::database_stats::DatabaseStats;
use meilisearch_types::milli::index::RollbackOutcome;
@@ -341,6 +341,24 @@ impl IndexMapper {
Ok(())
}
/// Closes the specified index.
///
/// This operation involves closing the underlying environment and so can take a long time to complete.
///
/// # Panics
///
/// - If the Index corresponding to the passed name is concurrently being deleted/resized or cannot be found in the
/// in memory hash map.
pub fn close_index(&self, rtxn: &RoTxn, name: &str) -> Result<Option<EnvClosingEvent>> {
let uuid = self
.index_mapping
.get(rtxn, name)?
.ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
// We remove the index from the in-memory index map.
Ok(self.index_map.write().unwrap().close_for_resize(&uuid, self.enable_mdb_writemap, 0))
}
/// Return an index, may open it if it wasn't already opened.
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
if let Some((current_name, current_index)) =
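
The two hunks above give the index scheduler a way to close an index and to observe when the close actually completes: `IndexMap::close_for_resize` and `close` now return the heed `EnvClosingEvent`, and the new `IndexMapper::close_index` exposes it. A minimal sketch of the intended call pattern, assuming code that lives inside the index-scheduler crate where `env` and `index_mapper` are reachable (it mirrors what `apply_compaction` does further down):

```rust
// Sketch only: IndexScheduler, its error conversions and crate::Result come from the crate itself.
fn close_and_wait(scheduler: &IndexScheduler, name: &str) -> crate::Result<()> {
    let rtxn = scheduler.env.read_txn()?;
    // `close_index` returns None when the index is not currently open in the
    // in-memory map, in which case there is nothing to wait for.
    if let Some(closing_event) = scheduler.index_mapper.close_index(&rtxn, name)? {
        // Blocks until every handle on the LMDB environment has been dropped;
        // only then is it safe to replace or reopen data.mdb.
        closing_event.wait();
    }
    Ok(())
}
```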

View File

@@ -36,7 +36,6 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
run_loop_iteration: _,
embedders: _,
chat_settings: _,
runtime: _,
} = scheduler;
let rtxn = env.read_txn().unwrap();
@@ -318,8 +317,8 @@ fn snapshot_details(d: &Details) -> String {
Details::UpgradeDatabase { from, to } => {
format!("{{ from: {from:?}, to: {to:?} }}")
}
Details::NetworkTopologyChange { network: new_network } => {
format!("{{ new_network: {new_network:?} }}")
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
}
}
}

View File

@@ -54,7 +54,7 @@ pub use features::RoFeatures;
use flate2::bufread::GzEncoder;
use flate2::Compression;
use meilisearch_types::batches::Batch;
use meilisearch_types::enterprise_edition::network::DbNetwork;
use meilisearch_types::enterprise_edition::network::Network;
use meilisearch_types::features::{
ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
};
@@ -216,8 +216,6 @@ pub struct IndexScheduler {
/// A counter that is incremented before every call to [`tick`](IndexScheduler::tick)
#[cfg(test)]
run_loop_iteration: Arc<RwLock<usize>>,
runtime: Option<tokio::runtime::Handle>,
}
impl IndexScheduler {
@@ -244,7 +242,6 @@ impl IndexScheduler {
run_loop_iteration: self.run_loop_iteration.clone(),
features: self.features.clone(),
chat_settings: self.chat_settings,
runtime: self.runtime.clone(),
}
}
@@ -263,7 +260,6 @@ impl IndexScheduler {
options: IndexSchedulerOptions,
auth_env: Env<WithoutTls>,
from_db_version: (u32, u32, u32),
runtime: Option<tokio::runtime::Handle>,
#[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
#[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
) -> Result<Self> {
@@ -345,7 +341,6 @@ impl IndexScheduler {
run_loop_iteration: Arc::new(RwLock::new(0)),
features,
chat_settings,
runtime,
};
this.run();
@@ -897,13 +892,13 @@ impl IndexScheduler {
Ok(())
}
pub fn put_network(&self, network: DbNetwork) -> Result<()> {
pub fn put_network(&self, network: Network) -> Result<()> {
let wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
self.features.put_network(wtxn, network)?;
Ok(())
}
pub fn network(&self) -> DbNetwork {
pub fn network(&self) -> Network {
self.features.network()
}
@@ -932,10 +927,9 @@ impl IndexScheduler {
pub fn embedders(
&self,
index_uid: &str,
index_uid: String,
embedding_configs: Vec<IndexEmbeddingConfig>,
) -> Result<RuntimeEmbedders> {
let err = |err| Error::from_milli(err, Some(index_uid.to_owned()));
let res: Result<_> = embedding_configs
.into_iter()
.map(
@@ -948,7 +942,7 @@ impl IndexScheduler {
let document_template = prompt
.try_into()
.map_err(meilisearch_types::milli::Error::from)
.map_err(err)?;
.map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?;
let fragments = fragments
.into_inner()
@@ -978,8 +972,9 @@ impl IndexScheduler {
let embedder = Arc::new(
Embedder::new(embedder_options.clone(), self.scheduler.embedding_cache_cap)
.map_err(meilisearch_types::milli::vector::Error::from)
.map_err(milli::Error::from)
.map_err(err)?,
.map_err(|err| {
Error::from_milli(err.into(), Some(index_uid.clone()))
})?,
);
{
let mut embedders = self.embedders.write().unwrap();

View File

@@ -138,6 +138,16 @@ make_enum_progress! {
}
}
make_enum_progress! {
pub enum IndexCompaction {
RetrieveTheIndex,
CreateTemporaryFile,
CopyAndCompactTheIndex,
PersistTheCompactedIndex,
CloseTheIndex,
}
}
make_enum_progress! {
pub enum InnerSwappingTwoIndexes {
RetrieveTheTasks,

View File

@@ -310,7 +310,8 @@ impl Queue {
| self.tasks.status.get(wtxn, &Status::Failed)?.unwrap_or_default()
| self.tasks.status.get(wtxn, &Status::Canceled)?.unwrap_or_default();
let to_delete = RoaringBitmap::from_iter(finished.into_iter().rev().take(100_000));
let to_delete =
RoaringBitmap::from_sorted_iter(finished.into_iter().take(100_000)).unwrap();
// /!\ the len must be at least 2 or else we might enter an infinite loop where we only delete
// the deletion tasks we enqueued ourselves.
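
The hunk above is the heart of "Delete oldest tasks first" (#5906, efeae09ce1 / 4beb452027): the auto-deletion now takes the first 100 000 entries of the ascending finished-task bitmap, i.e. the lowest and therefore oldest task uids, and switching to `from_sorted_iter` is only valid because the un-reversed iterator yields uids in ascending order. A self-contained illustration of the behavioural difference, with made-up task uids:

```rust
// Made-up task uids; requires the `roaring` crate.
use roaring::RoaringBitmap;

fn main() {
    // Task uids grow monotonically, so "lowest uid" means "oldest task".
    let finished = RoaringBitmap::from_sorted_iter(0..10u32).unwrap();

    // New behaviour: take from the front of the ascending iterator => oldest tasks.
    let oldest: Vec<u32> = finished.clone().into_iter().take(3).collect();
    // Previous behaviour: reverse first, so the newest tasks were selected instead.
    let newest: Vec<u32> = finished.into_iter().rev().take(3).collect();

    assert_eq!(oldest, vec![0, 1, 2]);
    assert_eq!(newest, vec![9, 8, 7]);
}
```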

View File

@@ -25,6 +25,7 @@ enum AutobatchKind {
IndexDeletion,
IndexUpdate,
IndexSwap,
IndexCompaction,
}
impl AutobatchKind {
@@ -68,12 +69,12 @@ impl From<KindWithContent> for AutobatchKind {
KindWithContent::IndexCreation { .. } => AutobatchKind::IndexCreation,
KindWithContent::IndexUpdate { .. } => AutobatchKind::IndexUpdate,
KindWithContent::IndexSwap { .. } => AutobatchKind::IndexSwap,
KindWithContent::IndexCompaction { .. } => AutobatchKind::IndexCompaction,
KindWithContent::TaskCancelation { .. }
| KindWithContent::TaskDeletion { .. }
| KindWithContent::DumpCreation { .. }
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::NetworkTopologyChange { .. }
| KindWithContent::SnapshotCreation => {
panic!("The autobatcher should never be called with tasks that don't apply to an index.")
}
@@ -119,6 +120,9 @@ pub enum BatchKind {
IndexSwap {
id: TaskId,
},
IndexCompaction {
id: TaskId,
},
}
impl BatchKind {
@@ -184,6 +188,13 @@ impl BatchKind {
)),
false,
),
K::IndexCompaction => (
Break((
BatchKind::IndexCompaction { id: task_id },
BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
)),
false,
),
K::DocumentClear => (Continue(BatchKind::DocumentClear { ids: vec![task_id] }), false),
K::DocumentImport { allow_index_creation, primary_key: pk }
if primary_key.is_none() || pk.is_none() || primary_key == pk.as_deref() =>
@@ -289,7 +300,9 @@ impl BatchKind {
match (self, autobatch_kind) {
// We don't batch any of these operations
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break((this, BatchStopReason::TaskCannotBeBatched { kind, id })),
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition | K::IndexCompaction) => {
Break((this, BatchStopReason::TaskCannotBeBatched { kind, id }))
},
// We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
(this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
Break((this, BatchStopReason::IndexCreationMismatch { id }))
@@ -484,6 +497,7 @@ impl BatchKind {
| BatchKind::IndexDeletion { .. }
| BatchKind::IndexUpdate { .. }
| BatchKind::IndexSwap { .. }
| BatchKind::IndexCompaction { .. }
| BatchKind::DocumentEdition { .. },
_,
) => {

View File

@@ -55,8 +55,9 @@ pub(crate) enum Batch {
UpgradeDatabase {
tasks: Vec<Task>,
},
NetworkTopologyChanges {
tasks: Vec<Task>,
IndexCompaction {
index_uid: String,
task: Task,
},
}
@@ -113,14 +114,14 @@ impl Batch {
| Batch::Dump(task)
| Batch::IndexCreation { task, .. }
| Batch::Export { task }
| Batch::IndexUpdate { task, .. } => {
| Batch::IndexUpdate { task, .. }
| Batch::IndexCompaction { task, .. } => {
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
}
Batch::SnapshotCreation(tasks)
| Batch::TaskDeletions(tasks)
| Batch::UpgradeDatabase { tasks }
| Batch::IndexDeletion { tasks, .. }
| Batch::NetworkTopologyChanges { tasks } => {
| Batch::IndexDeletion { tasks, .. } => {
RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
}
Batch::IndexOperation { op, .. } => match op {
@@ -155,12 +156,12 @@ impl Batch {
| Dump(_)
| Export { .. }
| UpgradeDatabase { .. }
| NetworkTopologyChanges { .. }
| IndexSwap { .. } => None,
IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid, .. } => Some(index_uid),
| IndexDeletion { index_uid, .. }
| IndexCompaction { index_uid, .. } => Some(index_uid),
}
}
}
@@ -180,8 +181,8 @@ impl fmt::Display for Batch {
Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
Batch::Export { .. } => f.write_str("Export")?,
Batch::NetworkTopologyChanges { .. } => f.write_str("NetworkTopologyChange")?,
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
};
match index_uid {
@@ -436,6 +437,12 @@ impl IndexScheduler {
current_batch.processing(Some(&mut task));
Ok(Some(Batch::IndexSwap { task }))
}
BatchKind::IndexCompaction { id } => {
let mut task =
self.queue.tasks.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
current_batch.processing(Some(&mut task));
Ok(Some(Batch::IndexCompaction { index_uid, task }))
}
}
}
@@ -551,18 +558,7 @@ impl IndexScheduler {
return Ok(Some((Batch::Dump(task), current_batch)));
}
// 6. We batch the network changes.
let to_network = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)? & enqueued;
if !to_network.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_network)?;
current_batch.processing(&mut tasks);
current_batch.reason(BatchStopReason::TaskKindCannotBeBatched {
kind: Kind::NetworkTopologyChange,
});
return Ok(Some((Batch::NetworkTopologyChanges { tasks }, current_batch)));
}
// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
// 6. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;

View File

@@ -1,6 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
mod process_network;

View File

@@ -1,362 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use std::collections::BTreeMap;
use std::time::Duration;
use bumpalo::Bump;
use itertools::{EitherOrBoth, Itertools};
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote, Network, Remote};
use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::progress::{EmbedderStats, Progress};
use meilisearch_types::milli::update::new::indexer;
use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::{self};
use meilisearch_types::tasks::{KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
use crate::{Error, IndexScheduler};
impl IndexScheduler {
pub(crate) fn process_network_changes(
&self,
progress: Progress,
mut tasks: Vec<Task>,
) -> crate::Result<Vec<Task>> {
let old_network = self.network();
let mut current_network = Some(old_network.clone());
for task in &tasks {
let KindWithContent::NetworkTopologyChange { network, origin } = &task.kind else {
continue;
};
current_network = match (current_network, network) {
(None, None) => None,
(None, Some(network)) => Some(accumulate(DbNetwork::default(), network.clone())?),
(Some(current_network), None) => Some(current_network),
(Some(current_network), Some(new_network)) => {
Some(accumulate(current_network, new_network.clone())?)
}
};
}
'network: {
let mut new_network = current_network.unwrap_or_default();
if old_network == new_network {
// no change, exit
break 'network;
}
/// TODO: only do this if the task originates with an end-user
let must_replicate = old_network.sharding || new_network.sharding;
if !must_replicate {
self.put_network(new_network)?;
break 'network;
}
let must_stop_processing = &self.scheduler.must_stop_processing;
/// FIXME: make it mandatory for `self` to be part of the network
let old_this = old_network.local.as_deref();
/// FIXME: error here
let new_this = new_network.local.unwrap();
// in network replication, we need to tell old nodes that they are no longer part of the network.
// This is made difficult by "node aliasing": Meilisearch has no way of knowing if two nodes with different names
// or even different URLs actually refer to the same machine in two different versions of the network.
//
// This implementation ignores aliasing: a node is the same when it has the same name.
//
// To defeat aliasing, we iterate a first time to collect all deletions and additions, then we make sure to process the deletions
// first, rather than processing the tasks in the alphalexical order of remotes.
let mut node_deletions = Vec::new();
let mut node_additions = Vec::new();
for eob in old_network
.remotes
.iter()
.merge_join_by(new_network.remotes.iter(), |(left, _), (right, _)| left.cmp(right))
{
match eob {
EitherOrBoth::Both((to_update_name, _), (_, new_node)) => {
if to_update_name.as_str() == new_this {
continue; // skip `self`
}
node_additions.push((to_update_name, new_node));
}
EitherOrBoth::Left((to_delete_name, to_delete_node)) => {
if Some(to_delete_name.as_str()) == old_this {
continue; // skip `self`
}
node_deletions.push((to_delete_name, to_delete_node));
}
EitherOrBoth::Right((to_add_name, to_add_node)) => {
if to_add_name.as_str() == new_this {
continue; // skip `self`
}
node_additions.push((to_add_name, to_add_node));
}
}
}
let runtime = self.runtime.clone().unwrap();
let mut in_flight = Vec::new();
// process deletions
for (to_delete_name, to_delete) in node_deletions {
// set `self` to None so that this node is forgotten about
new_network.local = None;
in_flight.push(proxy_network(&runtime, to_delete.url.as_str(), &new_network)?);
}
runtime.block_on(async {
for task in in_flight.drain(..) {
// TODO: log and ignore errors during deletion
let res = task.await;
}
});
// process additions
for (to_add_name, to_add) in node_additions {
new_network.local = Some(to_add_name.clone());
in_flight.push(proxy_network(&runtime, to_add.url.as_str(), &new_network)?);
}
runtime.block_on(async {
for task in in_flight.drain(..) {
// TODO: handle errors during addition
let res = task.await;
}
});
// balance documents
new_network.local = Some(new_this);
self.balance_documents(&new_network, &progress, &must_stop_processing)?;
self.put_network(new_network)?;
}
for task in &mut tasks {
task.status = Status::Succeeded;
}
Ok(tasks)
}
fn balance_documents(
&self,
new_network: &DbNetwork,
progress: &Progress,
must_stop_processing: &crate::scheduler::MustStopProcessing,
) -> crate::Result<()> {
/// FIXME unwrap
let new_shards = new_network.shards().unwrap();
// TECHDEBT: this spawns a `ureq` agent additionally to `reqwest`. We probably want to harmonize all of this.
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let mut indexer_alloc = Bump::new();
// process by batches of 20MiB. Allow for compression? Don't forget about embeddings
let _: Vec<()> = self.try_for_each_index(|index_uid, index| -> crate::Result<()> {
indexer_alloc.reset();
let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
let index_rtxn = index.read_txn()?;
let all_docids = index.external_documents_ids();
let mut documents_to_move_to: hashbrown::HashMap<String, RoaringBitmap> =
hashbrown::HashMap::new();
let mut documents_to_delete = RoaringBitmap::new();
for res in all_docids.iter(&index_rtxn)? {
let (external_docid, docid) = res?;
match new_shards.processing_shard(external_docid) {
Some(shard) if shard.is_own => continue,
Some(shard) => {
documents_to_move_to
.entry_ref(shard.name.as_str())
.or_default()
.insert(docid);
}
None => {
documents_to_delete.insert(docid);
}
}
}
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
for (remote, documents_to_move) in documents_to_move_to {
/// TODO: justify the unwrap
let remote = new_network.remotes.get(&remote).unwrap();
let target = TargetInstance {
base_url: &remote.url,
api_key: remote.write_api_key.as_deref(),
};
let options = ExportOptions {
index_uid,
payload_size: None,
override_settings: false,
extra_headers: &Default::default(),
};
let ctx = ExportContext {
index,
index_rtxn: &index_rtxn,
universe: &documents_to_move,
progress,
agent: &agent,
must_stop_processing,
};
self.export_one_index(target, options, ctx)?;
documents_to_delete |= documents_to_move;
}
if documents_to_delete.is_empty() {
return Ok(());
}
let mut new_fields_ids_map = fields_ids_map.clone();
// candidates not empty => index not empty => a primary key is set
let primary_key = index.primary_key(&index_rtxn)?.unwrap();
let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
.map_err(milli::Error::from)
.map_err(err)?;
let mut index_wtxn = index.write_txn()?;
let mut indexer = indexer::DocumentDeletion::new();
indexer.delete_documents_by_docids(documents_to_delete);
let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
let embedders = index
.embedding_configs()
.embedding_configs(&index_wtxn)
.map_err(milli::Error::from)
.map_err(err)?;
let embedders = self.embedders(index_uid, embedders)?;
let indexer_config = self.index_mapper.indexer_config();
let pool = &indexer_config.thread_pool;
indexer::index(
&mut index_wtxn,
index,
pool,
indexer_config.grenad_parameters(),
&fields_ids_map,
new_fields_ids_map,
None, // document deletion never changes primary key
&document_changes,
embedders,
&|| must_stop_processing.get(),
&progress,
&EmbedderStats::default(),
)
.map_err(err)?;
index_wtxn.commit()?;
Ok(())
})?;
Ok(())
}
}
fn proxy_network(
runtime: &tokio::runtime::Handle,
url: &str,
network: &DbNetwork,
) -> crate::Result<tokio::task::JoinHandle<()>> {
todo!()
}
fn accumulate(old_network: DbNetwork, new_network: Network) -> crate::Result<DbNetwork> {
let err = |err| Err(Error::from_milli(milli::Error::UserError(err), None));
let merged_local = match new_network.local {
Setting::Set(new_self) => Some(new_self),
Setting::Reset => None,
Setting::NotSet => old_network.local,
};
let merged_sharding = match new_network.sharding {
Setting::Set(new_sharding) => new_sharding,
Setting::Reset => false,
Setting::NotSet => old_network.sharding,
};
if merged_sharding && merged_local.is_none() {
return err(milli::UserError::NetworkShardingWithoutSelf);
}
let merged_remotes = match new_network.remotes {
Setting::Set(new_remotes) => {
let mut merged_remotes = BTreeMap::new();
for either_or_both in old_network
.remotes
.into_iter()
.merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
{
match either_or_both {
EitherOrBoth::Both((name, old), (_, Some(new))) => {
let DbRemote {
url: old_url,
search_api_key: old_search_api_key,
write_api_key: old_write_api_key,
} = old;
let Remote {
url: new_url,
search_api_key: new_search_api_key,
write_api_key: new_write_api_key,
} = new;
let merged = DbRemote {
url: match new_url {
Setting::Set(new_url) => new_url,
Setting::Reset => {
return err(milli::UserError::NetworkMissingUrl(name))
}
Setting::NotSet => old_url,
},
search_api_key: match new_search_api_key {
Setting::Set(new_search_api_key) => Some(new_search_api_key),
Setting::Reset => None,
Setting::NotSet => old_search_api_key,
},
write_api_key: match new_write_api_key {
Setting::Set(new_write_api_key) => Some(new_write_api_key),
Setting::Reset => None,
Setting::NotSet => old_write_api_key,
},
};
merged_remotes.insert(name, merged);
}
EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
EitherOrBoth::Left((name, node)) => {
merged_remotes.insert(name, node);
}
EitherOrBoth::Right((name, Some(node))) => {
let Some(url) = node.url.set() else {
return err(milli::UserError::NetworkMissingUrl(name));
};
let node = DbRemote {
url,
search_api_key: node.search_api_key.set(),
write_api_key: node.write_api_key.set(),
};
merged_remotes.insert(name, node);
}
}
}
merged_remotes
}
Setting::Reset => BTreeMap::new(),
Setting::NotSet => old_network.remotes,
};
Ok(DbNetwork { local: merged_local, remotes: merged_remotes, sharding: merged_sharding })
}

View File

@@ -2,7 +2,6 @@ mod autobatcher;
#[cfg(test)]
mod autobatcher_test;
mod create_batch;
mod enterprise_edition;
mod process_batch;
mod process_dump_creation;
mod process_export;

View File

@@ -1,22 +1,26 @@
use std::collections::{BTreeSet, HashMap, HashSet};
use std::io::{Seek, SeekFrom};
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::Ordering;
use byte_unit::Byte;
use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::{self, ChannelCongestion};
use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use milli::update::Settings as MilliSettings;
use roaring::RoaringBitmap;
use tempfile::PersistError;
use time::OffsetDateTime;
use super::create_batch::Batch;
use crate::processing::{
AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
UpdateIndexProgress,
IndexCompaction, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress,
TaskDeletionProgress, UpdateIndexProgress,
};
use crate::utils::{
self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
@@ -135,9 +139,6 @@ impl IndexScheduler {
Batch::Dump(task) => self
.process_dump_creation(progress, task)
.map(|tasks| (tasks, ProcessBatchInfo::default())),
Batch::NetworkTopologyChanges { tasks } => self
.process_network_changes(progress, tasks)
.map(|tasks| (tasks, ProcessBatchInfo::default())),
Batch::IndexOperation { op, must_create_index } => {
let index_uid = op.index_uid().to_string();
let index = if must_create_index {
@@ -421,6 +422,46 @@ impl IndexScheduler {
task.status = Status::Succeeded;
Ok((vec![task], ProcessBatchInfo::default()))
}
Batch::IndexCompaction { index_uid: _, mut task } => {
let KindWithContent::IndexCompaction { index_uid } = &task.kind else {
unreachable!()
};
let rtxn = self.env.read_txn()?;
let ret = catch_unwind(AssertUnwindSafe(|| {
self.apply_compaction(&rtxn, &progress, index_uid)
}));
let (pre_size, post_size) = match ret {
Ok(Ok(stats)) => stats,
Ok(Err(Error::AbortedTask)) => return Err(Error::AbortedTask),
Ok(Err(e)) => return Err(e),
Err(e) => {
let msg = match e.downcast_ref::<&'static str>() {
Some(s) => *s,
None => match e.downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
return Err(Error::Export(Box::new(Error::ProcessBatchPanicked(
msg.to_string(),
))));
}
};
task.status = Status::Succeeded;
if let Some(Details::IndexCompaction {
index_uid: _,
pre_compaction_size,
post_compaction_size,
}) = task.details.as_mut()
{
*pre_compaction_size = Some(Byte::from_u64(pre_size));
*post_compaction_size = Some(Byte::from_u64(post_size));
}
Ok((vec![task], ProcessBatchInfo::default()))
}
Batch::Export { mut task } => {
let KindWithContent::Export { url, api_key, payload_size, indexes } = &task.kind
else {
@@ -496,6 +537,58 @@ impl IndexScheduler {
}
}
fn apply_compaction(
&self,
rtxn: &RoTxn,
progress: &Progress,
index_uid: &str,
) -> Result<(u64, u64)> {
// 1. Verify that the index exists
if !self.index_mapper.index_exists(rtxn, index_uid)? {
return Err(Error::IndexNotFound(index_uid.to_owned()));
}
// 2. We retrieve the index and create a temporary file in the index directory
progress.update_progress(IndexCompaction::RetrieveTheIndex);
let index = self.index_mapper.index(rtxn, index_uid)?;
progress.update_progress(IndexCompaction::CreateTemporaryFile);
let pre_size = std::fs::metadata(index.path().join("data.mdb"))?.len();
let mut file = tempfile::Builder::new()
.suffix("data.")
.prefix(".mdb.cpy")
.tempfile_in(index.path())?;
// 3. We copy the index data to the temporary file
progress.update_progress(IndexCompaction::CopyAndCompactTheIndex);
index
.copy_to_file(file.as_file_mut(), CompactionOption::Enabled)
.map_err(|error| Error::Milli { error, index_uid: Some(index_uid.to_string()) })?;
// ...and reset the file position as specified in the documentation
file.seek(SeekFrom::Start(0))?;
// 4. We replace the index data file with the temporary file
progress.update_progress(IndexCompaction::PersistTheCompactedIndex);
let post_size = file.as_file().metadata()?.len();
match file.persist(index.path().join("data.mdb")) {
Ok(file) => file.sync_all()?,
// TODO see if we have a _resource busy_ error and probably handle this by:
// 1. closing the index, 2. replacing and 3. reopening it
Err(PersistError { error, file: _ }) => return Err(Error::IoError(error)),
};
// 5. prepare to close the index and wait for it
// The next time the index is opened, it will use the new compacted data
let closing_event = self.index_mapper.close_index(rtxn, index_uid)?;
if let Some(closing_event) = closing_event {
progress.update_progress(IndexCompaction::CloseTheIndex);
drop(index);
closing_event.wait();
}
Ok((pre_size, post_size))
}
/// Swap the index `lhs` with the index `rhs`.
fn apply_index_swap(
&self,
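
One easy-to-miss detail in `apply_compaction` above: the temporary file is created inside the index directory (`tempfile_in(index.path())`). `NamedTempFile::persist` works by renaming the file over `data.mdb`, and that rename can only succeed, and be atomic, when source and destination sit on the same filesystem. A stripped-down sketch of the copy-then-swap pattern from steps 2 to 4, with the actual compaction copy stubbed out (the paths and byte payload are assumptions):

```rust
// Sketch of the copy-then-swap from apply_compaction; requires the `tempfile` crate.
use std::io::{Seek, SeekFrom, Write};
use std::path::Path;

fn swap_in_compacted(index_dir: &Path, compacted: &[u8]) -> std::io::Result<u64> {
    // Same directory as data.mdb => same filesystem, so `persist` can rename in place.
    let mut file = tempfile::Builder::new().tempfile_in(index_dir)?;

    // Stands in for `Index::copy_to_file(..., CompactionOption::Enabled)`.
    file.write_all(compacted)?;
    // Reset the cursor, mirroring the seek(SeekFrom::Start(0)) in the real code.
    file.seek(SeekFrom::Start(0))?;

    let post_size = file.as_file().metadata()?.len();
    // Replaces the old data.mdb via a rename; persist fails (rather than copying)
    // if that rename would have to cross filesystems.
    let file = file.persist(index_dir.join("data.mdb"))?;
    file.sync_all()?;
    Ok(post_size)
}
```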

View File

@@ -16,7 +16,6 @@ use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOr
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
use meilisearch_types::settings::{self, SecretPolicy};
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
use roaring::RoaringBitmap;
use serde::Deserialize;
use ureq::{json, Response};
@@ -51,7 +50,6 @@ impl IndexScheduler {
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let must_stop_processing = self.scheduler.must_stop_processing.clone();
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
let err = |err| Error::from_milli(err, Some(uid.to_string()));
if must_stop_processing.get() {
return Err(Error::AbortedTask);
}
@@ -63,31 +61,104 @@ impl IndexScheduler {
));
let ExportIndexSettings { filter, override_settings } = export_settings;
let index = self.index(uid)?;
let index_rtxn = index.read_txn()?;
let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
let filter_universe =
filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
let whole_universe =
index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
// First, check if the index already exists
let url = format!("{base_url}/indexes/{uid}");
let response = retry(&must_stop_processing, || {
let mut request = agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
false
}
Err(e) => return Err(e),
};
let primary_key = index
.primary_key(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
// Create the index
if !index_exists {
let url = format!("{base_url}/indexes");
retry(&must_stop_processing, || {
let mut request = agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
// Patch the index primary key
if index_exists && *override_settings {
let url = format!("{base_url}/indexes/{uid}");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
// Send the index settings
if !index_exists || *override_settings {
let mut settings =
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!("{base_url}/indexes/{uid}/settings");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}
let filter = filter
.as_ref()
.map(Filter::from_json)
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
.flatten();
let filter_universe = filter
.map(|f| f.evaluate(&index_rtxn, &index))
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let whole_universe = index
.documents_ids(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let universe = filter_universe.unwrap_or(whole_universe);
let target = TargetInstance { base_url, api_key };
let ctx = ExportContext {
index: &index,
index_rtxn: &index_rtxn,
universe: &universe,
progress: &progress,
agent: &agent,
must_stop_processing: &must_stop_processing,
};
let options = ExportOptions {
index_uid: uid,
payload_size,
override_settings: *override_settings,
extra_headers: &Default::default(),
};
let total_documents = self.export_one_index(target, options, ctx)?;
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
// We don't need to keep this one alive as we will
// spawn many threads to process the documents
drop(index_rtxn);
let total_documents = universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
progress.update_progress(progress_step);
output.insert(
IndexUidPattern::new_unchecked(uid.clone()),
@@ -96,217 +167,155 @@ impl IndexScheduler {
matched_documents: Some(total_documents as u64),
},
);
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
let documents_url = format!("{base_url}/indexes/{uid}/documents");
let results = request_threads()
.broadcast(|ctx| {
let index_rtxn = index
.read_txn()
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in universe.iter().enumerate() {
if i % ctx.num_threads() != ctx.index() {
continue;
}
let document = index
.document(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
// TODO definitely factorize this code
'inject_vectors: {
let embeddings = index
.embeddings(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
if embeddings.is_empty() {
break 'inject_vectors;
}
let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));
let serde_json::Value::Object(vectors) = vectors else {
return Err(Error::from_milli(
milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = index
.external_id_of(
&index_rtxn,
std::iter::once(docid),
)
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
),
Some(uid.to_string()),
));
};
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(
VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
),
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided, all embeddings would be regenerated on any
// settings change or document update. To prevent this, we mark the embeddings
// as non-regenerate in this case.
!has_fragments,
};
vectors.insert(
embedder_name,
serde_json::to_value(embeddings).unwrap(),
);
}
}
tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
// We compress the documents before sending them
let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder
.write_all(&buffer)
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
encoder
.finish()
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
})?;
buffer.clear();
compressed_buffer.clear();
}
buffer.extend_from_slice(&tmp_buffer);
if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}
retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;
Ok(())
})
.map_err(|e| {
Error::from_milli(
milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
Some(uid.to_string()),
)
})?;
for result in results {
result?;
}
step.store(total_documents, atomic::Ordering::Relaxed);
}
Ok(output)
}
pub(super) fn export_one_index(
&self,
target: TargetInstance<'_>,
options: ExportOptions<'_>,
ctx: ExportContext<'_>,
) -> Result<u64, Error> {
let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
let url = format!(
"{base_url}/indexes/{index_uid}",
base_url = target.base_url,
index_uid = options.index_uid
);
let response = retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => false,
Err(e) => return Err(e),
};
let primary_key =
ctx.index.primary_key(&ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
if !index_exists {
let url = format!("{base_url}/indexes", base_url = target.base_url);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if index_exists && options.override_settings {
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if !index_exists || options.override_settings {
let mut settings =
settings::settings(&ctx.index, &ctx.index_rtxn, SecretPolicy::RevealSecrets)
.map_err(err)?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!(
"{base_url}/indexes/{index_uid}/settings",
base_url = target.base_url,
index_uid = options.index_uid
);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}
let fields_ids_map = ctx.index.fields_ids_map(&ctx.index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
let total_documents = ctx.universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
ctx.progress.update_progress(progress_step);
let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
let documents_url = format!(
"{base_url}/indexes/{index_uid}/documents",
base_url = target.base_url,
index_uid = options.index_uid
);
let results = request_threads()
.broadcast(|broadcast| {
let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;
let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in ctx.universe.iter().enumerate() {
if i % broadcast.num_threads() != broadcast.index() {
continue;
}
let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;
let mut document =
obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;
// TODO definitely factorize this code
'inject_vectors: {
let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;
if embeddings.is_empty() {
break 'inject_vectors;
}
let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));
let serde_json::Value::Object(vectors) = vectors else {
return Err(err(milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = ctx
.index
.external_id_of(&index_rtxn, std::iter::once(docid))
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
)));
};
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
embeddings,
)),
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided, all embeddings would be regenerated on any
// settings change or document update. To prevent this, we mark the embeddings
// as non-regenerate in this case.
!has_fragments,
};
vectors
.insert(embedder_name, serde_json::to_value(embeddings).unwrap());
}
}
tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(milli::Error::from)
.map_err(err)?;
// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
// We compress the documents before sending them
let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder.write_all(&buffer).map_err(milli::Error::from).map_err(err)?;
encoder.finish().map_err(milli::Error::from).map_err(err)?;
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
})?;
buffer.clear();
compressed_buffer.clear();
}
buffer.extend_from_slice(&tmp_buffer);
if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;
Ok(())
})
.map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
for result in results {
result?;
}
step.store(total_documents, atomic::Ordering::Relaxed);
Ok(total_documents as u64)
}
}
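The export loop above serializes documents to NDJSON, flushes the buffer as a gzipped batch whenever the next document would push it past the payload limit (20 MiB by default), and always keeps at least one document per batch. A minimal, self-contained sketch of that buffering strategy, assuming only flate2 and serde_json as used above; the batch_ndjson and gzip helper names are illustrative, not part of the codebase:

use std::io::Write;

use flate2::write::GzEncoder;
use flate2::Compression;

// Split serialized documents into gzipped NDJSON batches, flushing whenever adding
// the next line would push the buffer past `limit`, but never producing an empty batch.
fn batch_ndjson(
    docs: &[serde_json::Value],
    limit: usize,
) -> Result<Vec<Vec<u8>>, Box<dyn std::error::Error>> {
    let mut batches = Vec::new();
    let mut buffer = Vec::new();
    for doc in docs {
        let mut line = serde_json::to_vec(doc)?;
        line.push(b'\n');
        // Keep at least one document per batch, even if it alone exceeds the limit.
        if !buffer.is_empty() && buffer.len() + line.len() > limit {
            batches.push(gzip(&buffer)?);
            buffer.clear();
        }
        buffer.extend_from_slice(&line);
    }
    if !buffer.is_empty() {
        batches.push(gzip(&buffer)?);
    }
    Ok(batches)
}

fn gzip(bytes: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(bytes)?;
    encoder.finish()
}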
fn retry<F>(must_stop_processing: &MustStopProcessing, send_request: F) -> Result<ureq::Response>
@@ -365,27 +374,4 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
}
}
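The body of retry is elided by the hunk above; conceptually it re-issues the request with growing delays until it succeeds, a retry cap is hit, or processing is asked to stop. A rough sketch of that pattern, where ShouldStop and retry_with_backoff are hypothetical stand-ins rather than the actual implementation:

use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;

// Hypothetical stand-in for `MustStopProcessing`: a flag checked between attempts.
struct ShouldStop(AtomicBool);

// Retry `attempt` with doubling delays, giving up after `max_retries` extra attempts
// or as soon as the stop flag is raised.
fn retry_with_backoff<T, E>(
    stop: &ShouldStop,
    max_retries: u32,
    mut attempt: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut delay = Duration::from_millis(100);
    let mut retries = 0;
    loop {
        match attempt() {
            Ok(value) => return Ok(value),
            Err(err) if retries >= max_retries || stop.0.load(Ordering::Relaxed) => return Err(err),
            Err(_) => {
                thread::sleep(delay);
                delay = delay.saturating_mul(2);
                retries += 1;
            }
        }
    }
}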
// export_one_index arguments
pub(super) struct TargetInstance<'a> {
pub(super) base_url: &'a str,
pub(super) api_key: Option<&'a str>,
}
pub(super) struct ExportOptions<'a> {
pub(super) index_uid: &'a str,
pub(super) payload_size: Option<&'a Byte>,
pub(super) override_settings: bool,
pub(super) extra_headers: &'a hashbrown::HashMap<String, String>,
}
pub(super) struct ExportContext<'a> {
pub(super) index: &'a meilisearch_types::milli::Index,
pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
pub(super) universe: &'a RoaringBitmap,
pub(super) progress: &'a Progress,
pub(super) agent: &'a ureq::Agent,
pub(super) must_stop_processing: &'a MustStopProcessing,
}
// progress related
enum ExportIndex {}

View File

@@ -97,7 +97,7 @@ impl IndexScheduler {
.embedding_configs()
.embedding_configs(index_wtxn)
.map_err(|e| Error::from_milli(e.into(), Some(index_uid.clone())))?;
let embedders = self.embedders(&index_uid, embedders)?;
let embedders = self.embedders(index_uid.clone(), embedders)?;
for operation in operations {
match operation {
DocumentOperation::Replace(_content_uuid) => {
@@ -284,7 +284,7 @@ impl IndexScheduler {
.embedding_configs()
.embedding_configs(index_wtxn)
.map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
let embedders = self.embedders(&index_uid, embedders)?;
let embedders = self.embedders(index_uid.clone(), embedders)?;
progress.update_progress(DocumentEditionProgress::Indexing);
congestion = Some(
@@ -434,7 +434,7 @@ impl IndexScheduler {
.embedding_configs()
.embedding_configs(index_wtxn)
.map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
let embedders = self.embedders(&index_uid, embedders)?;
let embedders = self.embedders(index_uid.clone(), embedders)?;
progress.update_progress(DocumentDeletionProgress::Indexing);
congestion = Some(

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 22, 1) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.1"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 22, 1) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
----------------------------------------------------------------------
### Status:
enqueued [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 22, 1) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 22, 1) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.1"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 22, 1) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.1"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 22, 1) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.22.1"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -742,11 +742,11 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"networkTopologyChange": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
@@ -757,7 +757,7 @@ fn basic_get_stats() {
"###);
handle.advance_till([Start, BatchCreated]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
{
"indexes": {
"catto": 1,
@@ -777,6 +777,7 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
@@ -788,7 +789,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0
}
}
"#);
"###);
handle.advance_till([
InsideProcessBatch,
@@ -798,7 +799,7 @@ fn basic_get_stats() {
Start,
BatchCreated,
]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
{
"indexes": {
"catto": 1,
@@ -818,6 +819,7 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
@@ -829,7 +831,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0
}
}
"#);
"###);
// now we make one more batch, the started_at field of the new tasks will be past `second_start_time`
handle.advance_till([
@@ -840,7 +842,7 @@ fn basic_get_stats() {
Start,
BatchCreated,
]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
{
"indexes": {
"catto": 1,
@@ -860,6 +862,7 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
@@ -871,7 +874,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0
}
}
"#);
"###);
}
#[test]

View File

@@ -121,7 +121,7 @@ fn import_vectors() {
insta::assert_json_snapshot!(simple_hf_config.embedder_options);
let simple_hf_name = name.clone();
let configs = index_scheduler.embedders("doggos", configs).unwrap();
let configs = index_scheduler.embedders("doggos".to_string(), configs).unwrap();
let hf_runtime = configs.get(&simple_hf_name).unwrap();
let hf_embedder = &hf_runtime.embedder;
let beagle_embed = hf_embedder

View File

@@ -126,7 +126,7 @@ impl IndexScheduler {
std::fs::create_dir_all(&options.auth_path).unwrap();
let auth_env = open_auth_store_env(&options.auth_path).unwrap();
let index_scheduler =
Self::new(options, auth_env, version, None, sender, planned_failures).unwrap();
Self::new(options, auth_env, version, sender, planned_failures).unwrap();
// To be 100% consistent between all test we're going to start the scheduler right now
// and ensure it's in the expected starting state.

View File

@@ -45,6 +45,7 @@ pub fn upgrade_index_scheduler(
(1, 19, _) => 0,
(1, 20, _) => 0,
(1, 21, _) => 0,
(1, 22, _) => 0,
(major, minor, patch) => {
if major > current_major
|| (major == current_major && minor > current_minor)

View File

@@ -256,14 +256,15 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
use KindWithContent as K;
let mut index_uids = vec![];
match &mut task.kind {
K::DocumentAdditionOrUpdate { index_uid, .. } => index_uids.push(index_uid),
K::DocumentEdition { index_uid, .. } => index_uids.push(index_uid),
K::DocumentDeletion { index_uid, .. } => index_uids.push(index_uid),
K::DocumentDeletionByFilter { index_uid, .. } => index_uids.push(index_uid),
K::DocumentClear { index_uid } => index_uids.push(index_uid),
K::SettingsUpdate { index_uid, .. } => index_uids.push(index_uid),
K::IndexDeletion { index_uid } => index_uids.push(index_uid),
K::IndexCreation { index_uid, .. } => index_uids.push(index_uid),
K::DocumentAdditionOrUpdate { index_uid, .. }
| K::DocumentEdition { index_uid, .. }
| K::DocumentDeletion { index_uid, .. }
| K::DocumentDeletionByFilter { index_uid, .. }
| K::DocumentClear { index_uid }
| K::SettingsUpdate { index_uid, .. }
| K::IndexDeletion { index_uid }
| K::IndexCreation { index_uid, .. }
| K::IndexCompaction { index_uid, .. } => index_uids.push(index_uid),
K::IndexUpdate { index_uid, new_index_uid, .. } => {
index_uids.push(index_uid);
if let Some(new_uid) = new_index_uid {
@@ -285,7 +286,6 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
| K::DumpCreation { .. }
| K::Export { .. }
| K::UpgradeDatabase { .. }
| K::NetworkTopologyChange { .. }
| K::SnapshotCreation => (),
};
if let Some(Details::IndexSwap { swaps }) = &mut task.details {
@@ -619,8 +619,12 @@ impl crate::IndexScheduler {
Details::UpgradeDatabase { from: _, to: _ } => {
assert_eq!(kind.as_kind(), Kind::UpgradeDatabase);
}
Details::NetworkTopologyChange { .. } => {
assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
Details::IndexCompaction {
index_uid: _,
pre_compaction_size: _,
post_compaction_size: _,
} => {
assert_eq!(kind.as_kind(), Kind::IndexCompaction);
}
}
}

View File

@@ -109,6 +109,7 @@ impl HeedAuthStore {
Action::IndexesGet,
Action::IndexesUpdate,
Action::IndexesSwap,
Action::IndexesCompact,
]
.iter(),
);

View File

@@ -5,85 +5,31 @@
use std::collections::BTreeMap;
use deserr::Deserr;
use milli::update::new::indexer::enterprise_edition::sharding::{Shard, Shards};
use milli::update::Setting;
use milli::update::new::indexer::enterprise_edition::sharding::Shards;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use crate::deserr::DeserrJsonError;
use crate::error::deserr_codes::{
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
};
#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Network {
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
#[serde(default)]
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
#[schema(value_type = Option<String>, example = json!("ms-00"), rename = "self")]
#[serde(default, rename = "self")]
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
pub local: Setting<String>,
#[schema(value_type = Option<bool>, example = json!(true))]
#[serde(default)]
#[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
pub sharding: Setting<bool>,
}
#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Remote {
#[schema(value_type = Option<String>, example = json!({
"ms-0": Remote { url: Setting::Set("http://localhost:7700".into()), search_api_key: Setting::Reset, write_api_key: Setting::Reset },
"ms-1": Remote { url: Setting::Set("http://localhost:7701".into()), search_api_key: Setting::Set("foo".into()), write_api_key: Setting::Set("bar".into()) },
"ms-2": Remote { url: Setting::Set("http://localhost:7702".into()), search_api_key: Setting::Set("bar".into()), write_api_key: Setting::Set("foo".into()) },
}))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkUrl>)]
#[serde(default)]
pub url: Setting<String>,
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkSearchApiKey>)]
#[serde(default)]
pub search_api_key: Setting<String>,
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkWriteApiKey>)]
#[serde(default)]
pub write_api_key: Setting<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
pub struct DbNetwork {
pub struct Network {
#[serde(default, rename = "self")]
pub local: Option<String>,
#[serde(default)]
pub remotes: BTreeMap<String, DbRemote>,
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub sharding: bool,
}
impl DbNetwork {
impl Network {
pub fn shards(&self) -> Option<Shards> {
if self.sharding {
let this = self.local.as_deref();
Some(Shards(
self.remotes
.keys()
.map(|name| Shard {
is_own: Some(name.as_str()) == this,
name: name.to_owned(),
})
.collect(),
))
let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
let others = self
.remotes
.keys()
.filter(|name| name.as_str() != this)
.map(|name| name.to_owned())
.collect();
Some(Shards { own: vec![this.to_owned()], others })
} else {
None
}
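With the new layout, shards() simply splits the topology into the local shard and everyone else. A toy illustration of that partition; the standalone Shards struct below only mirrors the own/others fields used above and is not the real type:

use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
struct Shards {
    own: Vec<String>,
    others: Vec<String>,
}

fn partition(local: &str, remotes: &BTreeMap<String, String>) -> Shards {
    let others = remotes.keys().filter(|name| name.as_str() != local).cloned().collect();
    Shards { own: vec![local.to_owned()], others }
}

fn main() {
    let mut remotes = BTreeMap::new();
    remotes.insert("ms-0".to_string(), "http://localhost:7700".to_string());
    remotes.insert("ms-1".to_string(), "http://localhost:7701".to_string());
    // "ms-0" is this instance, so it lands in `own` and only "ms-1" in `others`.
    let shards = partition("ms-0", &remotes);
    assert_eq!(shards, Shards { own: vec!["ms-0".into()], others: vec!["ms-1".into()] });
}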
@@ -92,7 +38,7 @@ impl DbNetwork {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct DbRemote {
pub struct Remote {
pub url: String,
#[serde(default)]
pub search_api_key: Option<String>,

View File

@@ -529,8 +529,6 @@ impl ErrorCode for milli::Error {
| UserError::DocumentEditionCompilationError(_) => {
Code::EditDocumentsByFunctionError
}
UserError::NetworkShardingWithoutSelf => Code::InvalidNetworkSharding,
UserError::NetworkMissingUrl(_) => Code::MissingNetworkUrl,
UserError::CelluliteError(err) => match err {
cellulite::Error::BuildCanceled
| cellulite::Error::VersionMismatchOnBuild(_)

View File

@@ -380,6 +380,9 @@ pub enum Action {
#[serde(rename = "webhooks.*")]
#[deserr(rename = "webhooks.*")]
WebhooksAll,
#[serde(rename = "indexes.compact")]
#[deserr(rename = "indexes.compact")]
IndexesCompact,
}
impl Action {
@@ -398,6 +401,7 @@ impl Action {
INDEXES_UPDATE => Some(Self::IndexesUpdate),
INDEXES_DELETE => Some(Self::IndexesDelete),
INDEXES_SWAP => Some(Self::IndexesSwap),
INDEXES_COMPACT => Some(Self::IndexesCompact),
TASKS_ALL => Some(Self::TasksAll),
TASKS_CANCEL => Some(Self::TasksCancel),
TASKS_DELETE => Some(Self::TasksDelete),
@@ -462,6 +466,7 @@ impl Action {
IndexesUpdate => false,
IndexesDelete => false,
IndexesSwap => false,
IndexesCompact => false,
TasksCancel => false,
TasksDelete => false,
TasksGet => true,
@@ -513,6 +518,7 @@ pub mod actions {
pub const INDEXES_UPDATE: u8 = IndexesUpdate.repr();
pub const INDEXES_DELETE: u8 = IndexesDelete.repr();
pub const INDEXES_SWAP: u8 = IndexesSwap.repr();
pub const INDEXES_COMPACT: u8 = IndexesCompact.repr();
pub const TASKS_ALL: u8 = TasksAll.repr();
pub const TASKS_CANCEL: u8 = TasksCancel.repr();
pub const TASKS_DELETE: u8 = TasksDelete.repr();
@@ -614,6 +620,7 @@ pub(crate) mod test {
assert!(WebhooksDelete.repr() == 47 && WEBHOOKS_DELETE == 47);
assert!(WebhooksCreate.repr() == 48 && WEBHOOKS_CREATE == 48);
assert!(WebhooksAll.repr() == 49 && WEBHOOKS_ALL == 49);
assert!(IndexesCompact.repr() == 50 && INDEXES_COMPACT == 50);
}
#[test]

View File

@@ -7,7 +7,6 @@ use time::{Duration, OffsetDateTime};
use utoipa::ToSchema;
use crate::batches::BatchId;
use crate::enterprise_edition::network::Network;
use crate::error::ResponseError;
use crate::settings::{Settings, Unchecked};
use crate::tasks::{
@@ -143,9 +142,11 @@ pub struct DetailsView {
pub old_index_uid: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub new_index_uid: Option<String>,
// network
// index compaction
#[serde(skip_serializing_if = "Option::is_none")]
pub network: Option<Network>,
pub pre_compaction_size: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub post_compaction_size: Option<String>,
}
impl DetailsView {
@@ -318,9 +319,23 @@ impl DetailsView {
// We should never be able to batch multiple renames at the same time.
(Some(left), Some(_right)) => Some(left),
},
network: match (&self.network, &other.network) {
pre_compaction_size: match (
self.pre_compaction_size.clone(),
other.pre_compaction_size.clone(),
) {
(None, None) => None,
(_, Some(network)) | (Some(network), None) => Some(network.clone()),
(None, Some(size)) | (Some(size), None) => Some(size),
// We should never be able to batch multiple compactions of the same index at the same time.
(Some(left), Some(_right)) => Some(left),
},
post_compaction_size: match (
self.post_compaction_size.clone(),
other.post_compaction_size.clone(),
) {
(None, None) => None,
(None, Some(size)) | (Some(size), None) => Some(size),
// We should never be able to batch multiple compactions of the same index at the same time.
(Some(left), Some(_right)) => Some(left),
},
}
}
@@ -423,8 +438,14 @@ impl From<Details> for DetailsView {
upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)),
..Default::default()
},
Details::NetworkTopologyChange { network: new_network } => {
DetailsView { network: new_network, ..Default::default() }
Details::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
DetailsView {
pre_compaction_size: pre_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
post_compaction_size: post_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
..Default::default()
}
}
}
}

View File

@@ -15,7 +15,6 @@ use utoipa::{schema, ToSchema};
use uuid::Uuid;
use crate::batches::BatchId;
use crate::enterprise_edition::network::Network;
use crate::error::ResponseError;
use crate::index_uid_pattern::IndexUidPattern;
use crate::keys::Key;
@@ -59,7 +58,6 @@ impl Task {
| TaskDeletion { .. }
| Export { .. }
| UpgradeDatabase { .. }
| NetworkTopologyChange { .. }
| IndexSwap { .. } => None,
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -69,7 +67,8 @@ impl Task {
| SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid } => Some(index_uid),
| IndexDeletion { index_uid }
| IndexCompaction { index_uid } => Some(index_uid),
}
}
@@ -97,7 +96,7 @@ impl Task {
| KindWithContent::SnapshotCreation
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::NetworkTopologyChange { .. } => None,
| KindWithContent::IndexCompaction { .. } => None,
}
}
}
@@ -173,9 +172,8 @@ pub enum KindWithContent {
UpgradeDatabase {
from: (u32, u32, u32),
},
NetworkTopologyChange {
network: Option<Network>,
origin: Option<Origin>,
IndexCompaction {
index_uid: String,
},
}
@@ -213,7 +211,7 @@ impl KindWithContent {
KindWithContent::SnapshotCreation => Kind::SnapshotCreation,
KindWithContent::Export { .. } => Kind::Export,
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
}
}
@@ -226,7 +224,6 @@ impl KindWithContent {
| TaskCancelation { .. }
| TaskDeletion { .. }
| Export { .. }
| NetworkTopologyChange { .. }
| UpgradeDatabase { .. } => vec![],
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -235,7 +232,8 @@ impl KindWithContent {
| DocumentClear { index_uid }
| SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. }
| IndexDeletion { index_uid } => vec![index_uid],
| IndexDeletion { index_uid }
| IndexCompaction { index_uid } => vec![index_uid],
IndexUpdate { index_uid, new_index_uid, .. } => {
let mut indexes = vec![index_uid.as_str()];
if let Some(new_uid) = new_index_uid {
@@ -334,9 +332,11 @@ impl KindWithContent {
versioning::VERSION_PATCH,
),
}),
KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
Some(Details::NetworkTopologyChange { network: new_network.clone() })
}
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
}
}
@@ -419,9 +419,11 @@ impl KindWithContent {
versioning::VERSION_PATCH,
),
}),
KindWithContent::NetworkTopologyChange { network: new_network, origin: _s } => {
Some(Details::NetworkTopologyChange { network: new_network.clone() })
}
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
}
}
}
@@ -484,9 +486,11 @@ impl From<&KindWithContent> for Option<Details> {
versioning::VERSION_PATCH,
),
}),
KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
Some(Details::NetworkTopologyChange { network: new_network.clone() })
}
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
}
}
}
@@ -597,7 +601,7 @@ pub enum Kind {
SnapshotCreation,
Export,
UpgradeDatabase,
NetworkTopologyChange,
IndexCompaction,
}
impl Kind {
@@ -609,15 +613,15 @@ impl Kind {
| Kind::SettingsUpdate
| Kind::IndexCreation
| Kind::IndexDeletion
| Kind::IndexUpdate => true,
| Kind::IndexUpdate
| Kind::IndexCompaction => true,
Kind::IndexSwap
| Kind::TaskCancelation
| Kind::TaskDeletion
| Kind::DumpCreation
| Kind::Export
| Kind::UpgradeDatabase
| Kind::SnapshotCreation
| Kind::NetworkTopologyChange => false,
| Kind::SnapshotCreation => false,
}
}
}
@@ -638,7 +642,7 @@ impl Display for Kind {
Kind::SnapshotCreation => write!(f, "snapshotCreation"),
Kind::Export => write!(f, "export"),
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
Kind::IndexCompaction => write!(f, "indexCompaction"),
}
}
}
@@ -674,8 +678,8 @@ impl FromStr for Kind {
Ok(Kind::Export)
} else if kind.eq_ignore_ascii_case("upgradeDatabase") {
Ok(Kind::UpgradeDatabase)
} else if kind.eq_ignore_ascii_case("networkTopologyChange") {
Ok(Kind::NetworkTopologyChange)
} else if kind.eq_ignore_ascii_case("indexCompaction") {
Ok(Kind::IndexCompaction)
} else {
Err(ParseTaskKindError(kind.to_owned()))
}
@@ -761,8 +765,10 @@ pub enum Details {
from: (u32, u32, u32),
to: (u32, u32, u32),
},
NetworkTopologyChange {
network: Option<Network>,
IndexCompaction {
index_uid: String,
pre_compaction_size: Option<Byte>,
post_compaction_size: Option<Byte>,
},
}
@@ -826,12 +832,15 @@ impl Details {
Self::ClearAll { deleted_documents } => *deleted_documents = Some(0),
Self::TaskCancelation { canceled_tasks, .. } => *canceled_tasks = Some(0),
Self::TaskDeletion { deleted_tasks, .. } => *deleted_tasks = Some(0),
Self::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
*pre_compaction_size = None;
*post_compaction_size = None;
}
Self::SettingsUpdate { .. }
| Self::IndexInfo { .. }
| Self::Dump { .. }
| Self::Export { .. }
| Self::UpgradeDatabase { .. }
| Self::NetworkTopologyChange { .. }
| Self::IndexSwap { .. } => (),
}

View File

@@ -91,7 +91,7 @@ time = { version = "0.3.41", features = [
] }
tokio = { version = "1.45.1", features = ["full"] }
toml = "0.8.23"
uuid = { version = "1.17.0", features = ["serde", "v4"] }
uuid = { version = "1.18.0", features = ["serde", "v4", "v7"] }
serde_urlencoded = "0.7.1"
termcolor = "1.4.1"
url = { version = "2.5.4", features = ["serde"] }

View File

@@ -216,10 +216,7 @@ enum OnFailure {
KeepDb,
}
pub fn setup_meilisearch(
opt: &Opt,
handle: tokio::runtime::Handle,
) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
let index_scheduler_opt = IndexSchedulerOptions {
version_file_path: opt.db_path.join(VERSION_FILE_NAME),
auth_path: opt.db_path.join("auth"),
@@ -259,7 +256,6 @@ pub fn setup_meilisearch(
index_scheduler_opt,
OnFailure::RemoveDb,
binary_version, // the db is empty
handle,
)?,
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
@@ -277,7 +273,7 @@ pub fn setup_meilisearch(
bail!("snapshot doesn't exist at {}", snapshot_path.display())
// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
} else {
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
}
} else if let Some(ref path) = opt.import_dump {
let src_path_exists = path.exists();
@@ -288,7 +284,6 @@ pub fn setup_meilisearch(
index_scheduler_opt,
OnFailure::RemoveDb,
binary_version, // the db is empty
handle,
)?;
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
Ok(()) => (index_scheduler, auth_controller),
@@ -309,10 +304,10 @@ pub fn setup_meilisearch(
// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
} else {
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
}
} else {
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
};
// We create a loop in a thread that registers snapshotCreation tasks
@@ -343,7 +338,6 @@ fn open_or_create_database_unchecked(
index_scheduler_opt: IndexSchedulerOptions,
on_failure: OnFailure,
version: (u32, u32, u32),
handle: tokio::runtime::Handle,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
// we don't want to create anything in the data.ms yet, thus we
// wrap our two builders in a closure that'll be executed later.
@@ -351,7 +345,7 @@ fn open_or_create_database_unchecked(
let auth_env = open_auth_store_env(&index_scheduler_opt.auth_path).unwrap();
let auth_controller = AuthController::new(auth_env.clone(), &opt.master_key);
let index_scheduler_builder = || -> anyhow::Result<_> {
Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version, Some(handle))?)
Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version)?)
};
match (
@@ -458,7 +452,6 @@ fn open_or_create_database(
index_scheduler_opt: IndexSchedulerOptions,
empty_db: bool,
binary_version: (u32, u32, u32),
handle: tokio::runtime::Handle,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
let version = if !empty_db {
check_version(opt, &index_scheduler_opt, binary_version)?
@@ -466,7 +459,7 @@ fn open_or_create_database(
binary_version
};
open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version, handle)
open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version)
}
fn import_dump(
@@ -591,7 +584,7 @@ fn import_dump(
let reader = DocumentsBatchReader::from_reader(reader)?;
let embedder_configs = index.embedding_configs().embedding_configs(&wtxn)?;
let embedders = index_scheduler.embedders(&uid, embedder_configs)?;
let embedders = index_scheduler.embedders(uid.to_string(), embedder_configs)?;
let builder = milli::update::IndexDocuments::new(
&mut wtxn,
@@ -619,7 +612,7 @@ fn import_dump(
let mut indexer = indexer::DocumentOperation::new();
let embedders = index.embedding_configs().embedding_configs(&rtxn)?;
let embedders = index_scheduler.embedders(&uid, embedders)?;
let embedders = index_scheduler.embedders(uid.clone(), embedders)?;
let mmap = unsafe { memmap2::Mmap::map(index_reader.documents_file())? };

View File

@@ -76,10 +76,7 @@ fn on_panic(info: &std::panic::PanicHookInfo) {
#[actix_web::main]
async fn main() -> anyhow::Result<()> {
// won't panic inside of tokio::main
let runtime = tokio::runtime::Handle::current();
try_main(runtime).await.inspect_err(|error| {
try_main().await.inspect_err(|error| {
tracing::error!(%error);
let mut current = error.source();
let mut depth = 0;
@@ -91,7 +88,7 @@ async fn main() -> anyhow::Result<()> {
})
}
async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
async fn try_main() -> anyhow::Result<()> {
let (opt, config_read_from) = Opt::try_build()?;
std::panic::set_hook(Box::new(on_panic));
@@ -125,7 +122,7 @@ async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
_ => (),
}
let (index_scheduler, auth_controller) = setup_meilisearch(&opt, runtime)?;
let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;
let analytics =
analytics::Analytics::new(&opt, index_scheduler.clone(), auth_controller.clone()).await;

View File

@@ -282,7 +282,8 @@ async fn process_search_request(
if let Some(search_rules) = auth_filter.get_index_search_rules(&index_uid) {
add_search_rules(&mut query.filter, search_rules);
}
let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
let search_kind =
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let permit = search_queue.try_get_search_permit().await?;
let features = index_scheduler.features();
@@ -299,7 +300,7 @@ async fn process_search_request(
let (search, _is_finite_pagination, _max_total_hits, _offset) =
prepare_search(&index_cloned, &rtxn, &query, &search_kind, time_budget, features)?;
match search_from_kind(&index_uid, search_kind, search) {
match search_from_kind(index_uid, search_kind, search) {
Ok((search_results, _)) => Ok((rtxn, Ok(search_results))),
Err(MeilisearchHttpError::Milli {
error: meilisearch_types::milli::Error::UserError(user_error),

View File

@@ -0,0 +1,100 @@
use actix_web::web::{self, Data};
use actix_web::{HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::keys::actions;
use meilisearch_types::tasks::KindWithContent;
use serde::Serialize;
use tracing::debug;
use utoipa::OpenApi;
use super::ActionPolicy;
use crate::analytics::{Aggregate, Analytics};
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::SummarizedTaskView;
#[derive(OpenApi)]
#[openapi(
paths(compact),
tags(
(
name = "Compact an index",
description = "The /compact route compacts the index database to reorganize it and make it smaller and more efficient.",
external_docs(url = "https://www.meilisearch.com/docs/reference/api/compact"),
),
),
)]
pub struct CompactApi;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(compact))));
}
/// Compact an index
#[utoipa::path(
post,
path = "{indexUid}/compact",
tag = "Compact an index",
security(("Bearer" = ["indexes.compact", "*"])),
params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),
responses(
(status = ACCEPTED, description = "Task successfully enqueued", body = SummarizedTaskView, content_type = "application/json", example = json!(
{
"taskUid": 147,
"indexUid": null,
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "2024-08-08T17:05:55.791772Z"
}
)),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "The Authorization header is missing. It must use the bearer authorization method.",
"code": "missing_authorization_header",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#missing_authorization_header"
}
)),
)
)]
pub async fn compact(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_COMPACT }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
req: HttpRequest,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
analytics.publish(IndexesCompactAggregator, &req);
let task = KindWithContent::IndexCompaction { index_uid: index_uid.to_string() };
let task =
match tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false))
.await?
{
Ok(task) => task,
Err(e) => return Err(e.into()),
};
debug!(returns = ?task, "Compact the {index_uid} index");
Ok(HttpResponse::Accepted().json(SummarizedTaskView::from(task)))
}
#[derive(Serialize)]
pub struct IndexesCompactAggregator;
impl Aggregate for IndexesCompactAggregator {
fn event_name(&self) -> &'static str {
"Indexes Compacted"
}
fn aggregate(self: Box<Self>, _new: Box<Self>) -> Box<Self> {
Box::new(Self)
}
fn into_event(self: Box<Self>) -> serde_json::Value {
serde_json::to_value(*self).unwrap_or_default()
}
}
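For reference, the new route is a plain authenticated POST with no body; a minimal client-side sketch with ureq, where the host, index uid, and API key are placeholders for your own deployment:

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholders: point this at your own instance, index, and key.
    let response = ureq::post("http://localhost:7700/indexes/movies/compact")
        .set("Authorization", "Bearer MY_API_KEY")
        .call()?;

    // The route answers 202 Accepted with a summarized task view that can be polled on /tasks/{taskUid}.
    println!("status: {}", response.status());
    println!("body: {}", response.into_string()?);
    Ok(())
}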

View File

@@ -45,7 +45,7 @@ use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::payload::Payload;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::enterprise_edition::proxy::{check_leader, proxy, Body};
use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
use crate::routes::indexes::search::fix_sort_query_parameters;
use crate::routes::{
get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
@@ -340,7 +340,6 @@ pub async fn delete_document(
let DocumentParam { index_uid, document_id } = path.into_inner();
let index_uid = IndexUid::try_from(index_uid)?;
let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -364,7 +363,7 @@ pub async fn delete_document(
};
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -947,7 +946,6 @@ async fn document_addition(
) -> Result<SummarizedTaskView, MeilisearchHttpError> {
let mime_type = extract_mime_type(req)?;
let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
let format = match (
mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),
@@ -1083,7 +1081,6 @@ async fn document_addition(
&index_scheduler,
&index_uid,
req,
origin,
network,
Body::with_ndjson_payload(file),
&task,
@@ -1171,7 +1168,6 @@ pub async fn delete_documents_batch(
debug!(parameters = ?body, "Delete documents by batch");
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -1198,8 +1194,7 @@ pub async fn delete_documents_batch(
};
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(body), &task)
.await?;
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -1259,7 +1254,6 @@ pub async fn delete_documents_by_filter(
let index_uid = index_uid.into_inner();
let filter = body.into_inner();
let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -1292,8 +1286,7 @@ pub async fn delete_documents_by_filter(
};
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(filter), &task)
.await?;
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -1391,7 +1384,6 @@ pub async fn edit_documents_by_function(
.check_edit_documents_by_function("Using the documents edit route")?;
let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let index_uid = index_uid.into_inner();
@@ -1444,8 +1436,7 @@ pub async fn edit_documents_by_function(
};
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(params), &task)
.await?;
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(params), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -1492,7 +1483,6 @@ pub async fn clear_all_documents(
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let network = index_scheduler.network();
let origin = check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -1515,7 +1505,7 @@ pub async fn clear_all_documents(
};
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
}
let task: SummarizedTaskView = task.into();

View File

@@ -38,27 +38,6 @@ impl Body<()> {
}
}
pub fn check_leader(
req: &HttpRequest,
network: &meilisearch_types::enterprise_edition::network::DbNetwork,
) -> Result<Option<Origin>, MeilisearchHttpError> {
match origin_from_req(req)? {
Some(origin) => Ok(Some(origin)),
None => {
let this = network
.local
.as_deref()
.expect("inconsistent `network.sharding` and `network.self`");
let is_leader = this == todo!();
if !is_leader {
return Err(MeilisearchHttpError::NotLeader { leader: todo!() });
}
Ok(None)
}
}
}
/// If necessary, proxies the passed request to the network and update the task description.
///
/// This function reads the custom headers from the request to determine if it must proxy the request or if the request
@@ -73,12 +52,11 @@ pub async fn proxy<T: serde::Serialize>(
index_scheduler: &IndexScheduler,
index_uid: &str,
req: &HttpRequest,
origin: Option<Origin>,
network: meilisearch_types::enterprise_edition::network::DbNetwork,
network: meilisearch_types::enterprise_edition::network::Network,
body: Body<T>,
task: &meilisearch_types::tasks::Task,
) -> Result<(), MeilisearchHttpError> {
match origin {
match origin_from_req(req)? {
Some(origin) => {
index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
}

View File

@@ -260,7 +260,7 @@ pub async fn search(
}
let index = index_scheduler.index(&index_uid)?;
let search_kind = search_kind(&search_query, &index_scheduler, &index_uid, &index)?;
let search_kind = search_kind(&search_query, &index_scheduler, index_uid.to_string(), &index)?;
let permit = search_queue.try_get_search_permit().await?;
let search_result = tokio::task::spawn_blocking(move || {
perform_facet_search(

View File

@@ -28,6 +28,7 @@ use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::is_dry_run;
use crate::Opt;
pub mod compact;
pub mod documents;
mod enterprise_edition;
pub mod facet_search;
@@ -49,8 +50,9 @@ pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TAS
(path = "/", api = facet_search::FacetSearchApi),
(path = "/", api = similar::SimilarApi),
(path = "/", api = settings::SettingsApi),
(path = "/", api = compact::CompactApi),
),
paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats),
paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats, compact::compact),
tags(
(
name = "Indexes",
@@ -80,7 +82,8 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
.service(web::scope("/search").configure(search::configure))
.service(web::scope("/facet-search").configure(facet_search::configure))
.service(web::scope("/similar").configure(similar::configure))
.service(web::scope("/settings").configure(settings::configure)),
.service(web::scope("/settings").configure(settings::configure))
.service(web::scope("/compact").configure(compact::configure)),
);
}

View File

@@ -13,6 +13,7 @@ use meilisearch_types::serde_cs::vec::CS;
use serde_json::Value;
use tracing::debug;
use utoipa::{IntoParams, OpenApi};
use uuid::Uuid;
use crate::analytics::Analytics;
use crate::error::MeilisearchHttpError;
@@ -325,7 +326,8 @@ pub async fn search_with_url_query(
req: HttpRequest,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?params, "Search get");
let request_uid = Uuid::now_v7();
debug!(request_uid = ?request_uid, parameters = ?params, "Search get");
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let mut query: SearchQuery = params.into_inner().try_into()?;
@@ -339,17 +341,19 @@ pub async fn search_with_url_query(
let index = index_scheduler.index(&index_uid)?;
let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
let search_kind =
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
let permit = search_queue.try_get_search_permit().await?;
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
&index_uid,
index_uid.to_string(),
&index,
query,
search_kind,
retrieve_vector,
index_scheduler.features(),
request_uid,
)
})
.await;
@@ -362,7 +366,7 @@ pub async fn search_with_url_query(
let search_result = search_result?;
debug!(returns = ?search_result, "Search get");
debug!(request_uid = ?request_uid, returns = ?search_result, "Search get");
Ok(HttpResponse::Ok().json(search_result))
}
@@ -431,9 +435,10 @@ pub async fn search_with_post(
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let request_uid = Uuid::now_v7();
let mut query = params.into_inner();
debug!(parameters = ?query, "Search post");
debug!(request_uid = ?request_uid, parameters = ?query, "Search post");
// Tenant token search_rules.
if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
@@ -444,18 +449,20 @@ pub async fn search_with_post(
let index = index_scheduler.index(&index_uid)?;
let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
let search_kind =
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
let permit = search_queue.try_get_search_permit().await?;
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
&index_uid,
index_uid.to_string(),
&index,
query,
search_kind,
retrieve_vectors,
index_scheduler.features(),
request_uid,
)
})
.await;
@@ -471,14 +478,14 @@ pub async fn search_with_post(
let search_result = search_result?;
debug!(returns = ?search_result, "Search post");
debug!(request_uid = ?request_uid, returns = ?search_result, "Search post");
Ok(HttpResponse::Ok().json(search_result))
}
pub fn search_kind(
query: &SearchQuery,
index_scheduler: &IndexScheduler,
index_uid: &str,
index_uid: String,
index: &milli::Index,
) -> Result<SearchKind, ResponseError> {
let is_placeholder_query =

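The search handlers above now mint one `Uuid::now_v7()` per request and reuse it in the entry and exit debug logs as well as in the response as `requestUid`. A minimal sketch of that pattern, assuming only the `uuid` crate with its `v7` feature enabled (as added in this branch):

use uuid::Uuid;

// One time-ordered identifier per search request; because v7 UUIDs embed a
// timestamp, log lines and responses sort chronologically by request.
fn new_request_uid() -> Uuid {
    Uuid::now_v7()
}

fn main() {
    let request_uid = new_request_uid();
    // The same value is logged with the incoming parameters, logged again with
    // the results, and returned to the client as `requestUid`.
    println!("request_uid = {request_uid}");
}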
View File

@@ -234,6 +234,7 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
facet_stats: _,
degraded,
used_negative_operator,
request_uid: _,
} = result;
self.total_succeeded = self.total_succeeded.saturating_add(1);

View File

@@ -227,7 +227,7 @@ async fn similar(
let (embedder_name, embedder, quantized) = SearchKind::embedder(
&index_scheduler,
index_uid.as_str(),
index_uid.to_string(),
&index,
&query.embedder,
None,

View File

@@ -39,6 +39,7 @@ use crate::routes::features::RuntimeTogglableFeatures;
use crate::routes::indexes::documents::{DocumentDeletionByFilter, DocumentEditionByFunction};
use crate::routes::indexes::IndexView;
use crate::routes::multi_search::SearchResults;
use crate::routes::network::{Network, Remote};
use crate::routes::swap_indexes::SwapIndexesPayload;
use crate::routes::webhooks::{WebhookResults, WebhookSettings, WebhookWithMetadata};
use crate::search::{
@@ -101,7 +102,7 @@ mod webhooks;
url = "/",
description = "Local server",
)),
components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, meilisearch_types::enterprise_edition::network::Network, meilisearch_types::enterprise_edition::network::Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata, meilisearch_types::milli::vector::VectorStoreBackend))
components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata, meilisearch_types::milli::vector::VectorStoreBackend))
)]
pub struct MeilisearchApi;

View File

@@ -9,6 +9,7 @@ use meilisearch_types::keys::actions;
use serde::Serialize;
use tracing::debug;
use utoipa::{OpenApi, ToSchema};
use uuid::Uuid;
use super::multi_search_analytics::MultiSearchAggregator;
use crate::analytics::Analytics;
@@ -151,6 +152,7 @@ pub async fn multi_search_with_post(
// Since we don't want to process half of the search requests and then get a permit refused
// we're going to get one permit for the whole duration of the multi-search request.
let permit = search_queue.try_get_search_permit().await?;
let request_uid = Uuid::now_v7();
let federated_search = params.into_inner();
@@ -188,14 +190,27 @@ pub async fn multi_search_with_post(
let response = match federation {
Some(federation) => {
debug!(
request_uid = ?request_uid,
federation = ?federation,
parameters = ?queries,
"Federated-search"
);
// check remote header
let is_proxy = req
.headers()
.get(PROXY_SEARCH_HEADER)
.is_some_and(|value| value.as_bytes() == PROXY_SEARCH_HEADER_VALUE.as_bytes());
let search_result =
perform_federated_search(&index_scheduler, queries, federation, features, is_proxy)
.await;
let search_result = perform_federated_search(
&index_scheduler,
queries,
federation,
features,
is_proxy,
request_uid,
)
.await;
permit.drop().await;
if search_result.is_ok() {
@@ -203,6 +218,13 @@ pub async fn multi_search_with_post(
}
analytics.publish(multi_aggregate, &req);
debug!(
request_uid = ?request_uid,
returns = ?search_result,
"Federated-search"
);
HttpResponse::Ok().json(search_result?)
}
None => {
@@ -216,7 +238,12 @@ pub async fn multi_search_with_post(
.map(SearchQueryWithIndex::into_index_query_federation)
.enumerate()
{
debug!(on_index = query_index, parameters = ?query, "Multi-search");
debug!(
request_uid = ?request_uid,
on_index = query_index,
parameters = ?query,
"Multi-search"
);
if federation_options.is_some() {
return Err((
@@ -239,23 +266,27 @@ pub async fn multi_search_with_post(
})
.with_index(query_index)?;
let search_kind =
search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)
.with_index(query_index)?;
let index_uid_str = index_uid.to_string();
let search_kind = search_kind(
&query,
index_scheduler.get_ref(),
index_uid_str.clone(),
&index,
)
.with_index(query_index)?;
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
let search_result = tokio::task::spawn_blocking({
let index_uid = index_uid.clone();
move || {
perform_search(
&index_uid,
&index,
query,
search_kind,
retrieve_vector,
features,
)
}
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
index_uid_str.clone(),
&index,
query,
search_kind,
retrieve_vector,
features,
request_uid,
)
})
.await
.with_index(query_index)?;
@@ -283,7 +314,11 @@ pub async fn multi_search_with_post(
err
})?;
debug!(returns = ?search_results, "Multi-search");
debug!(
request_uid = ?request_uid,
returns = ?search_results,
"Multi-search"
);
HttpResponse::Ok().json(SearchResults { results: search_results })
}
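As the comment above explains, a single search permit is taken for the entire multi-search request so that a batch can never be half-processed and then refused. A minimal sketch of that hold-one-permit-for-the-whole-batch pattern, using `tokio::sync::Semaphore` as a stand-in for the actual `SearchQueue` (assumes a `tokio` dependency with the runtime and macro features):

use std::sync::Arc;
use tokio::sync::Semaphore;

// Hold one permit for the whole batch instead of re-acquiring one per query,
// so either every query in the multi-search runs or none of them does.
async fn run_batch(queue: Arc<Semaphore>, queries: Vec<String>) -> Vec<String> {
    let _permit = queue.acquire_owned().await.expect("search queue closed");
    let mut results = Vec::with_capacity(queries.len());
    for query in queries {
        // Every query of the batch executes under the same permit.
        results.push(format!("ran `{query}`"));
    }
    results
    // The permit is released here, after the last query of the batch.
}

#[tokio::main]
async fn main() {
    let queue = Arc::new(Semaphore::new(2));
    let out = run_batch(queue, vec!["q1".into(), "q2".into()]).await;
    assert_eq!(out.len(), 2);
}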

View File

@@ -1,21 +1,28 @@
use std::collections::BTreeMap;
use actix_web::web::{self, Data};
use actix_web::{HttpRequest, HttpResponse};
use deserr::actix_web::AwebJson;
use deserr::Deserr;
use index_scheduler::IndexScheduler;
use itertools::{EitherOrBoth, Itertools};
use meilisearch_types::deserr::DeserrJsonError;
use meilisearch_types::enterprise_edition::network::{Network, Remote};
use meilisearch_types::enterprise_edition::network::{Network as DbNetwork, Remote as DbRemote};
use meilisearch_types::error::deserr_codes::{
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
};
use meilisearch_types::error::ResponseError;
use meilisearch_types::keys::actions;
use meilisearch_types::milli::update::Setting;
use serde::Serialize;
use tracing::debug;
use utoipa::OpenApi;
use utoipa::{OpenApi, ToSchema};
use crate::analytics::{Aggregate, Analytics};
use crate::extractors::authentication::policies::ActionPolicy;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::SummarizedTaskView;
#[derive(OpenApi)]
#[openapi(
@@ -24,7 +31,7 @@ use crate::routes::SummarizedTaskView;
name = "Network",
description = "The `/network` route allows you to describe the topology of a network of Meilisearch instances.
This route is **asynchronous**. A task uid will be returned, and any change to the network will be effective after the corresponding task has been processed.",
This route is **synchronous**. This means that no task object will be returned, and any change to the network will be made available immediately.",
external_docs(url = "https://www.meilisearch.com/docs/reference/api/network"),
)),
)]
@@ -76,6 +83,73 @@ async fn get_network(
Ok(HttpResponse::Ok().json(network))
}
#[derive(Debug, Deserr, ToSchema, Serialize)]
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Remote {
#[schema(value_type = Option<String>, example = json!({
"ms-0": Remote { url: Setting::Set("http://localhost:7700".into()), search_api_key: Setting::Reset, write_api_key: Setting::Reset },
"ms-1": Remote { url: Setting::Set("http://localhost:7701".into()), search_api_key: Setting::Set("foo".into()), write_api_key: Setting::Set("bar".into()) },
"ms-2": Remote { url: Setting::Set("http://localhost:7702".into()), search_api_key: Setting::Set("bar".into()), write_api_key: Setting::Set("foo".into()) },
}))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkUrl>)]
#[serde(default)]
pub url: Setting<String>,
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkSearchApiKey>)]
#[serde(default)]
pub search_api_key: Setting<String>,
#[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkWriteApiKey>)]
#[serde(default)]
pub write_api_key: Setting<String>,
}
#[derive(Debug, Deserr, ToSchema, Serialize)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Network {
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
#[serde(default)]
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
#[schema(value_type = Option<String>, example = json!("ms-00"), rename = "self")]
#[serde(default, rename = "self")]
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
pub local: Setting<String>,
#[schema(value_type = Option<bool>, example = json!(true))]
#[serde(default)]
#[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
pub sharding: Setting<bool>,
}
impl Remote {
pub fn try_into_db_node(self, name: &str) -> Result<DbRemote, ResponseError> {
Ok(DbRemote {
url: self
.url
.set()
.ok_or(ResponseError::from_msg(
format!("Missing field `.remotes.{name}.url`"),
meilisearch_types::error::Code::MissingNetworkUrl,
))
.and_then(|url| {
if let Err(error) = url::Url::parse(&url) {
return Err(ResponseError::from_msg(
format!("Invalid `.remotes.{name}.url` (`{url}`): {error}"),
meilisearch_types::error::Code::InvalidNetworkUrl,
));
}
Ok(url)
})?,
search_api_key: self.search_api_key.set(),
write_api_key: self.write_api_key.set(),
})
}
}
#[derive(Serialize)]
pub struct PatchNetworkAnalytics {
network_size: usize,
@@ -134,58 +208,111 @@ async fn patch_network(
index_scheduler.features().check_network("Using the /network route")?;
let new_network = new_network.0;
let old_network = index_scheduler.network();
debug!(parameters = ?new_network, "Patch network");
// check the URLs of all remotes
if let Setting::Set(remotes) = &new_network.remotes {
for (remote_name, remote) in remotes.iter() {
let Some(remote) = remote else {
continue;
};
match &remote.url {
Setting::Set(new_url) => {
if let Err(error) = url::Url::parse(&new_url) {
return Err(ResponseError::from_msg(
format!("Invalid `.remotes.{remote_name}.url` (`{new_url}`): {error}"),
meilisearch_types::error::Code::InvalidNetworkUrl,
));
let merged_self = match new_network.local {
Setting::Set(new_self) => Some(new_self),
Setting::Reset => None,
Setting::NotSet => old_network.local,
};
let merged_sharding = match new_network.sharding {
Setting::Set(new_sharding) => new_sharding,
Setting::Reset => false,
Setting::NotSet => old_network.sharding,
};
if merged_sharding && merged_self.is_none() {
return Err(ResponseError::from_msg(
"`.sharding`: enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.".into(),
meilisearch_types::error::Code::InvalidNetworkSharding,
));
}
let merged_remotes = match new_network.remotes {
Setting::Set(new_remotes) => {
let mut merged_remotes = BTreeMap::new();
for either_or_both in old_network
.remotes
.into_iter()
.merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
{
match either_or_both {
EitherOrBoth::Both((key, old), (_, Some(new))) => {
let DbRemote {
url: old_url,
search_api_key: old_search_api_key,
write_api_key: old_write_api_key,
} = old;
let Remote {
url: new_url,
search_api_key: new_search_api_key,
write_api_key: new_write_api_key,
} = new;
let merged = DbRemote {
url: match new_url {
Setting::Set(new_url) => {
if let Err(error) = url::Url::parse(&new_url) {
return Err(ResponseError::from_msg(
format!("Invalid `.remotes.{key}.url` (`{new_url}`): {error}"),
meilisearch_types::error::Code::InvalidNetworkUrl,
));
}
new_url
}
Setting::Reset => {
return Err(ResponseError::from_msg(
format!(
"Field `.remotes.{key}.url` cannot be set to `null`"
),
meilisearch_types::error::Code::InvalidNetworkUrl,
))
}
Setting::NotSet => old_url,
},
search_api_key: match new_search_api_key {
Setting::Set(new_search_api_key) => Some(new_search_api_key),
Setting::Reset => None,
Setting::NotSet => old_search_api_key,
},
write_api_key: match new_write_api_key {
Setting::Set(new_write_api_key) => Some(new_write_api_key),
Setting::Reset => None,
Setting::NotSet => old_write_api_key,
},
};
merged_remotes.insert(key, merged);
}
EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
EitherOrBoth::Left((key, node)) => {
merged_remotes.insert(key, node);
}
EitherOrBoth::Right((key, Some(node))) => {
let node = node.try_into_db_node(&key)?;
merged_remotes.insert(key, node);
}
}
Setting::Reset => {
return Err(ResponseError::from_msg(
format!("Field `.remotes.{remote_name}.url` cannot be set to `null`"),
meilisearch_types::error::Code::InvalidNetworkUrl,
))
}
Setting::NotSet => (),
}
merged_remotes
}
}
Setting::Reset => BTreeMap::new(),
Setting::NotSet => old_network.remotes,
};
analytics.publish(
PatchNetworkAnalytics {
network_size: new_network
.remotes
.as_ref()
.set()
.map(|remotes| remotes.len())
.unwrap_or_default(),
network_has_self: new_network.local.as_ref().set().is_some(),
network_size: merged_remotes.len(),
network_has_self: merged_self.is_some(),
},
&req,
);
let task = index_scheduler.register(
meilisearch_types::tasks::KindWithContent::NetworkTopologyChange {
network: Some(new_network),
origin: None,
},
None,
false,
)?;
debug!(returns = ?task, "Patch network");
let task: SummarizedTaskView = task.into();
return Ok(HttpResponse::Accepted().json(task));
let merged_network =
DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
index_scheduler.put_network(merged_network.clone())?;
debug!(returns = ?merged_network, "Patch network");
Ok(HttpResponse::Ok().json(merged_network))
}
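`patch_network` now applies the change synchronously by merging the PATCH payload into the stored network with the usual `Setting` semantics: an explicit value wins, `null` resets, and an omitted field keeps the previous value. A minimal standalone sketch of that three-way rule, with a local `Setting` stand-in rather than the `meilisearch_types` one:

// Local stand-in for the `Setting` type used above, not the meilisearch_types one.
enum Setting<T> {
    Set(T),  // field present in the PATCH body with a value
    Reset,   // field explicitly set to `null`
    NotSet,  // field absent from the PATCH body
}

fn merge<T>(new: Setting<T>, old: Option<T>, reset_to: Option<T>) -> Option<T> {
    match new {
        Setting::Set(value) => Some(value), // an explicit value always wins
        Setting::Reset => reset_to,         // `null` falls back to the default
        Setting::NotSet => old,             // an omitted field keeps the old value
    }
}

fn main() {
    // PATCH {"self": null} on a network whose `self` was "ms-00" clears it...
    assert_eq!(merge(Setting::Reset, Some("ms-00".to_string()), None), None);
    // ...while omitting `sharding` keeps the stored value.
    assert_eq!(merge(Setting::NotSet, Some(true), Some(false)), Some(true));
}

Under the same rule, the `merged_sharding && merged_self.is_none()` guard above rejects a merge that would enable sharding without a `self`, returning the `InvalidNetworkSharding` error shown.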

View File

@@ -228,7 +228,7 @@ mod tests {
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
snapshot!(meili_snap::json_string!(err), @r###"
{
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"

View File

@@ -9,7 +9,7 @@ use std::vec::{IntoIter, Vec};
use actix_http::StatusCode;
use index_scheduler::{IndexScheduler, RoFeatures};
use itertools::Itertools;
use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
use meilisearch_types::enterprise_edition::network::{Network, Remote};
use meilisearch_types::error::ResponseError;
use meilisearch_types::milli::order_by_map::OrderByMap;
use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue};
@@ -17,6 +17,7 @@ use meilisearch_types::milli::vector::Embedding;
use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET};
use roaring::RoaringBitmap;
use tokio::task::JoinHandle;
use uuid::Uuid;
use super::super::ranking_rules::{self, RankingRules};
use super::super::{
@@ -39,6 +40,7 @@ pub async fn perform_federated_search(
federation: Federation,
features: RoFeatures,
is_proxy: bool,
request_uid: Uuid,
) -> Result<FederatedSearchResult, ResponseError> {
if is_proxy {
features.check_network("Performing a remote federated search")?;
@@ -170,6 +172,7 @@ pub async fn perform_federated_search(
facet_stats,
facets_by_index,
remote_errors: partitioned_queries.has_remote.then_some(remote_errors),
request_uid: Some(request_uid),
})
}
@@ -439,6 +442,7 @@ fn merge_metadata(
degraded: degraded_for_host,
used_negative_operator: host_used_negative_operator,
remote_errors: _,
request_uid: _,
} in remote_results
{
let this_remote_duration = Duration::from_millis(*processing_time_ms as u64);
@@ -456,7 +460,7 @@ fn merge_metadata(
}
type LocalQueriesByIndex = BTreeMap<String, Vec<QueryByIndex>>;
type RemoteQueriesByHost = BTreeMap<String, (DbRemote, Vec<SearchQueryWithIndex>)>;
type RemoteQueriesByHost = BTreeMap<String, (Remote, Vec<SearchQueryWithIndex>)>;
struct PartitionedQueries {
local_queries_by_index: LocalQueriesByIndex,
@@ -477,7 +481,7 @@ impl PartitionedQueries {
&mut self,
federated_query: SearchQueryWithIndex,
query_index: usize,
network: &DbNetwork,
network: &Network,
features: RoFeatures,
) -> Result<(), ResponseError> {
if let Some(pagination_field) = federated_query.has_pagination() {
@@ -672,7 +676,7 @@ struct SearchByIndexParams<'a> {
features: RoFeatures,
is_proxy: bool,
has_remote: bool,
network: &'a DbNetwork,
network: &'a Network,
}
struct SearchByIndex {
@@ -755,7 +759,8 @@ impl SearchByIndex {
// use an immediately invoked lambda to capture the result without returning from the function
let res: Result<(), ResponseError> = (|| {
let search_kind = search_kind(&query, params.index_scheduler, &index_uid, &index)?;
let search_kind =
search_kind(&query, params.index_scheduler, index_uid.to_string(), &index)?;
let canonicalization_kind = match (&search_kind, &query.q) {
(SearchKind::SemanticOnly { .. }, _) => {
@@ -805,11 +810,11 @@ impl SearchByIndex {
{
Some((previous_ranking_rules, previous_query_index, previous_index_uid))
} else {
Some((ranking_rules, query_index, index_uid.to_string()))
Some((ranking_rules, query_index, index_uid.clone()))
};
} else {
self.previous_query_data =
Some((ranking_rules, query_index, index_uid.to_string()));
Some((ranking_rules, query_index, index_uid.clone()));
}
match search_kind {
@@ -838,7 +843,7 @@ impl SearchByIndex {
search.limit(params.required_hit_count);
let (result, _semantic_hit_count) =
super::super::search_from_kind(&index_uid, search_kind, search)?;
super::super::search_from_kind(index_uid.to_string(), search_kind, search)?;
let format = AttributesFormat {
attributes_to_retrieve: query.attributes_to_retrieve,
retrieve_vectors,

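The `(|| { ... })()` construct above is the immediately-invoked-closure idiom the comment describes: `?` and early returns exit the closure only, so the surrounding loop can capture the per-index `Result` instead of bailing out of the whole function. A minimal sketch of the idiom outside this codebase:

fn process(values: &[i32]) -> Vec<Result<i32, String>> {
    let mut out = Vec::new();
    for &value in values {
        // Immediately invoked closure: `?` and `return` exit the closure,
        // not `process`, so the error is captured instead of propagated.
        let res: Result<i32, String> = (|| {
            let doubled = value.checked_mul(2).ok_or("overflow".to_string())?;
            Ok(doubled)
        })();
        out.push(res);
    }
    out
}

fn main() {
    assert_eq!(process(&[1, i32::MAX]), vec![Ok(2), Err("overflow".to_string())]);
}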
View File

@@ -1,6 +1,6 @@
pub use error::ProxySearchError;
use error::ReqwestErrorWithoutUrl;
use meilisearch_types::enterprise_edition::network::DbRemote;
use meilisearch_types::enterprise_edition::network::Remote;
use rand::Rng as _;
use reqwest::{Client, Response, StatusCode};
use serde::de::DeserializeOwned;
@@ -94,7 +94,7 @@ pub struct ProxySearchParams {
/// Performs a federated search on a remote host and returns the results
pub async fn proxy_search(
node: &DbRemote,
node: &Remote,
queries: Vec<SearchQueryWithIndex>,
federation: Federation,
params: &ProxySearchParams,

View File

@@ -16,6 +16,7 @@ use meilisearch_types::milli::order_by_map::OrderByMap;
use meilisearch_types::milli::OrderBy;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use uuid::Uuid;
use super::super::{ComputedFacets, FacetStats, HitsInfo, SearchHit, SearchQueryWithIndex};
use crate::milli::vector::Embedding;
@@ -131,6 +132,8 @@ pub struct FederatedSearchResult {
pub facet_stats: Option<BTreeMap<String, FacetStats>>,
#[serde(default, skip_serializing_if = "FederatedFacets::is_empty")]
pub facets_by_index: FederatedFacets,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub request_uid: Option<Uuid>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub remote_errors: Option<BTreeMap<String, ResponseError>>,
@@ -156,6 +159,7 @@ impl fmt::Debug for FederatedSearchResult {
facet_stats,
facets_by_index,
remote_errors,
request_uid,
} = self;
let mut debug = f.debug_struct("SearchResult");
@@ -188,6 +192,9 @@ impl fmt::Debug for FederatedSearchResult {
if let Some(remote_errors) = remote_errors {
debug.field("remote_errors", &remote_errors);
}
if let Some(request_uid) = request_uid {
debug.field("request_uid", &request_uid);
}
debug.finish()
}

View File

@@ -36,6 +36,7 @@ use serde_json::{json, Value};
#[cfg(test)]
mod mod_test;
use utoipa::ToSchema;
use uuid::Uuid;
use crate::error::MeilisearchHttpError;
@@ -362,7 +363,7 @@ pub enum SearchKind {
impl SearchKind {
pub(crate) fn semantic(
index_scheduler: &index_scheduler::IndexScheduler,
index_uid: &str,
index_uid: String,
index: &Index,
embedder_name: &str,
vector_len: Option<usize>,
@@ -380,7 +381,7 @@ impl SearchKind {
pub(crate) fn hybrid(
index_scheduler: &index_scheduler::IndexScheduler,
index_uid: &str,
index_uid: String,
index: &Index,
embedder_name: &str,
semantic_ratio: f32,
@@ -399,7 +400,7 @@ impl SearchKind {
pub(crate) fn embedder(
index_scheduler: &index_scheduler::IndexScheduler,
index_uid: &str,
index_uid: String,
index: &Index,
embedder_name: &str,
vector_len: Option<usize>,
@@ -851,6 +852,8 @@ pub struct SearchResult {
pub facet_distribution: Option<BTreeMap<String, IndexMap<String, u64>>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub facet_stats: Option<BTreeMap<String, FacetStats>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub request_uid: Option<Uuid>,
#[serde(skip_serializing_if = "Option::is_none")]
pub semantic_hit_count: Option<u32>,
@@ -872,6 +875,7 @@ impl fmt::Debug for SearchResult {
hits_info,
facet_distribution,
facet_stats,
request_uid,
semantic_hit_count,
degraded,
used_negative_operator,
@@ -901,6 +905,9 @@ impl fmt::Debug for SearchResult {
if let Some(semantic_hit_count) = semantic_hit_count {
debug.field("semantic_hit_count", &semantic_hit_count);
}
if let Some(request_uid) = request_uid {
debug.field("request_uid", &request_uid);
}
debug.finish()
}
@@ -1114,12 +1121,13 @@ pub fn prepare_search<'t>(
}
pub fn perform_search(
index_uid: &str,
index_uid: String,
index: &Index,
query: SearchQuery,
search_kind: SearchKind,
retrieve_vectors: RetrieveVectors,
features: RoFeatures,
request_uid: Uuid,
) -> Result<SearchResult, ResponseError> {
let before_search = Instant::now();
let rtxn = index.read_txn()?;
@@ -1237,6 +1245,7 @@ pub fn perform_search(
degraded,
used_negative_operator,
semantic_hit_count,
request_uid: Some(request_uid),
};
Ok(result)
}
@@ -1299,24 +1308,27 @@ fn compute_facet_distribution_stats<S: AsRef<str>>(
}
pub fn search_from_kind(
index_uid: &str,
index_uid: String,
search_kind: SearchKind,
search: milli::Search<'_>,
) -> Result<(milli::SearchResult, Option<u32>), MeilisearchHttpError> {
let err = |e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string()));
let (milli_result, semantic_hit_count) = match &search_kind {
SearchKind::KeywordOnly => {
let results = search.execute().map_err(err)?;
let results = search
.execute()
.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string())))?;
(results, None)
}
SearchKind::SemanticOnly { .. } => {
let results = search.execute().map_err(err)?;
let results = search
.execute()
.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string())))?;
let semantic_hit_count = results.document_scores.len() as u32;
(results, Some(semantic_hit_count))
}
SearchKind::Hybrid { semantic_ratio, .. } => {
search.execute_hybrid(*semantic_ratio).map_err(err)?
}
SearchKind::Hybrid { semantic_ratio, .. } => search
.execute_hybrid(*semantic_ratio)
.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid)))?,
};
Ok((milli_result, semantic_hit_count))
}

View File

@@ -419,14 +419,14 @@ async fn error_add_api_key_invalid_parameters_actions() {
let (response, code) = server.add_api_key(content).await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r#"
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r###"
{
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`, `indexes.compact`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
}
"#);
"###);
}
#[actix_rt::test]

View File

@@ -91,14 +91,14 @@ async fn create_api_key_bad_actions() {
// can't parse
let (response, code) = server.add_api_key(json!({ "actions": ["doggo"] })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
snapshot!(json_string!(response), @r###"
{
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`, `indexes.compact`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
}
"#);
"###);
}
#[actix_rt::test]

View File

@@ -42,7 +42,7 @@ async fn batch_bad_types() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"

View File

@@ -49,8 +49,8 @@ impl Server<Owned> {
}
let options = default_settings(dir.path());
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };
Server { service, _dir: Some(dir), _marker: PhantomData }
@@ -65,9 +65,7 @@ impl Server<Owned> {
options.master_key = Some("MASTER_KEY".to_string());
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };
Server { service, _dir: Some(dir), _marker: PhantomData }
@@ -80,9 +78,7 @@ impl Server<Owned> {
}
pub async fn new_with_options(options: Opt) -> Result<Self, anyhow::Error> {
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle)?;
let (index_scheduler, auth) = setup_meilisearch(&options)?;
let service = Service { index_scheduler, auth, options, api_key: None };
Ok(Server { service, _dir: None, _marker: PhantomData })
@@ -221,9 +217,8 @@ impl Server<Shared> {
}
let options = default_settings(dir.path());
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, api_key: None, options };
Server { service, _dir: Some(dir), _marker: PhantomData }

View File

@@ -1853,7 +1853,7 @@ async fn add_documents_with_geo_field() {
.await;
snapshot!(code, @"200 OK");
// we are expecting docs 4 and 3 first as they have geo
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [
@@ -1885,7 +1885,8 @@ async fn add_documents_with_geo_field() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
}
"###);
}
@@ -1940,7 +1941,7 @@ async fn update_documents_with_geo_field() {
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
snapshot!(code, @"200 OK");
// we are expecting docs 4 and 3 first as they have geo
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [
@@ -1972,7 +1973,8 @@ async fn update_documents_with_geo_field() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
}
"###);
@@ -2044,7 +2046,7 @@ async fn update_documents_with_geo_field() {
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
snapshot!(code, @"200 OK");
// the search response should not have changed: we are expecting docs 4 and 3 first as they have geo
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [
@@ -2077,7 +2079,8 @@ async fn update_documents_with_geo_field() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
}
"###);
}

View File

@@ -14,17 +14,18 @@ async fn basic_add_settings_and_geojson_documents() {
server.wait_task(task.uid()).await.succeeded();
let (response, _) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
snapshot!(response,
@r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"#);
"###);
let lille: serde_json::Value = serde_json::from_str(LILLE).unwrap();
let documents = json!([
@@ -92,8 +93,8 @@ async fn basic_add_settings_and_geojson_documents() {
"#);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
snapshot!(response,
@r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [
{
@@ -111,9 +112,10 @@ async fn basic_add_settings_and_geojson_documents() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -174,8 +176,8 @@ async fn basic_add_geojson_documents_and_settings() {
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
snapshot!(response,
@r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [
{
@@ -193,9 +195,10 @@ async fn basic_add_geojson_documents_and_settings() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -292,7 +295,7 @@ async fn geo_bounding_box() {
let (response, code) =
index.search_get("?filter=_geoBoundingBox([50.53987503447863,21.43443989912143],[43.76393151539099,0.54979129195425])&attributesToRetrieve=name").await;
snapshot!(code, @"200 OK");
snapshot!(response, @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -351,16 +354,17 @@ async fn geo_bounding_box() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 17
"estimatedTotalHits": 17,
"requestUid": "[uuid]"
}
"#);
"###);
// Between Russia and Alaska
let (response, code) = index
.search_get("?filter=_geoBoundingBox([70,-148],[63,152])&attributesToRetrieve=name")
.await;
snapshot!(code, @"200 OK");
snapshot!(response, @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -377,44 +381,41 @@ async fn geo_bounding_box() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
async fn geo_radius() {
let index = shared_index_geojson_documents().await;
async fn bug_5904() {
// https://github.com/meilisearch/meilisearch/issues/5904
// 200km around Luxembourg
let (response, code) = index
.search_get("?filter=_geoRadius(49.4369862,6.5576591,200000)&attributesToRetrieve=name")
.await;
snapshot!(code, @"200 OK");
snapshot!(response, @r#"
{
"hits": [
{
"name": "Belgium"
},
{
"name": "Germany"
},
{
"name": "France"
},
{
"name": "Luxembourg"
},
{
"name": "Netherlands"
}
],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5
}
"#);
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) =
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
server.wait_task(response.uid()).await.succeeded();
let geojson = json!({
"id": 1,
"_geojson": {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
4.23914,
48.382893
]
},
"properties": {}
}
]
}
});
let (response, _code) = index.add_documents(geojson, Some("id")).await;
server.wait_task(response.uid()).await.succeeded();
}

View File

@@ -742,7 +742,7 @@ async fn vector_filter_all_embedders() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -762,9 +762,10 @@ async fn vector_filter_all_embedders() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -839,7 +840,7 @@ async fn vector_filter_specific_embedder() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -859,9 +860,10 @@ async fn vector_filter_specific_embedder() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -874,7 +876,7 @@ async fn vector_filter_user_provided() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -885,9 +887,10 @@ async fn vector_filter_user_provided() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -900,7 +903,7 @@ async fn vector_filter_specific_fragment() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -914,9 +917,10 @@ async fn vector_filter_specific_fragment() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"#);
"###);
let (value, _code) = index
.search_post(json!({
@@ -924,7 +928,7 @@ async fn vector_filter_specific_fragment() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -941,9 +945,10 @@ async fn vector_filter_specific_fragment() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -976,16 +981,17 @@ async fn vector_filter_document_template_but_fragments_used() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -1023,7 +1029,7 @@ async fn vector_filter_document_template() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1040,9 +1046,10 @@ async fn vector_filter_document_template() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -1075,7 +1082,7 @@ async fn vector_filter_negation() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1092,9 +1099,10 @@ async fn vector_filter_negation() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -1107,7 +1115,7 @@ async fn vector_filter_or_combination() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1124,9 +1132,10 @@ async fn vector_filter_or_combination() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"#);
"###);
}
#[actix_rt::test]
@@ -1139,7 +1148,7 @@ async fn vector_filter_regenerate() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1156,7 +1165,8 @@ async fn vector_filter_regenerate() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"#);
"###);
}

View File

@@ -33,7 +33,7 @@ async fn geo_bounding_box_with_string_and_number() {
}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -63,7 +63,8 @@ async fn geo_bounding_box_with_string_and_number() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"###);
},
@@ -84,7 +85,7 @@ async fn bug_4640() {
}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -123,7 +124,8 @@ async fn bug_4640() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
},
@@ -147,7 +149,7 @@ async fn geo_asc_with_words() {
&json!({"q": "jean"}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -179,7 +181,8 @@ async fn geo_asc_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
},
@@ -192,7 +195,7 @@ async fn geo_asc_with_words() {
&json!({"q": "bob"}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -216,7 +219,8 @@ async fn geo_asc_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"###);
},
@@ -229,7 +233,7 @@ async fn geo_asc_with_words() {
&json!({"q": "intel"}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -245,7 +249,8 @@ async fn geo_asc_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
},
@@ -269,7 +274,7 @@ async fn geo_sort_with_words() {
&json!({"q": "jean", "sort": ["_geoPoint(0.0, 0.0):asc"]}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -304,7 +309,8 @@ async fn geo_sort_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
},

View File

@@ -1,4 +1,4 @@
use meili_snap::snapshot;
use meili_snap::{json_string, snapshot};
use once_cell::sync::Lazy;
use crate::common::index::Index;
@@ -148,7 +148,7 @@ async fn simple_search() {
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response, @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -209,9 +209,10 @@ async fn simple_search() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"semanticHitCount": 0
}
"#);
"###);
snapshot!(response["semanticHitCount"], @"0");
let (response, code) = index
@@ -220,7 +221,7 @@ async fn simple_search() {
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response, @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -284,9 +285,10 @@ async fn simple_search() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"semanticHitCount": 2
}
"#);
"###);
snapshot!(response["semanticHitCount"], @"2");
let (response, code) = index
@@ -295,7 +297,7 @@ async fn simple_search() {
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response, @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -359,9 +361,10 @@ async fn simple_search() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"semanticHitCount": 3
}
"#);
"###);
snapshot!(response["semanticHitCount"], @"3");
}

View File

@@ -104,7 +104,7 @@ async fn simple_search() {
// english
index
.search(json!({"q": "Atta", "attributesToRetrieve": ["id"]}), |response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -115,7 +115,8 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -125,7 +126,7 @@ async fn simple_search() {
// japanese
index
.search(json!({"q": "進撃", "attributesToRetrieve": ["id"]}), |response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -136,7 +137,8 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -147,7 +149,7 @@ async fn simple_search() {
.search(
json!({"q": "進撃", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -158,9 +160,10 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"#);
"###);
snapshot!(code, @"200 OK");
},
)
@@ -169,7 +172,7 @@ async fn simple_search() {
// chinese
index
.search(json!({"q": "进击", "attributesToRetrieve": ["id"]}), |response, code| {
snapshot!(response, @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -180,9 +183,10 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"#);
"###);
snapshot!(code, @"200 OK");
})
.await;
@@ -222,7 +226,7 @@ async fn force_locales() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -233,7 +237,8 @@ async fn force_locales() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -246,7 +251,7 @@ async fn force_locales() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -257,7 +262,8 @@ async fn force_locales() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -300,7 +306,7 @@ async fn force_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -311,7 +317,8 @@ async fn force_locales_with_pattern() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -324,7 +331,7 @@ async fn force_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -335,7 +342,8 @@ async fn force_locales_with_pattern() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -376,14 +384,15 @@ async fn force_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -396,7 +405,7 @@ async fn force_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -407,7 +416,8 @@ async fn force_locales_with_pattern_nested() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -451,14 +461,15 @@ async fn force_different_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -471,7 +482,7 @@ async fn force_different_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -482,7 +493,8 @@ async fn force_different_locales_with_pattern() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -529,14 +541,15 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -549,7 +562,7 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"], "attributesToSearchOn": ["name_zh", "description_zh"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -560,7 +573,8 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -602,7 +616,7 @@ async fn auto_infer_locales_at_search() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -613,7 +627,8 @@ async fn auto_infer_locales_at_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -625,30 +640,7 @@ async fn auto_infer_locales_at_search() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
{
"hits": [
{
"id": 853
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
},
)
.await;
index
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -659,7 +651,32 @@ async fn auto_infer_locales_at_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
},
)
.await;
index
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"id": 853
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -702,14 +719,15 @@ async fn force_different_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -722,31 +740,7 @@ async fn force_different_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
{
"hits": [
{
"id": 852
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
},
)
.await;
// force japanese
index
.search(
json!({"q": "\"进击的巨人\"", "locales": ["ja"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -757,7 +751,33 @@ async fn force_different_locales_with_pattern_nested() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
},
)
.await;
// force japanese
index
.search(
json!({"q": "\"进击的巨人\"", "locales": ["ja"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"id": 852
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -799,14 +819,15 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -819,14 +840,15 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -862,14 +884,15 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -882,14 +905,15 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -1164,7 +1188,7 @@ async fn swedish_search() {
// infer swedish
index
.search(json!({"q": "trä", "attributesToRetrieve": ["product"]}), |response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1178,7 +1202,8 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -1187,7 +1212,7 @@ async fn swedish_search() {
index
.search(json!({"q": "tra", "attributesToRetrieve": ["product"]}), |response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1201,7 +1226,8 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -1213,7 +1239,7 @@ async fn swedish_search() {
.search(
json!({"q": "trä", "locales": ["swe"], "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1227,7 +1253,8 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -1238,7 +1265,7 @@ async fn swedish_search() {
.search(
json!({"q": "tra", "locales": ["swe"], "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1252,7 +1279,8 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");
@@ -1287,20 +1315,21 @@ async fn german_search() {
.search(
json!({"q": "kulturalität", "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(response, @r###"
{
"hits": [
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"product": "Interkulturalität"
"hits": [
{
"product": "Interkulturalität"
}
],
"query": "kulturalität",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
],
"query": "kulturalität",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
}
"###);
"###);
snapshot!(code, @"200 OK");
},
)
@@ -1310,7 +1339,7 @@ async fn german_search() {
.search(
json!({"q": "organisation", "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1321,7 +1350,8 @@ async fn german_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
snapshot!(code, @"200 OK");


@@ -1044,7 +1044,7 @@ async fn test_degraded_score_details() {
}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1103,7 +1103,8 @@ async fn test_degraded_score_details() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
},


@@ -93,13 +93,14 @@ async fn federation_empty_list() {
let (response, code) = server.multi_search(json!({"federation": {}, "queries": []})).await;
snapshot!(code, @"200 OK");
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
}
@@ -164,7 +165,7 @@ async fn simple_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
[
{
"indexUid": "SHARED_DOCUMENTS",
@@ -182,7 +183,8 @@ async fn simple_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
},
{
"indexUid": "SHARED_DOCUMENTS",
@@ -200,7 +202,8 @@ async fn simple_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
]
"###);
@@ -217,7 +220,7 @@ async fn federation_single_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -237,7 +240,8 @@ async fn federation_single_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
}
@@ -256,7 +260,7 @@ async fn federation_multiple_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -308,7 +312,8 @@ async fn federation_multiple_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5
"estimatedTotalHits": 5,
"requestUid": "[uuid]"
}
"###);
}
@@ -325,7 +330,7 @@ async fn federation_two_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -358,7 +363,8 @@ async fn federation_two_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
"###);
}
@@ -457,7 +463,7 @@ async fn simple_search_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
[
{
"indexUid": "SHARED_DOCUMENTS",
@@ -475,7 +481,8 @@ async fn simple_search_two_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
},
{
"indexUid": "SHARED_NESTED_DOCUMENTS",
@@ -516,7 +523,8 @@ async fn simple_search_two_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
}
]
"###);
@@ -535,7 +543,7 @@ async fn federation_two_search_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -596,7 +604,8 @@ async fn federation_two_search_two_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
}
@@ -626,7 +635,7 @@ async fn federation_multiple_search_multiple_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -795,7 +804,8 @@ async fn federation_multiple_search_multiple_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -1101,7 +1111,7 @@ async fn federation_filter() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1140,7 +1150,8 @@ async fn federation_filter() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
}
@@ -1177,7 +1188,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1266,7 +1277,8 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
}
"###);
@@ -1278,7 +1290,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1353,7 +1365,8 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
}
@@ -1449,7 +1462,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1538,7 +1551,8 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
}
"###);
@@ -1551,7 +1565,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1626,7 +1640,8 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
}
@@ -1704,7 +1719,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1831,7 +1846,8 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
}
"###);
@@ -1844,7 +1860,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1915,7 +1931,8 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 6
"estimatedTotalHits": 6,
"requestUid": "[uuid]"
}
"###);
}
@@ -1936,7 +1953,7 @@ async fn federation_sort_different_ranking_rules() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2063,7 +2080,8 @@ async fn federation_sort_different_ranking_rules() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
}
"###);
@@ -2142,7 +2160,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2269,7 +2287,8 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
}
"###);
@@ -2282,7 +2301,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2353,7 +2372,8 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 6
"estimatedTotalHits": 6,
"requestUid": "[uuid]"
}
"###);
}
@@ -2424,7 +2444,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2527,7 +2547,8 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -2549,7 +2570,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2564,7 +2585,8 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 1,
"offset": 0,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -2586,7 +2608,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2673,7 +2695,8 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 2,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -2695,13 +2718,14 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 12,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -2731,7 +2755,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2861,7 +2885,8 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -2883,7 +2908,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2898,7 +2923,8 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 1,
"offset": 0,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -2920,7 +2946,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -3007,7 +3033,8 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 2,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -3029,13 +3056,14 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 12,
"estimatedTotalHits": 12
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
}
"###);
}
@@ -3098,7 +3126,7 @@ async fn federation_null_weight() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -3137,7 +3165,8 @@ async fn federation_null_weight() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
}
@@ -3244,7 +3273,7 @@ async fn federation_federated_contains_facets() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -3280,7 +3309,8 @@ async fn federation_federated_contains_facets() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
@@ -3488,7 +3518,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -3532,7 +3562,8 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 4
"semanticHitCount": 4,
"requestUid": "[uuid]"
}
"###);
@@ -3545,7 +3576,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -3589,7 +3620,8 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 4
"semanticHitCount": 4,
"requestUid": "[uuid]"
}
"###);
@@ -3603,7 +3635,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -3651,7 +3683,8 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 3
"semanticHitCount": 3,
"requestUid": "[uuid]"
}
"###);
}
@@ -3703,7 +3736,7 @@ async fn federation_vector_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -3922,9 +3955,10 @@ async fn federation_vector_two_indexes() {
0.6
]
},
"semanticHitCount": 6
"semanticHitCount": 6,
"requestUid": "[uuid]"
}
"#);
"###);
// hybrid search, distinct embedder
let (response, code) = server
@@ -3934,7 +3968,7 @@ async fn federation_vector_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r#"
{
"hits": [
{
@@ -4161,7 +4195,8 @@ async fn federation_vector_two_indexes() {
0.6
]
},
"semanticHitCount": 8
"semanticHitCount": 8,
"requestUid": "[uuid]"
}
"#);
}
@@ -4209,7 +4244,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -4380,7 +4415,8 @@ async fn federation_facets_different_indexes_same_facet() {
},
"stats": {}
}
}
},
"requestUid": "[uuid]"
}
"###);
@@ -4399,7 +4435,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -4541,7 +4577,8 @@ async fn federation_facets_different_indexes_same_facet() {
"Shazam!": 1
}
},
"facetStats": {}
"facetStats": {},
"requestUid": "[uuid]"
}
"###);
@@ -4561,7 +4598,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -4686,7 +4723,8 @@ async fn federation_facets_different_indexes_same_facet() {
"distribution": {},
"stats": {}
}
}
},
"requestUid": "[uuid]"
}
"###);
}
@@ -4748,7 +4786,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -4806,7 +4844,8 @@ async fn federation_facets_same_indexes() {
}
}
}
}
},
"requestUid": "[uuid]"
}
"###);
@@ -4822,7 +4861,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -4908,7 +4947,8 @@ async fn federation_facets_same_indexes() {
}
}
}
}
},
"requestUid": "[uuid]"
}
"###);
@@ -4925,7 +4965,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -4987,7 +5027,8 @@ async fn federation_facets_same_indexes() {
"min": 2.0,
"max": 6.0
}
}
},
"requestUid": "[uuid]"
}
"###);
}
@@ -5040,7 +5081,7 @@ async fn federation_inconsistent_merge_order() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -5217,7 +5258,8 @@ async fn federation_inconsistent_merge_order() {
},
"stats": {}
}
}
},
"requestUid": "[uuid]"
}
"###);
@@ -5264,7 +5306,7 @@ async fn federation_inconsistent_merge_order() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -5404,7 +5446,8 @@ async fn federation_inconsistent_merge_order() {
"Batman Returns": 1
}
},
"facetStats": {}
"facetStats": {},
"requestUid": "[uuid]"
}
"###);
}
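
Note: two different redaction selectors show up in these multi-search tests — `".requestUid"` targets only the top-level field of a federated response, while the `".**."` forms (`".**.requestUid"`, `".**._rankingScore"`) descend into nested values, which is what the non-federated form needs because `response["results"]` is an array and each entry carries its own `requestUid`. The following sketch approximates the recursive variant with plain `serde_json`; it is an assumed illustration, not a helper from the test suite.

use serde_json::{json, Value};

// Illustrative only: replace every occurrence of `key`, at any depth,
// with `placeholder` -- roughly what a `".**.<key>"` redaction does.
fn redact_recursively(value: &mut Value, key: &str, placeholder: &str) {
    match value {
        Value::Object(map) => {
            if map.contains_key(key) {
                map.insert(key.to_string(), json!(placeholder));
            }
            for (_, child) in map.iter_mut() {
                redact_recursively(child, key, placeholder);
            }
        }
        Value::Array(items) => {
            for item in items.iter_mut() {
                redact_recursively(item, key, placeholder);
            }
        }
        _ => {}
    }
}

fn main() {
    // Shape of a non-federated multi-search response: one entry per query,
    // each with its own requestUid, as in `simple_search_single_index` above.
    let mut response = json!({
        "results": [
            { "indexUid": "SHARED_DOCUMENTS", "estimatedTotalHits": 1, "requestUid": "aaaa" },
            { "indexUid": "SHARED_DOCUMENTS", "estimatedTotalHits": 1, "requestUid": "bbbb" }
        ]
    });
    redact_recursively(&mut response, "requestUid", "[uuid]");
    assert_eq!(response["results"][0]["requestUid"], "[uuid]");
    assert_eq!(response["results"][1]["requestUid"], "[uuid]");
}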


@@ -127,7 +127,7 @@ async fn remote_sharding() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -136,7 +136,7 @@ async fn remote_sharding() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -145,7 +145,7 @@ async fn remote_sharding() {
}
"###);
let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms2",
@@ -192,11 +192,11 @@ async fn remote_sharding() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms2.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -229,7 +229,7 @@ async fn remote_sharding() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -287,12 +287,13 @@ async fn remote_sharding() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -350,12 +351,13 @@ async fn remote_sharding() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, _status_code) = ms2.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -413,6 +415,7 @@ async fn remote_sharding() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -442,7 +445,7 @@ async fn remote_sharding_retrieve_vectors() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -451,7 +454,7 @@ async fn remote_sharding_retrieve_vectors() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -460,7 +463,7 @@ async fn remote_sharding_retrieve_vectors() {
}
"###);
let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms2",
@@ -542,11 +545,11 @@ async fn remote_sharding_retrieve_vectors() {
}});
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms2.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// multi vector search: one query per remote
@@ -594,7 +597,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -619,9 +622,10 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"#);
"###);
// multi vector search: two local queries, one remote
@@ -669,7 +673,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r#"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -694,6 +698,7 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"#);
@@ -744,7 +749,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r#"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -769,6 +774,7 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"#);
@@ -819,7 +825,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -839,9 +845,10 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"#);
"###);
// multi vector search: no local queries, all remote
@@ -889,7 +896,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -913,9 +920,10 @@ async fn remote_sharding_retrieve_vectors() {
0.2
]
},
"requestUid": "[uuid]",
"remoteErrors": {}
}
"#);
"###);
}
#[actix_rt::test]
@@ -935,7 +943,7 @@ async fn error_unregistered_remote() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -944,7 +952,7 @@ async fn error_unregistered_remote() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -983,9 +991,9 @@ async fn error_unregistered_remote() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -1055,7 +1063,7 @@ async fn error_no_weighted_score() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1064,7 +1072,7 @@ async fn error_no_weighted_score() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1107,7 +1115,7 @@ async fn error_no_weighted_score() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -1133,7 +1141,7 @@ async fn error_no_weighted_score() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1161,6 +1169,7 @@ async fn error_no_weighted_score() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote hit does not contain `._federation.weightedScoreValues`\n - hint: check that the remote instance is a Meilisearch instance running the same version",
@@ -1190,7 +1199,7 @@ async fn error_bad_response() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1199,7 +1208,7 @@ async fn error_bad_response() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1245,7 +1254,7 @@ async fn error_bad_response() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -1272,7 +1281,7 @@ async fn error_bad_response() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1300,6 +1309,7 @@ async fn error_bad_response() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "could not parse response from the remote host as a federated search response:\n - response from remote: <html>Returning an HTML page</html>\n - hint: check that the remote instance is a Meilisearch instance running the same version",
@@ -1329,7 +1339,7 @@ async fn error_bad_request() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1338,7 +1348,7 @@ async fn error_bad_request() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1377,7 +1387,7 @@ async fn error_bad_request() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -1404,7 +1414,7 @@ async fn error_bad_request() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1432,6 +1442,7 @@ async fn error_bad_request() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.queries[1]`: Index `nottest` not found.\",\"code\":\"index_not_found\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#index_not_found\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -1461,7 +1472,7 @@ async fn error_bad_request_facets_by_index() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1470,7 +1481,7 @@ async fn error_bad_request_facets_by_index() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1510,7 +1521,7 @@ async fn error_bad_request_facets_by_index() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -1541,7 +1552,7 @@ async fn error_bad_request_facets_by_index() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1575,6 +1586,7 @@ async fn error_bad_request_facets_by_index() {
"stats": {}
}
},
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.federation.facetsByIndex.test0`: Index `test0` not found.\\n - Note: index `test0` is not used in queries\",\"code\":\"index_not_found\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#index_not_found\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -1604,7 +1616,7 @@ async fn error_bad_request_facets_by_index_facet() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1613,7 +1625,7 @@ async fn error_bad_request_facets_by_index_facet() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1656,7 +1668,7 @@ async fn error_bad_request_facets_by_index_facet() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -1687,7 +1699,7 @@ async fn error_bad_request_facets_by_index_facet() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -1726,6 +1738,7 @@ async fn error_bad_request_facets_by_index_facet() {
"stats": {}
}
},
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.federation.facetsByIndex.test`: Invalid facet distribution: Attribute `id` is not filterable. This index does not have configured filterable attributes.\\n - Note: index `test` used in `.queries[1]`\",\"code\":\"invalid_multi_search_facets\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#invalid_multi_search_facets\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -1756,7 +1769,7 @@ async fn error_remote_does_not_answer() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1765,7 +1778,7 @@ async fn error_remote_does_not_answer() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1807,9 +1820,9 @@ async fn error_remote_does_not_answer() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -1959,7 +1972,7 @@ async fn error_remote_404() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1968,7 +1981,7 @@ async fn error_remote_404() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2007,9 +2020,9 @@ async fn error_remote_404() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -2035,7 +2048,7 @@ async fn error_remote_404() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2063,6 +2076,7 @@ async fn error_remote_404() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 404:\n - response from remote: null\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -2075,7 +2089,7 @@ async fn error_remote_404() {
"###);
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2113,6 +2127,7 @@ async fn error_remote_404() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2156,7 +2171,7 @@ async fn error_remote_sharding_auth() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2165,7 +2180,7 @@ async fn error_remote_sharding_auth() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2211,7 +2226,7 @@ async fn error_remote_sharding_auth() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -2244,7 +2259,7 @@ async fn error_remote_sharding_auth() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2272,6 +2287,7 @@ async fn error_remote_sharding_auth() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1-notsearch": {
"message": "could not authenticate against the remote host\n - hint: check that the remote instance was registered with a valid API key having the `search` action",
@@ -2318,7 +2334,7 @@ async fn remote_sharding_auth() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2327,7 +2343,7 @@ async fn remote_sharding_auth() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2372,7 +2388,7 @@ async fn remote_sharding_auth() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -2405,7 +2421,7 @@ async fn remote_sharding_auth() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2453,6 +2469,7 @@ async fn remote_sharding_auth() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2475,7 +2492,7 @@ async fn error_remote_500() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2484,7 +2501,7 @@ async fn error_remote_500() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2527,9 +2544,9 @@ async fn error_remote_500() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -2555,7 +2572,7 @@ async fn error_remote_500() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2583,6 +2600,7 @@ async fn error_remote_500() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 500:\n - response from remote: {\"error\":\"provoked error\",\"code\":\"test_error\",\"link\":\"https://docs.meilisearch.com/errors#test_error\"}",
@@ -2596,7 +2614,7 @@ async fn error_remote_500() {
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
// the response is full because we queried the instance that works
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2634,6 +2652,7 @@ async fn error_remote_500() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2656,7 +2675,7 @@ async fn error_remote_500_once() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2665,7 +2684,7 @@ async fn error_remote_500_once() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2708,9 +2727,9 @@ async fn error_remote_500_once() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";
@@ -2737,7 +2756,7 @@ async fn error_remote_500_once() {
// Meilisearch is tolerant to a single failure
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2775,12 +2794,13 @@ async fn error_remote_500_once() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -2818,6 +2838,7 @@ async fn error_remote_500_once() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2841,7 +2862,7 @@ async fn error_remote_timeout() {
// set self
let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2849,7 +2870,7 @@ async fn error_remote_timeout() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2891,9 +2912,9 @@ async fn error_remote_timeout() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"202 Accepted");
snapshot!(status_code, @"200 OK");
// perform multi-search
let query = "badman returns";

View File

@@ -175,7 +175,8 @@ async fn test_issue_5274() {
snapshot!(json_string!(rep, {
".processingTimeMs" => "[ignored]",
}), @r#"
".requestUid" => "[uuid]"
}), @r###"
{
"hits": [
{
@@ -189,7 +190,8 @@ async fn test_issue_5274() {
"hitsPerPage": 1,
"page": 1,
"totalPages": 1,
"totalHits": 1
"totalHits": 1,
"requestUid": "[uuid]"
}
"#);
"###);
}

View File

@@ -95,20 +95,20 @@ async fn task_bad_types() {
let (response, code) = server.tasks_filter("types=doggo").await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
}
"###);
"#);
let (response, code) = server.cancel_tasks("types=doggo").await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -119,7 +119,7 @@ async fn task_bad_types() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"

View File

@@ -43,7 +43,7 @@ async fn version_too_old() {
std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.21.0");
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.22.1");
}
#[actix_rt::test]
@@ -58,7 +58,7 @@ async fn version_requires_downgrade() {
std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.21.1 is higher than the Meilisearch version 1.21.0. Downgrade is not supported");
snapshot!(err, @"Database version 1.22.2 is higher than the Meilisearch version 1.22.1. Downgrade is not supported");
}
#[actix_rt::test]

View File

@@ -1,6 +1,5 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
"hits": [
@@ -21,5 +20,6 @@ snapshot_kind: text
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"error": null,
"duration": "[duration]",

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.21.0"
"upgradeTo": "v1.22.1"
},
"error": null,
"duration": "[duration]",

View File

@@ -294,7 +294,7 @@ async fn check_the_index_features(server: &Server) {
let (results, _status) =
kefir.search_post(json!({ "sort": ["age:asc"], "filter": "surname = kefirounet" })).await;
snapshot!(results, name: "search_with_sort_and_filter");
snapshot!(json_string!(results, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), name: "search_with_sort_and_filter");
// ensuring we can still get the vectors and that their `regenerate` flag is still good.
let (results, _status) = kefir.search_post(json!({"retrieveVectors": true})).await;

View File

@@ -323,7 +323,7 @@ async fn binary_quantize_clear_documents() {
// Make sure the vector DB has been cleared
let (documents, _code) =
index.search_post(json!({ "hybrid": { "embedder": "manual" }, "vector": [1, 1, 1] })).await;
snapshot!(documents, @r#"
snapshot!(json_string!(documents, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "",
@@ -331,9 +331,10 @@ async fn binary_quantize_clear_documents() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]",
"semanticHitCount": 0
}
"#);
"###);
}
#[actix_rt::test]

View File

@@ -257,7 +257,7 @@ async fn search_with_vector() {
json!({"vector": [1.0, 1.0, 1.0], "hybrid": {"semanticRatio": 1.0, "embedder": "rest"}, "limit": 1}
)).await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -270,9 +270,10 @@ async fn search_with_vector() {
"limit": 1,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]",
"semanticHitCount": 1
}
"#);
"###);
}
#[actix_rt::test]
@@ -288,7 +289,7 @@ async fn search_with_media() {
))
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -302,9 +303,10 @@ async fn search_with_media() {
"limit": 1,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]",
"semanticHitCount": 1
}
"#);
"###);
}
#[actix_rt::test]
@@ -390,7 +392,7 @@ async fn search_with_query() {
))
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -404,9 +406,10 @@ async fn search_with_query() {
"limit": 1,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]",
"semanticHitCount": 1
}
"#);
"###);
}
#[actix_rt::test]
@@ -2076,7 +2079,7 @@ async fn composite() {
json!({"vector": [1.0, 1.0, 1.0], "hybrid": {"semanticRatio": 1.0, "embedder": "rest"}, "limit": 1}
)).await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -2089,9 +2092,10 @@ async fn composite() {
"limit": 1,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]",
"semanticHitCount": 1
}
"#);
"###);
let (value, code) = index
.search_post(
@@ -2100,7 +2104,7 @@ async fn composite() {
)
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r#"
{
"hits": [
{
@@ -2114,6 +2118,7 @@ async fn composite() {
"limit": 1,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]",
"semanticHitCount": 1
}
"#);

View File

@@ -689,7 +689,7 @@ async fn clear_documents() {
// Make sure the vector DB has been cleared
let (documents, _code) =
index.search_post(json!({ "vector": [1, 1, 1], "hybrid": {"embedder": "manual"} })).await;
snapshot!(documents, @r#"
snapshot!(json_string!(documents, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [],
"query": "",
@@ -697,9 +697,10 @@ async fn clear_documents() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]",
"semanticHitCount": 0
}
"#);
"###);
}
#[actix_rt::test]
@@ -743,7 +744,7 @@ async fn add_remove_one_vector_4588() {
json!({"vector": [1, 1, 1], "hybrid": {"semanticRatio": 1.0, "embedder": "manual"} }),
)
.await;
snapshot!(documents, @r#"
snapshot!(json_string!(documents, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
@@ -756,9 +757,10 @@ async fn add_remove_one_vector_4588() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]",
"semanticHitCount": 1
}
"#);
"###);
let (documents, _code) = index
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })

View File

@@ -126,7 +126,7 @@ enum Command {
/// before running the copy and compaction. This way the current indexation must finish before
/// the compaction operation can start. Once the compaction is done, the big index is replaced
/// by the compacted one and the mutable transaction is released.
CompactIndex { index_name: String },
IndexCompaction { index_name: String },
/// Uses the hair dryer to make the dedicated pages hot in the cache
///
@@ -165,7 +165,7 @@ fn main() -> anyhow::Result<()> {
let target_version = parse_version(&target_version).context("While parsing `--target-version`. Make sure `--target-version` is in the format MAJOR.MINOR.PATCH")?;
OfflineUpgrade { db_path, current_version: detected_version, target_version }.upgrade()
}
Command::CompactIndex { index_name } => compact_index(db_path, &index_name),
Command::IndexCompaction { index_name } => compact_index(db_path, &index_name),
Command::HairDryer { index_name, index_part } => {
hair_dryer(db_path, &index_name, &index_part)
}
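
The doc comment above spells out the compaction sequence: hold a write transaction so any in-flight indexation finishes first, copy the environment with compaction enabled, then swap the compacted file in before releasing the lock. A rough sketch of that sequence, assuming a heed version where `Env` exposes `copy_to_file` with `CompactionOption::Enabled`; the helper name, paths, and error handling are placeholders, not the task's actual implementation.

use std::path::Path;

use heed::{CompactionOption, Env};

// Hypothetical helper mirroring the flow described in the doc comment.
fn compact_index_env(env: &Env, scratch_dir: &Path) -> anyhow::Result<()> {
    // Taking the write transaction blocks until the current indexation is
    // done, so the copy sees a quiescent index.
    let wtxn = env.write_txn()?;

    // Write a compacted copy of the whole environment next to the original.
    let compacted = scratch_dir.join("data.mdb.compacting");
    env.copy_to_file(&compacted, CompactionOption::Enabled)?;

    // The real task would atomically replace the original data file and
    // reopen the environment before this lock is released.
    drop(wtxn);
    Ok(())
}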

View File

@@ -119,7 +119,7 @@ twox-hash = { version = "2.1.1", default-features = false, features = [
"xxhash64",
] }
geo-types = "0.7.16"
zerometry = "0.1.0"
zerometry = "0.3.0"
[dev-dependencies]
mimalloc = { version = "0.1.47", default-features = false }

View File

@@ -432,10 +432,6 @@ and can not be more than 511 bytes.", .document_id.to_string()
InvalidChatSettingsDocumentTemplateMaxBytes,
#[error("{0}")]
DocumentEmbeddingError(String),
#[error("enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.")]
NetworkShardingWithoutSelf,
#[error("Field `.remotes.{0}.url` cannot be set to `null`")]
NetworkMissingUrl(String),
}
impl From<crate::vector::Error> for Error {

View File

@@ -5,24 +5,18 @@
use std::hash::{BuildHasher as _, BuildHasherDefault};
pub struct Shards(pub Vec<Shard>);
pub struct Shard {
pub is_own: bool,
pub name: String,
pub struct Shards {
pub own: Vec<String>,
pub others: Vec<String>,
}
impl Shards {
pub fn must_process(&self, docid: &str) -> bool {
self.processing_shard(docid).map(|shard| shard.is_own).unwrap_or_default()
}
pub fn processing_shard<'a>(&'a self, docid: &str) -> Option<&'a Shard> {
let hasher = BuildHasherDefault::<twox_hash::XxHash3_64>::new();
let to_hash = |shard: &'a Shard| (shard, hasher.hash_one((&shard.name, docid)));
let to_hash = |shard: &String| hasher.hash_one((shard, docid));
let shard =
self.0.iter().map(to_hash).max_by_key(|(_, hash)| *hash).map(|(shard, _)| shard);
shard
let max_hash = self.others.iter().map(to_hash).max().unwrap_or_default();
self.own.iter().map(to_hash).any(|hash| hash > max_hash)
}
}
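
The rewritten `must_process` is a rendezvous-style ownership check: every shard name, local and remote, is hashed together with the document id, and the document is indexed locally only when one of the locally-owned names beats the best remote hash. A small usage sketch under that reading; the shard names and document ids are made up, and `Shards` is the struct defined above.

fn routing_example() {
    let shards = Shards {
        own: vec!["ms0".to_string()],
        others: vec!["ms1".to_string(), "ms2".to_string()],
    };

    for docid in ["kefir", "echo", "intel"] {
        // The same document id always lands on the same shard, and adding a
        // remote only moves the documents whose winning hash changes.
        if shards.must_process(docid) {
            println!("{docid}: indexed locally");
        } else {
            println!("{docid}: belongs to a remote shard");
        }
    }
}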

View File

@@ -39,6 +39,7 @@ const UPGRADE_FUNCTIONS: &[&dyn UpgradeIndex] = &[
&ToTargetNoOp { target: (1, 19, 0) },
&ToTargetNoOp { target: (1, 20, 0) },
&ToTargetNoOp { target: (1, 21, 0) },
&ToTargetNoOp { target: (1, 22, 0) },
// This is the last upgrade function, it will be called when the index is up to date.
// any other upgrade function should be added before this one.
&ToCurrentNoOp {},
@@ -71,6 +72,7 @@ const fn start(from: (u32, u32, u32)) -> Option<usize> {
(1, 19, _) => function_index!(9),
(1, 20, _) => function_index!(10),
(1, 21, _) => function_index!(11),
(1, 22, _) => function_index!(12),
// We deliberately don't add a placeholder with (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) here to force manually
// considering dumpless upgrade.
(_major, _minor, _patch) => return None,
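
The pattern above pairs a table of upgrade steps with a `start` function that maps the on-disk version to an offset: everything from that offset to the end of the table runs in order, and the deliberately missing catch-all forces a conscious decision for each new release. A self-contained toy with hypothetical names that mirrors the idea:

trait UpgradeStep {
    fn target(&self) -> (u32, u32, u32);
}

struct NoOpTo((u32, u32, u32));

impl UpgradeStep for NoOpTo {
    fn target(&self) -> (u32, u32, u32) {
        self.0
    }
}

fn run_from(steps: &[&dyn UpgradeStep], first: usize) {
    // Every step from `first` onwards is applied, so supporting a new version
    // only costs one table row plus one match arm in the offset function.
    for step in &steps[first..] {
        println!("upgrading to {:?}", step.target());
    }
}

fn main() {
    let to_1_21 = NoOpTo((1, 21, 0));
    let to_1_22 = NoOpTo((1, 22, 0));
    let steps: [&dyn UpgradeStep; 2] = [&to_1_21, &to_1_22];
    // A database already at 1.21.x starts past the 1.21.0 step and only runs
    // the 1.22.0 one; an unknown version would get no offset at all.
    run_from(&steps, 1);
}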

View File

@@ -1046,6 +1046,10 @@ impl VectorStore {
where
R: rand::Rng + rand::SeedableRng,
{
// No work if distances are the same
if AD::name() == HD::name() {
return Ok(());
}
for index in vector_store_range_for_embedder(self.embedder_index) {
let arroy_reader: arroy::Reader<AD> =
match arroy::Reader::open(arroy_rtxn, index, self.database.remap_types()) {
@@ -1084,6 +1088,10 @@ impl VectorStore {
where
R: rand::Rng + rand::SeedableRng,
{
// No work if distances are the same
if AD::name() == HD::name() {
return Ok(());
}
for index in vector_store_range_for_embedder(self.embedder_index) {
let hannoy_reader: hannoy::Reader<HD> =
match hannoy::Reader::open(hannoy_rtxn, index, self.database.remap_types()) {
@@ -1098,12 +1106,8 @@ impl VectorStore {
arroy_writer.clear(arroy_wtxn)?;
for entry in hannoy_reader.iter(hannoy_rtxn)? {
let (item, mut vector) = entry?;
// hannoy bug? the `vector` here can be longer than `dimensions`.
// workaround: truncating.
if vector.len() > dimensions {
vector.truncate(dimensions);
}
// arroy and hannoy disagreement over the 0 value
debug_assert!(vector.len() == dimensions);
// arroy and hannoy disagreement over the 0 value if distance is Hamming
// - arroy does:
// - if x >= 0 => 1
// - if x < 0 => -1