Compare commits


5 Commits

Author      SHA1        Message                                       Date
Kerollmops  18ba2e39c0  Multithread word prefix position docids      2025-09-23 11:02:57 +02:00
Kerollmops  4fc9c3cd22  Make clippy happy                            2025-09-22 15:22:16 +02:00
Kerollmops  8ce3aa431e  Fix the algorithm                            2025-09-22 15:04:30 +02:00
Kerollmops  a9c8ad57db  Multi-thread the facet bulk processing       2025-09-22 15:04:30 +02:00
Kerollmops  9146122047  Patch heed to create multiple nested RoTxns  2025-09-22 15:04:30 +02:00
151 changed files with 1892 additions and 6113 deletions
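The two headline commits parallelize the word-prefix-position-docids and facet bulk steps by fanning independent prefixes out over worker threads. As a rough sketch of that pattern only (not the actual Meilisearch code; `aggregate_per_prefix` and its cost function are made up for illustration), a Rayon `par_iter` over independent prefixes looks like this:

    use rayon::prelude::*;

    /// Hypothetical per-prefix aggregation: each prefix is processed on its
    /// own Rayon worker and the results are collected back in input order.
    fn aggregate_per_prefix(prefixes: &[String]) -> Vec<(String, u64)> {
        prefixes
            .par_iter() // fan the prefixes out over the Rayon thread pool
            .map(|prefix| {
                // Stand-in for the real work (merging position docid lists, etc.).
                (prefix.clone(), prefix.len() as u64)
            })
            .collect()
    }

    fn main() {
        let prefixes: Vec<String> = vec!["he".into(), "wor".into(), "pre".into()];
        for (prefix, cost) in aggregate_per_prefix(&prefixes) {
            println!("{prefix}: {cost}");
        }
    }

The fan-out only works because each prefix's result is independent of the others.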


@@ -7,5 +7,6 @@ updates:
schedule:
interval: "monthly"
labels:
- 'skip changelog'
- 'dependencies'
rebase-strategy: disabled

.github/release-draft-template.yml (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
name-template: 'v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
exclude-labels:
- 'skip changelog'
version-resolver:
minor:
labels:
- 'enhancement'
default: patch
categories:
- title: '⚠️ Breaking changes'
label: 'breaking-change'
- title: '🚀 Enhancements'
label: 'enhancement'
- title: '🐛 Bug Fixes'
label: 'bug'
- title: '🔒 Security'
label: 'security'
- title: '⚙️ Maintenance/misc'
label:
- 'maintenance'
- 'documentation'
template: |
$CHANGES
❤️ Huge thanks to our contributors: $CONTRIBUTORS.
no-changes-template: 'Changes are coming soon 😎'
sort-direction: 'ascending'
replacers:
- search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
replace: ''
- search: '/(?:and )?@dependabot(?:\[bot\])?,?/g'
replace: ''


@@ -65,7 +65,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Install cosign
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # tag=v3.9.2
- name: Login to Docker Hub
uses: docker/login-action@v3


@@ -11,7 +11,7 @@ jobs:
check-version:
name: Check the version validity
runs-on: ubuntu-latest
# No need to check the version for dry run (cron or workflow_dispatch)
# No need to check the version for dry run (cron)
steps:
- uses: actions/checkout@v5
# Check if the tag has the v<number>.<number>.<number> format.
@@ -48,7 +48,7 @@ jobs:
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron or workflow_dispatch)
# No need to upload binaries for dry run (cron)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
@@ -78,7 +78,7 @@ jobs:
- uses: dtolnay/rust-toolchain@1.89
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron or workflow_dispatch)
# No need to upload binaries for dry run (cron)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
@@ -111,7 +111,7 @@ jobs:
command: build
args: --release --target ${{ matrix.target }}
- name: Upload the binary to release
# No need to upload binaries for dry run (cron or workflow_dispatch)
# No need to upload binaries for dry run (cron)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
@@ -176,7 +176,7 @@ jobs:
- name: List target output files
run: ls -lR ./target
- name: Upload the binary to release
# No need to upload binaries for dry run (cron or workflow_dispatch)
# No need to upload binaries for dry run (cron)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:
@@ -187,7 +187,6 @@ jobs:
publish-openapi-file:
name: Publish OpenAPI file
needs: check-version
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -202,7 +201,7 @@ jobs:
cd crates/openapi-generator
cargo run --release -- --pretty --output ../../meilisearch.json
- name: Upload OpenAPI to Release
# No need to upload for dry run (cron or workflow_dispatch)
# No need to upload for dry run (cron)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.2
with:

.github/workflows/release-drafter.yml (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
name: Release Drafter
permissions:
contents: read
pull-requests: write
on:
push:
branches:
- main
jobs:
update_release_draft:
runs-on: ubuntu-latest
steps:
- uses: release-drafter/release-drafter@v6
with:
config-name: release-draft-template.yml
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_DRAFTER_TOKEN }}


@@ -50,7 +50,7 @@ jobs:
with:
repository: meilisearch/meilisearch-dotnet
- name: Setup .NET Core
uses: actions/setup-dotnet@v5
uses: actions/setup-dotnet@v4
with:
dotnet-version: "8.0.x"
- name: Install dependencies
@@ -100,7 +100,7 @@ jobs:
- '7700:7700'
steps:
- name: Set up Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: stable
- uses: actions/checkout@v5
@@ -135,13 +135,13 @@ jobs:
- name: Set up Java
uses: actions/setup-java@v5
with:
java-version: 17
distribution: 'temurin'
java-version: 8
distribution: 'zulu'
cache: gradle
- name: Grant execute permission for gradlew
run: chmod +x gradlew
- name: Build and run unit and integration tests
run: ./gradlew build integrationTest --info
run: ./gradlew build integrationTest
meilisearch-js-tests:
needs: define-docker-image
@@ -160,7 +160,7 @@ jobs:
with:
repository: meilisearch/meilisearch-js
- name: Setup node
uses: actions/setup-node@v5
uses: actions/setup-node@v4
with:
cache: 'yarn'
- name: Install dependencies
@@ -224,7 +224,7 @@ jobs:
with:
repository: meilisearch/meilisearch-python
- name: Set up Python
uses: actions/setup-python@v6
uses: actions/setup-python@v5
- name: Install pipenv
uses: dschep/install-pipenv-action@v1
- name: Install dependencies
@@ -318,7 +318,7 @@ jobs:
with:
repository: meilisearch/meilisearch-js-plugins
- name: Setup node
uses: actions/setup-node@v5
uses: actions/setup-node@v4
with:
cache: yarn
- name: Install dependencies

Cargo.lock (generated, 1749 lines changed): diff suppressed because it is too large.


@@ -23,7 +23,7 @@ members = [
]
[workspace.package]
version = "1.24.0"
version = "1.21.0"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",
@@ -50,3 +50,6 @@ opt-level = 3
opt-level = 3
[profile.dev.package.roaring]
opt-level = 3
[patch.crates-io]
heed = { git = "https://github.com/meilisearch/heed", branch = "allow-nested-rtxn-from-wtxn" }
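The `[patch.crates-io]` entry swaps heed for a branch whose name says it allows opening nested `RoTxn`s from a live `RwTxn`, which the parallelized indexing steps above rely on. That nested-transaction API is not part of the published crate, so the sketch below only shows the stock heed usage the patch builds on (a minimal sketch, assuming the `heed` and `tempfile` crates as dependencies):

    use heed::byteorder::BE;
    use heed::types::{Str, U32};
    use heed::{Database, EnvOpenOptions};

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir()?;
        // Safety: no other process is touching this fresh environment.
        let env = unsafe { EnvOpenOptions::new().open(dir.path())? };

        // A single write transaction populates the database...
        let mut wtxn = env.write_txn()?;
        let db: Database<Str, U32<BE>> = env.create_database(&mut wtxn, None)?;
        db.put(&mut wtxn, "hello", &42)?;
        wtxn.commit()?;

        // ...and read transactions can then be opened freely, one per thread;
        // per its branch name, the patch additionally lets them nest under a
        // still-open RwTxn.
        let rtxn = env.read_txn()?;
        assert_eq!(db.get(&rtxn, "hello")?, Some(42));
        Ok(())
    }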


@@ -1,5 +1,5 @@
# Compile
FROM rust:1.89-alpine3.22 AS compiler
FROM rust:1.89-alpine3.20 AS compiler
RUN apk add -q --no-cache build-base openssl-dev
@@ -20,7 +20,7 @@ RUN set -eux; \
cargo build --release -p meilisearch -p meilitool
# Run
FROM alpine:3.22
FROM alpine:3.20
LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch"
ENV MEILI_HTTP_ADDR 0.0.0.0:7700

LICENSE (28 lines changed)

@@ -1,9 +1,29 @@
# License
MIT License
Copyright (c) 2019-2025 Meili SAS
Parts of this work fall under the Meilisearch Enterprise Edition (EE) and are licensed under the Business Source License 1.1; please refer to [LICENSE-EE](./LICENSE-EE) for details.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The other parts of this work are licensed under the [MIT license](./LICENSE-MIT).
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
`SPDX-License-Identifier: MIT AND BUSL-1.1`
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---
🔒 Meilisearch Enterprise Edition (EE)
Certain parts of this codebase are not licensed under the MIT license and governed by the Business Source License 1.1.
See the LICENSE-EE file for details.


@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2019-2025 Meili SAS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -121,7 +121,7 @@ If you want to know more about the kind of data we collect and what we use it fo
Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
🗞 [Subscribe to our newsletter](https://share-eu1.hsforms.com/1LN5N0x_GQgq7ss7tXmSykwfg3aq) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:


@@ -158,9 +158,6 @@ pub enum KindDump {
UpgradeDatabase {
from: (u32, u32, u32),
},
IndexCompaction {
index_uid: String,
},
}
impl From<Task> for TaskDump {
@@ -243,9 +240,6 @@ impl From<KindWithContent> for KindDump {
KindWithContent::UpgradeDatabase { from: version } => {
KindDump::UpgradeDatabase { from: version }
}
KindWithContent::IndexCompaction { index_uid } => {
KindDump::IndexCompaction { index_uid }
}
}
}
}


@@ -7,14 +7,23 @@
use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::character::complete::{char, multispace0, multispace1};
use nom::combinator::{cut, map, value};
use nom::sequence::{preceded, terminated, tuple};
use nom::character::complete::char;
use nom::character::complete::multispace0;
use nom::character::complete::multispace1;
use nom::combinator::cut;
use nom::combinator::map;
use nom::combinator::value;
use nom::sequence::preceded;
use nom::sequence::{terminated, tuple};
use Condition::*;
use crate::error::IResultExt;
use crate::value::{parse_vector_value, parse_vector_value_cut};
use crate::{parse_value, Error, ErrorKind, FilterCondition, IResult, Span, Token, VectorFilter};
use crate::value::parse_vector_value;
use crate::value::parse_vector_value_cut;
use crate::Error;
use crate::ErrorKind;
use crate::VectorFilter;
use crate::{parse_value, FilterCondition, IResult, Span, Token};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Condition<'a> {


@@ -75,11 +75,7 @@ pub enum ExpectedValueKind {
pub enum ErrorKind<'a> {
ReservedGeo(&'a str),
GeoRadius,
GeoRadiusArgumentCount(usize),
GeoBoundingBox,
GeoPolygon,
GeoPolygonNotEnoughPoints(usize),
GeoCoordinatesNotPair(usize),
MisusedGeoRadius,
MisusedGeoBoundingBox,
VectorFilterLeftover,
@@ -193,7 +189,7 @@ impl Display for Error<'_> {
}
ErrorKind::InvalidPrimary => {
let text = if input.trim().is_empty() { "but instead got nothing.".to_string() } else { format!("at `{}`.", escaped_input) };
writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` {text}")?
writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` {}", text)?
}
ErrorKind::InvalidEscapedNumber => {
writeln!(f, "Found an invalid escaped sequence number: `{}`.", escaped_input)?
@@ -202,23 +198,11 @@ impl Display for Error<'_> {
writeln!(f, "Found unexpected characters at the end of the filter: `{}`. You probably forgot an `OR` or an `AND` rule.", escaped_input)?
}
ErrorKind::GeoRadius => {
writeln!(f, "The `_geoRadius` filter must be in the form: `_geoRadius(latitude, longitude, radius, optionalResolution)`.")?
}
ErrorKind::GeoRadiusArgumentCount(count) => {
writeln!(f, "Was expecting 3 or 4 arguments for `_geoRadius`, but instead found {count}.")?
writeln!(f, "The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.")?
}
ErrorKind::GeoBoundingBox => {
writeln!(f, "The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.")?
}
ErrorKind::GeoPolygon => {
writeln!(f, "The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.")?
}
ErrorKind::GeoPolygonNotEnoughPoints(n) => {
writeln!(f, "The `_geoPolygon` filter expects at least 3 points but only {n} were specified")?;
}
ErrorKind::GeoCoordinatesNotPair(number) => {
writeln!(f, "Was expecting 2 coordinates but instead found {number}.")?
}
ErrorKind::ReservedGeo(name) => {
writeln!(f, "`{}` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.", name.escape_debug())?
}


@@ -19,7 +19,6 @@
//! word = (alphanumeric | _ | - | .)+
//! geoRadius = "_geoRadius(" WS* float WS* "," WS* float WS* "," float WS* ")"
//! geoBoundingBox = "_geoBoundingBox([" WS * float WS* "," WS* float WS* "], [" WS* float WS* "," WS* float WS* "]")
//! geoPolygon = "_geoPolygon([[" WS* float WS* "," WS* float WS* "],+])"
//! ```
//!
//! Other BNF grammar used to handle some specific errors:
@@ -157,9 +156,8 @@ pub enum FilterCondition<'a> {
Or(Vec<Self>),
And(Vec<Self>),
VectorExists { fid: Token<'a>, embedder: Option<Token<'a>>, filter: VectorFilter<'a> },
GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a>, resolution: Option<Token<'a>> },
GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a> },
GeoBoundingBox { top_right_point: [Token<'a>; 2], bottom_left_point: [Token<'a>; 2] },
GeoPolygon { points: Vec<[Token<'a>; 2]> },
}
pub enum TraversedElement<'a> {
@@ -191,7 +189,6 @@ impl<'a> FilterCondition<'a> {
FilterCondition::VectorExists { .. }
| FilterCondition::GeoLowerThan { .. }
| FilterCondition::GeoBoundingBox { .. }
| FilterCondition::GeoPolygon { .. }
| FilterCondition::In { .. } => None,
}
}
@@ -205,7 +202,6 @@ impl<'a> FilterCondition<'a> {
}
FilterCondition::GeoLowerThan { .. }
| FilterCondition::GeoBoundingBox { .. }
| FilterCondition::GeoPolygon { .. }
| FilterCondition::In { .. } => None,
FilterCondition::VectorExists { fid, .. } => Some(fid),
}
@@ -400,27 +396,23 @@ fn parse_not(input: Span, depth: usize) -> IResult<FilterCondition> {
/// If we parse `_geoRadius` we MUST parse the rest of the expression.
fn parse_geo_radius(input: Span) -> IResult<FilterCondition> {
// we want to allow space BEFORE the _geoRadius but not after
let (input, _) = tuple((multispace0, word_exact("_geoRadius")))(input)?;
// if we were able to parse `_geoRadius` and can't parse the rest of the input we return a failure
let parsed =
delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))(input)
.map_cut(ErrorKind::GeoRadius);
let parsed = preceded(
tuple((multispace0, word_exact("_geoRadius"))),
// if we were able to parse `_geoRadius` and can't parse the rest of the input we return a failure
cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
)(input)
.map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::GeoRadius)));
let (input, args) = parsed?;
if !(3..=4).contains(&args.len()) {
return Err(Error::failure_from_kind(input, ErrorKind::GeoRadiusArgumentCount(args.len())));
if args.len() != 3 {
return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::GeoRadius)));
}
let res = FilterCondition::GeoLowerThan {
point: [args[0].into(), args[1].into()],
radius: args[2].into(),
resolution: args.get(3).cloned().map(Token::from),
};
Ok((input, res))
}
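Both geo parsers hinge on nom's `cut`: once the keyword has matched, any later mismatch must become a hard `Failure` so an enclosing `alt` stops trying other grammar rules instead of producing a misleading error. A standalone illustration of that semantics (nom 7; `geo_radius_open` is a made-up toy parser, not the Meilisearch one):

    use nom::bytes::complete::tag;
    use nom::combinator::cut;
    use nom::sequence::preceded;
    use nom::IResult;

    // After `_geoRadius` matches, `cut` promotes any error in the tail from
    // a recoverable `Error` to an unrecoverable `Failure`.
    fn geo_radius_open(input: &str) -> IResult<&str, &str> {
        preceded(tag("_geoRadius"), cut(tag("(")))(input)
    }

    fn main() {
        // Keyword absent: soft `Error`, an enclosing `alt` may try other branches.
        assert!(matches!(geo_radius_open("price > 10"), Err(nom::Err::Error(_))));
        // Keyword present but malformed tail: hard `Failure`, parsing stops here.
        assert!(matches!(geo_radius_open("_geoRadius[1,2]"), Err(nom::Err::Failure(_))));
    }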
@@ -428,33 +420,26 @@ fn parse_geo_radius(input: Span) -> IResult<FilterCondition> {
/// If we parse `_geoBoundingBox` we MUST parse the rest of the expression.
fn parse_geo_bounding_box(input: Span) -> IResult<FilterCondition> {
// we want to allow space BEFORE the _geoBoundingBox but not after
let (input, _) = tuple((multispace0, word_exact("_geoBoundingBox")))(input)?;
// if we were able to parse `_geoBoundingBox` and can't parse the rest of the input we return a failure
let (input, args) = delimited(
char('('),
separated_list1(
tag(","),
ws(delimited(char('['), separated_list1(tag(","), ws(recognize_float)), char(']'))),
),
char(')'),
let parsed = preceded(
tuple((multispace0, word_exact("_geoBoundingBox"))),
// if we were able to parse `_geoBoundingBox` and can't parse the rest of the input we return a failure
cut(delimited(
char('('),
separated_list1(
tag(","),
ws(delimited(char('['), separated_list1(tag(","), ws(recognize_float)), char(']'))),
),
char(')'),
)),
)(input)
.map_cut(ErrorKind::GeoBoundingBox)?;
.map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::GeoBoundingBox)));
if args.len() != 2 {
let (input, args) = parsed?;
if args.len() != 2 || args[0].len() != 2 || args[1].len() != 2 {
return Err(Error::failure_from_kind(input, ErrorKind::GeoBoundingBox));
}
if let Some(offending) = args.iter().find(|a| a.len() != 2) {
let context = offending.first().unwrap_or(&input);
return Err(Error::failure_from_kind(
*context,
ErrorKind::GeoCoordinatesNotPair(offending.len()),
));
}
let res = FilterCondition::GeoBoundingBox {
top_right_point: [args[0][0].into(), args[0][1].into()],
bottom_left_point: [args[1][0].into(), args[1][1].into()],
@@ -462,47 +447,6 @@ fn parse_geo_bounding_box(input: Span) -> IResult<FilterCondition> {
Ok((input, res))
}
/// geoPolygon = "_geoPolygon([[" WS* float WS* "," WS* float WS* "],+])"
/// If we parse `_geoPolygon` we MUST parse the rest of the expression.
fn parse_geo_polygon(input: Span) -> IResult<FilterCondition> {
// we want to allow space BEFORE the _geoPolygon but not after
let (input, _) = tuple((multispace0, word_exact("_geoPolygon")))(input)?;
// if we were able to parse `_geoPolygon` and can't parse the rest of the input we return a failure
let (input, args): (_, Vec<Vec<LocatedSpan<_, _>>>) = delimited(
char('('),
separated_list1(
tag(","),
ws(delimited(char('['), separated_list1(tag(","), ws(recognize_float)), char(']'))),
),
preceded(opt(ws(char(','))), char(')')), // Tolerate trailing comma
)(input)
.map_cut(ErrorKind::GeoPolygon)?;
if args.len() < 3 {
let context = args.last().and_then(|a| a.last()).unwrap_or(&input);
return Err(Error::failure_from_kind(
*context,
ErrorKind::GeoPolygonNotEnoughPoints(args.len()),
));
}
if let Some(offending) = args.iter().find(|a| a.len() != 2) {
let context = offending.first().unwrap_or(&input);
return Err(Error::failure_from_kind(
*context,
ErrorKind::GeoCoordinatesNotPair(offending.len()),
));
}
let res = FilterCondition::GeoPolygon {
points: args.into_iter().map(|a| [a[0].into(), a[1].into()]).collect(),
};
Ok((input, res))
}
/// geoPoint = WS* "_geoPoint(float WS* "," WS* float WS* "," WS* float)
fn parse_geo_point(input: Span) -> IResult<FilterCondition> {
// we want to forbid space BEFORE the _geoPoint but not after
@@ -572,8 +516,8 @@ fn parse_primary(input: Span, depth: usize) -> IResult<FilterCondition> {
Error::new_from_kind(input, ErrorKind::MissingClosingDelimiter(c.char()))
}),
),
// Made a random block of functions because we reached the maximum number of elements per alt
alt((parse_geo_radius, parse_geo_bounding_box, parse_geo_polygon)),
parse_geo_radius,
parse_geo_bounding_box,
parse_in,
parse_not_in,
parse_condition,
@@ -653,12 +597,9 @@ impl std::fmt::Display for FilterCondition<'_> {
}
write!(f, " EXISTS")
}
FilterCondition::GeoLowerThan { point, radius, resolution: None } => {
FilterCondition::GeoLowerThan { point, radius } => {
write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
}
FilterCondition::GeoLowerThan { point, radius, resolution: Some(resolution) } => {
write!(f, "_geoRadius({}, {}, {}, {})", point[0], point[1], radius, resolution)
}
FilterCondition::GeoBoundingBox {
top_right_point: top_left_point,
bottom_left_point: bottom_right_point,
@@ -672,13 +613,6 @@ impl std::fmt::Display for FilterCondition<'_> {
bottom_right_point[1]
)
}
FilterCondition::GeoPolygon { points } => {
write!(f, "_geoPolygon([")?;
for point in points {
write!(f, "[{}, {}], ", point[0], point[1])?;
}
write!(f, "])")
}
}
}
}
@@ -842,17 +776,12 @@ pub mod tests {
insta::assert_snapshot!(p("_geoRadius(12, 13, 14)"), @"_geoRadius({12}, {13}, {14})");
insta::assert_snapshot!(p("NOT _geoRadius(12, 13, 14)"), @"NOT (_geoRadius({12}, {13}, {14}))");
insta::assert_snapshot!(p("_geoRadius(12,13,14)"), @"_geoRadius({12}, {13}, {14})");
insta::assert_snapshot!(p("_geoRadius(12,13,14,1000)"), @"_geoRadius({12}, {13}, {14}, {1000})");
// Test geo bounding box
insta::assert_snapshot!(p("_geoBoundingBox([12, 13], [14, 15])"), @"_geoBoundingBox([{12}, {13}], [{14}, {15}])");
insta::assert_snapshot!(p("NOT _geoBoundingBox([12, 13], [14, 15])"), @"NOT (_geoBoundingBox([{12}, {13}], [{14}, {15}]))");
insta::assert_snapshot!(p("_geoBoundingBox([12,13],[14,15])"), @"_geoBoundingBox([{12}, {13}], [{14}, {15}])");
// Test geo polygon
insta::assert_snapshot!(p("_geoPolygon([12, 13], [14, 15], [16, 17])"), @"_geoPolygon([[{12}, {13}], [{14}, {15}], [{16}, {17}], ])");
insta::assert_snapshot!(p("_geoPolygon([12, 13], [14, 15], [-1.2,2939.2], [1,1])"), @"_geoPolygon([[{12}, {13}], [{14}, {15}], [{-1.2}, {2939.2}], [{1}, {1}], ])");
// Test OR + AND
insta::assert_snapshot!(p("channel = ponce AND 'dog race' != 'bernese mountain'"), @"AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
insta::assert_snapshot!(p("channel = ponce OR 'dog race' != 'bernese mountain'"), @"OR[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
@@ -909,80 +838,50 @@ pub mod tests {
11:12 channel = 🐻 AND followers < 100
"###);
insta::assert_snapshot!(p("'OR'"), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `\'OR\'`.
insta::assert_snapshot!(p("'OR'"), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `\'OR\'`.
1:5 'OR'
");
"###);
insta::assert_snapshot!(p("OR"), @r###"
Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
1:3 OR
"###);
insta::assert_snapshot!(p("channel Ponce"), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `channel Ponce`.
insta::assert_snapshot!(p("channel Ponce"), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `channel Ponce`.
1:14 channel Ponce
");
"###);
insta::assert_snapshot!(p("channel = Ponce OR"), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` but instead got nothing.
insta::assert_snapshot!(p("channel = Ponce OR"), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` but instead got nothing.
19:19 channel = Ponce OR
");
"###);
insta::assert_snapshot!(p("_geoRadius"), @r"
The `_geoRadius` filter must be in the form: `_geoRadius(latitude, longitude, radius, optionalResolution)`.
11:11 _geoRadius
");
insta::assert_snapshot!(p("_geoRadius"), @r###"
The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
1:11 _geoRadius
"###);
insta::assert_snapshot!(p("_geoRadius = 12"), @r"
The `_geoRadius` filter must be in the form: `_geoRadius(latitude, longitude, radius, optionalResolution)`.
11:16 _geoRadius = 12
");
insta::assert_snapshot!(p("_geoRadius = 12"), @r###"
The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
1:16 _geoRadius = 12
"###);
insta::assert_snapshot!(p("_geoBoundingBox"), @r"
insta::assert_snapshot!(p("_geoBoundingBox"), @r###"
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
16:16 _geoBoundingBox
");
1:16 _geoBoundingBox
"###);
insta::assert_snapshot!(p("_geoBoundingBox = 12"), @r"
insta::assert_snapshot!(p("_geoBoundingBox = 12"), @r###"
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
16:21 _geoBoundingBox = 12
");
1:21 _geoBoundingBox = 12
"###);
insta::assert_snapshot!(p("_geoBoundingBox(1.0, 1.0)"), @r"
insta::assert_snapshot!(p("_geoBoundingBox(1.0, 1.0)"), @r###"
The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
17:26 _geoBoundingBox(1.0, 1.0)
");
insta::assert_snapshot!(p("_geoPolygon([1,2,3])"), @r"
The `_geoPolygon` filter expects at least 3 points but only 1 were specified
18:19 _geoPolygon([1,2,3])
");
insta::assert_snapshot!(p("_geoPolygon(1,2,3)"), @r"
The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.
13:19 _geoPolygon(1,2,3)
");
insta::assert_snapshot!(p("_geoPolygon([1,2],[1,2],[1,2,3])"), @r"
Was expecting 2 coordinates but instead found 3.
26:27 _geoPolygon([1,2],[1,2],[1,2,3])
");
insta::assert_snapshot!(p("_geoPolygon([1,2],[1,2,3])"), @r"
The `_geoPolygon` filter expects at least 3 points but only 2 were specified
24:25 _geoPolygon([1,2],[1,2,3])
");
insta::assert_snapshot!(p("_geoPolygon(1)"), @r"
The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.
13:15 _geoPolygon(1)
");
insta::assert_snapshot!(p("_geoPolygon([1,2)"), @r"
The `_geoPolygon` filter doesn't match the expected format: `_geoPolygon([latitude, longitude], [latitude, longitude])`.
17:18 _geoPolygon([1,2)
");
1:26 _geoBoundingBox(1.0, 1.0)
"###);
insta::assert_snapshot!(p("_geoPoint(12, 13, 14)"), @r###"
`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
@@ -1039,15 +938,15 @@ pub mod tests {
34:35 channel = mv OR followers >= 1000)
"###);
insta::assert_snapshot!(p("colour NOT EXIST"), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `colour NOT EXIST`.
insta::assert_snapshot!(p("colour NOT EXIST"), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `colour NOT EXIST`.
1:17 colour NOT EXIST
");
"###);
insta::assert_snapshot!(p("subscribers 100 TO1000"), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `subscribers 100 TO1000`.
insta::assert_snapshot!(p("subscribers 100 TO1000"), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `subscribers 100 TO1000`.
1:23 subscribers 100 TO1000
");
"###);
insta::assert_snapshot!(p("channel = ponce ORdog != 'bernese mountain'"), @r###"
Found unexpected characters at the end of the filter: `ORdog != \'bernese mountain\'`. You probably forgot an `OR` or an `AND` rule.
@@ -1172,38 +1071,38 @@ pub mod tests {
5:7 NOT OR EXISTS AND EXISTS NOT EXISTS
"###);
insta::assert_snapshot!(p(r#"value NULL"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value NULL`.
insta::assert_snapshot!(p(r#"value NULL"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value NULL`.
1:11 value NULL
");
insta::assert_snapshot!(p(r#"value NOT NULL"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value NOT NULL`.
"###);
insta::assert_snapshot!(p(r#"value NOT NULL"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value NOT NULL`.
1:15 value NOT NULL
");
insta::assert_snapshot!(p(r#"value EMPTY"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value EMPTY`.
"###);
insta::assert_snapshot!(p(r#"value EMPTY"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value EMPTY`.
1:12 value EMPTY
");
insta::assert_snapshot!(p(r#"value NOT EMPTY"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value NOT EMPTY`.
"###);
insta::assert_snapshot!(p(r#"value NOT EMPTY"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value NOT EMPTY`.
1:16 value NOT EMPTY
");
insta::assert_snapshot!(p(r#"value IS"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS`.
"###);
insta::assert_snapshot!(p(r#"value IS"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS`.
1:9 value IS
");
insta::assert_snapshot!(p(r#"value IS NOT"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS NOT`.
"###);
insta::assert_snapshot!(p(r#"value IS NOT"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS NOT`.
1:13 value IS NOT
");
insta::assert_snapshot!(p(r#"value IS EXISTS"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS EXISTS`.
"###);
insta::assert_snapshot!(p(r#"value IS EXISTS"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS EXISTS`.
1:16 value IS EXISTS
");
insta::assert_snapshot!(p(r#"value IS NOT EXISTS"#), @r"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `value IS NOT EXISTS`.
"###);
insta::assert_snapshot!(p(r#"value IS NOT EXISTS"#), @r###"
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `value IS NOT EXISTS`.
1:20 value IS NOT EXISTS
");
"###);
}
#[test]


@@ -234,9 +234,6 @@ impl<'a> Dump<'a> {
}
}
KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
KindDump::IndexCompaction { index_uid } => {
KindWithContent::IndexCompaction { index_uid }
}
},
};


@@ -45,7 +45,6 @@ impl From<DateField> for Code {
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Error, Debug)]
pub enum Error {
#[error("{1}")]


@@ -199,7 +199,7 @@ impl IndexMapper {
let uuid = Uuid::new_v4();
self.index_mapping.put(&mut wtxn, name, &uuid)?;
let index_path = self.index_path(uuid);
let index_path = self.base_path.join(uuid.to_string());
fs::create_dir_all(&index_path)?;
// Error if the UUIDv4 somehow already exists in the map, since it should be fresh.
@@ -286,7 +286,7 @@ impl IndexMapper {
};
let index_map = self.index_map.clone();
let index_path = self.index_path(uuid);
let index_path = self.base_path.join(uuid.to_string());
let index_name = name.to_string();
thread::Builder::new()
.name(String::from("index_deleter"))
@@ -341,26 +341,6 @@ impl IndexMapper {
Ok(())
}
/// Closes the specified index.
///
/// This operation involves closing the underlying environment and so can take a long time to complete.
///
/// # Panics
///
/// - If the Index corresponding to the passed name is concurrently being deleted/resized or cannot be found in the
/// in memory hash map.
pub fn close_index(&self, rtxn: &RoTxn, name: &str) -> Result<()> {
let uuid = self
.index_mapping
.get(rtxn, name)?
.ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
// We remove the index from the in-memory index map.
self.index_map.write().unwrap().close_for_resize(&uuid, self.enable_mdb_writemap, 0);
Ok(())
}
/// Return an index, may open it if it wasn't already opened.
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
if let Some((current_name, current_index)) =
@@ -408,7 +388,7 @@ impl IndexMapper {
} else {
continue;
};
let index_path = self.index_path(uuid);
let index_path = self.base_path.join(uuid.to_string());
// take the lock to reopen the environment.
reopen
.reopen(&mut self.index_map.write().unwrap(), &index_path)
@@ -425,7 +405,7 @@ impl IndexMapper {
// if it's not already there.
match index_map.get(&uuid) {
Missing => {
let index_path = self.index_path(uuid);
let index_path = self.base_path.join(uuid.to_string());
break index_map
.create(
@@ -452,14 +432,6 @@ impl IndexMapper {
Ok(index)
}
/// Returns the path of the index.
///
/// The folder located at this path contains the data.mdb,
/// the lock.mdb and an optional data.mdb.cpy file.
pub fn index_path(&self, uuid: Uuid) -> PathBuf {
self.base_path.join(uuid.to_string())
}
pub fn rollback_index(
&self,
rtxn: &RoTxn,
@@ -500,7 +472,7 @@ impl IndexMapper {
};
}
let index_path = self.index_path(uuid);
let index_path = self.base_path.join(uuid.to_string());
Index::rollback(milli::heed::EnvOpenOptions::new().read_txn_without_tls(), index_path, to)
.map_err(|err| crate::Error::from_milli(err, Some(name.to_string())))
}


@@ -317,9 +317,6 @@ fn snapshot_details(d: &Details) -> String {
Details::UpgradeDatabase { from, to } => {
format!("{{ from: {from:?}, to: {to:?} }}")
}
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
}
}
}


@@ -75,7 +75,6 @@ make_enum_progress! {
pub enum TaskCancelationProgress {
RetrievingTasks,
CancelingUpgrade,
CleaningCompactionLeftover,
UpdatingTasks,
}
}
@@ -139,17 +138,6 @@ make_enum_progress! {
}
}
make_enum_progress! {
pub enum IndexCompaction {
RetrieveTheIndex,
CreateTemporaryFile,
CopyAndCompactTheIndex,
PersistTheCompactedIndex,
CloseTheIndex,
ReopenTheIndex,
}
}
make_enum_progress! {
pub enum InnerSwappingTwoIndexes {
RetrieveTheTasks,


@@ -310,8 +310,7 @@ impl Queue {
| self.tasks.status.get(wtxn, &Status::Failed)?.unwrap_or_default()
| self.tasks.status.get(wtxn, &Status::Canceled)?.unwrap_or_default();
let to_delete =
RoaringBitmap::from_sorted_iter(finished.into_iter().take(100_000)).unwrap();
let to_delete = RoaringBitmap::from_iter(finished.into_iter().rev().take(100_000));
// /!\ the len must be at least 2 or else we might enter an infinite loop where we only delete
// the deletion tasks we enqueued ourselves.
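The swap above between the two constructors matters because they have different contracts: `from_sorted_iter` requires strictly ascending values and returns an error otherwise, while the plain `FromIterator` path accepts the reversed order that `.rev().take(100_000)` produces. A small sketch with the roaring crate:

    use roaring::RoaringBitmap;

    fn main() {
        let ids = [1u32, 2, 3];

        // Ascending input satisfies `from_sorted_iter`...
        assert!(RoaringBitmap::from_sorted_iter(ids.iter().copied()).is_ok());
        // ...but a descending iterator makes it return `NonSortedIntegers`.
        assert!(RoaringBitmap::from_sorted_iter(ids.iter().rev().copied()).is_err());

        // `FromIterator` accepts any order, like the reversed queue iterator.
        let bitmap: RoaringBitmap = ids.iter().rev().copied().collect();
        assert_eq!(bitmap.len(), 3);
    }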


@@ -68,14 +68,13 @@ impl From<KindWithContent> for AutobatchKind {
KindWithContent::IndexCreation { .. } => AutobatchKind::IndexCreation,
KindWithContent::IndexUpdate { .. } => AutobatchKind::IndexUpdate,
KindWithContent::IndexSwap { .. } => AutobatchKind::IndexSwap,
KindWithContent::IndexCompaction { .. }
| KindWithContent::TaskCancelation { .. }
KindWithContent::TaskCancelation { .. }
| KindWithContent::TaskDeletion { .. }
| KindWithContent::DumpCreation { .. }
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::SnapshotCreation => {
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
panic!("The autobatcher should never be called with tasks that don't apply to an index.")
}
}
}
@@ -288,10 +287,8 @@ impl BatchKind {
};
match (self, autobatch_kind) {
// We don't batch any of these operations
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => {
Break((this, BatchStopReason::TaskCannotBeBatched { kind, id }))
},
// We don't batch any of these operations
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break((this, BatchStopReason::TaskCannotBeBatched { kind, id })),
// We must not batch tasks that don't have the same index creation rights if the index doesn't already exist.
(this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
Break((this, BatchStopReason::IndexCreationMismatch { id }))


@@ -55,10 +55,6 @@ pub(crate) enum Batch {
UpgradeDatabase {
tasks: Vec<Task>,
},
IndexCompaction {
index_uid: String,
task: Task,
},
}
#[derive(Debug)]
@@ -114,8 +110,7 @@ impl Batch {
| Batch::Dump(task)
| Batch::IndexCreation { task, .. }
| Batch::Export { task }
| Batch::IndexUpdate { task, .. }
| Batch::IndexCompaction { task, .. } => {
| Batch::IndexUpdate { task, .. } => {
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
}
Batch::SnapshotCreation(tasks)
@@ -160,8 +155,7 @@ impl Batch {
IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid, .. }
| IndexCompaction { index_uid, .. } => Some(index_uid),
| IndexDeletion { index_uid, .. } => Some(index_uid),
}
}
}
@@ -181,7 +175,6 @@ impl fmt::Display for Batch {
Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
Batch::Export { .. } => f.write_str("Export")?,
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
};
@@ -519,33 +512,17 @@ impl IndexScheduler {
return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
}
// 3. we get the next task to compact
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
if let Some(task_id) = to_compact.min() {
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
current_batch.processing(Some(&mut task));
current_batch.reason(BatchStopReason::TaskCannotBeBatched {
kind: Kind::IndexCompaction,
id: task_id,
});
let index_uid =
task.index_uid().expect("Compaction task must have an index uid").to_owned();
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
}
// 4. we batch the export.
// 3. we batch the export.
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
if !to_export.is_empty() {
let task_id = to_export.iter().next().expect("There must be at least one export task");
let mut task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
current_batch.processing([&mut task]);
current_batch
.reason(BatchStopReason::TaskCannotBeBatched { kind: Kind::Export, id: task_id });
current_batch.reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::Export });
return Ok(Some((Batch::Export { task }, current_batch)));
}
// 5. we batch the snapshot.
// 4. we batch the snapshot.
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
if !to_snapshot.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
@@ -555,7 +532,7 @@ impl IndexScheduler {
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
}
// 6. we batch the dumps.
// 5. we batch the dumps.
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
if let Some(to_dump) = to_dump.min() {
let mut task =
@@ -568,7 +545,7 @@ impl IndexScheduler {
return Ok(Some((Batch::Dump(task), current_batch)));
}
// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
// 6. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;


@@ -1,27 +1,22 @@
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fs::{remove_file, File};
use std::io::{ErrorKind, Seek, SeekFrom};
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::Ordering;
use byte_unit::Byte;
use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::{self, ChannelCongestion};
use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use milli::update::Settings as MilliSettings;
use roaring::RoaringBitmap;
use tempfile::{PersistError, TempPath};
use time::OffsetDateTime;
use super::create_batch::Batch;
use crate::processing::{
AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
IndexCompaction, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress,
TaskDeletionProgress, UpdateIndexProgress,
InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
UpdateIndexProgress,
};
use crate::utils::{
self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
@@ -29,9 +24,6 @@ use crate::utils::{
};
use crate::{Error, IndexScheduler, Result, TaskId};
/// The name of the copy of the data.mdb file used during compaction.
const DATA_MDB_COPY_NAME: &str = "data.mdb.cpy";
#[derive(Debug, Default)]
pub struct ProcessBatchInfo {
/// The write channel congestion. None when unavailable: settings update.
@@ -426,47 +418,6 @@ impl IndexScheduler {
task.status = Status::Succeeded;
Ok((vec![task], ProcessBatchInfo::default()))
}
Batch::IndexCompaction { index_uid: _, mut task } => {
let KindWithContent::IndexCompaction { index_uid } = &task.kind else {
unreachable!()
};
let rtxn = self.env.read_txn()?;
let ret = catch_unwind(AssertUnwindSafe(|| {
self.apply_compaction(&rtxn, &progress, index_uid)
}));
let (pre_size, post_size) = match ret {
Ok(Ok(stats)) => stats,
Ok(Err(Error::AbortedTask)) => return Err(Error::AbortedTask),
Ok(Err(e)) => return Err(e),
Err(e) => {
let msg = match e.downcast_ref::<&'static str>() {
Some(s) => *s,
None => match e.downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
return Err(Error::Export(Box::new(Error::ProcessBatchPanicked(
msg.to_string(),
))));
}
};
task.status = Status::Succeeded;
if let Some(Details::IndexCompaction {
index_uid: _,
pre_compaction_size,
post_compaction_size,
}) = task.details.as_mut()
{
*pre_compaction_size = Some(Byte::from_u64(pre_size));
*post_compaction_size = Some(Byte::from_u64(post_size));
}
Ok((vec![task], ProcessBatchInfo::default()))
}
Batch::Export { mut task } => {
let KindWithContent::Export { url, api_key, payload_size, indexes } = &task.kind
else {
@@ -542,92 +493,6 @@ impl IndexScheduler {
}
}
fn apply_compaction(
&self,
rtxn: &RoTxn,
progress: &Progress,
index_uid: &str,
) -> Result<(u64, u64)> {
// 1. Verify that the index exists
if !self.index_mapper.index_exists(rtxn, index_uid)? {
return Err(Error::IndexNotFound(index_uid.to_owned()));
}
// 2. We retrieve the index and create a temporary file in the index directory
progress.update_progress(IndexCompaction::RetrieveTheIndex);
let index = self.index_mapper.index(rtxn, index_uid)?;
// the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
self.index_mapper
.set_currently_updating_index(Some((index_uid.to_string(), index.clone())));
progress.update_progress(IndexCompaction::CreateTemporaryFile);
let src_path = index.path().join("data.mdb");
let pre_size = std::fs::metadata(&src_path)?.len();
let dst_path = TempPath::from_path(index.path().join(DATA_MDB_COPY_NAME));
let file = File::create(&dst_path)?;
let mut file = tempfile::NamedTempFile::from_parts(file, dst_path);
// 3. We copy the index data to the temporary file
progress.update_progress(IndexCompaction::CopyAndCompactTheIndex);
index
.copy_to_file(file.as_file_mut(), CompactionOption::Enabled)
.map_err(|error| Error::Milli { error, index_uid: Some(index_uid.to_string()) })?;
// ...and reset the file position as specified in the documentation
file.seek(SeekFrom::Start(0))?;
// 4. We replace the index data file with the temporary file
progress.update_progress(IndexCompaction::PersistTheCompactedIndex);
match file.persist(src_path) {
Ok(file) => file.sync_all()?,
// TODO see if we have a _resource busy_ error and probably handle this by:
// 1. closing the index, 2. replacing and 3. reopening it
Err(PersistError { error, file: _ }) => return Err(Error::IoError(error)),
};
// 5. Prepare to close the index
progress.update_progress(IndexCompaction::CloseTheIndex);
// unmark that the index is the processing one so we don't keep a handle to it, preventing its closing
self.index_mapper.set_currently_updating_index(None);
self.index_mapper.close_index(rtxn, index_uid)?;
drop(index);
progress.update_progress(IndexCompaction::ReopenTheIndex);
// 6. Reopen the index
// The index will use the compacted data file when being reopened
let index = self.index_mapper.index(rtxn, index_uid)?;
// If the update processed successfully, we store the new stats of the
// index. Since the tasks have already been processed, this is a
// non-critical operation: if it fails, we should not fail the entire batch.
let res = || -> Result<_> {
let mut wtxn = self.env.write_txn()?;
let index_rtxn = index.read_txn()?;
let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
.map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
self.index_mapper.store_stats_of(&mut wtxn, index_uid, &stats)?;
wtxn.commit()?;
Ok(stats.database_size)
}();
let post_size = match res {
Ok(post_size) => post_size,
Err(e) => {
tracing::error!(
error = &e as &dyn std::error::Error,
"Could not write the stats of the index"
);
0
}
};
Ok((pre_size, post_size))
}
/// Swap the index `lhs` with the index `rhs`.
fn apply_index_swap(
&self,
@@ -915,10 +780,9 @@ impl IndexScheduler {
let enqueued_tasks = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
// 0. Check if any upgrade or compaction tasks were matched.
// 0. Check if any upgrade task was matched.
// If so, we cancel all the failed or enqueued upgrade tasks.
let upgrade_tasks = &self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)?;
let compaction_tasks = &self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)?;
let is_canceling_upgrade = !matched_tasks.is_disjoint(upgrade_tasks);
if is_canceling_upgrade {
let failed_tasks = self.queue.tasks.get_status(rtxn, Status::Failed)?;
@@ -983,33 +847,7 @@ impl IndexScheduler {
}
}
// 3. If we are cancelling a compaction task, remove the tempfiles after incomplete compactions
for compaction_task in &tasks_to_cancel & compaction_tasks {
progress.update_progress(TaskCancelationProgress::CleaningCompactionLeftover);
let task = self.queue.tasks.get_task(rtxn, compaction_task)?.unwrap();
let Some(Details::IndexCompaction {
index_uid,
pre_compaction_size: _,
post_compaction_size: _,
}) = task.details
else {
unreachable!("wrong details for compaction task {compaction_task}")
};
let index_path = match self.index_mapper.index_mapping.get(rtxn, &index_uid)? {
Some(index_uuid) => self.index_mapper.index_path(index_uuid),
None => continue,
};
if let Err(e) = remove_file(index_path.join(DATA_MDB_COPY_NAME)) {
match e.kind() {
ErrorKind::NotFound => (),
_ => return Err(Error::IoError(e)),
}
}
}
// 4. We now have a list of tasks to cancel, cancel them
// 3. We now have a list of tasks to cancel, cancel them
let (task_progress, progress_obj) = AtomicTaskStep::new(tasks_to_cancel.len() as u32);
progress.update_progress(progress_obj);


@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }


@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
----------------------------------------------------------------------
### Status:
enqueued [0,]


@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 21, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.21.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -722,7 +722,7 @@ fn basic_get_stats() {
let kind = index_creation_task("whalo", "fish");
let _task = index_scheduler.register(kind, None, false).unwrap();
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
{
"indexes": {
"catto": 1,
@@ -742,7 +742,6 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
@@ -754,10 +753,10 @@ fn basic_get_stats() {
"upgradeDatabase": 0
}
}
"###);
"#);
handle.advance_till([Start, BatchCreated]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
{
"indexes": {
"catto": 1,
@@ -777,7 +776,6 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
@@ -789,7 +787,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0
}
}
"###);
"#);
handle.advance_till([
InsideProcessBatch,
@@ -799,7 +797,7 @@ fn basic_get_stats() {
Start,
BatchCreated,
]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
{
"indexes": {
"catto": 1,
@@ -819,7 +817,6 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
@@ -831,7 +828,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0
}
}
"###);
"#);
// now we make one more batch, the started_at field of the new tasks will be past `second_start_time`
handle.advance_till([
@@ -842,7 +839,7 @@ fn basic_get_stats() {
Start,
BatchCreated,
]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
{
"indexes": {
"catto": 1,
@@ -862,7 +859,6 @@ fn basic_get_stats() {
"documentEdition": 0,
"dumpCreation": 0,
"export": 0,
"indexCompaction": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
@@ -874,7 +870,7 @@ fn basic_get_stats() {
"upgradeDatabase": 0
}
}
"###);
"#);
}
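The snapshot hunks above only tighten the raw-string delimiters around inline snapshots: in Rust, `r#"…"#` and `r###"…"###` denote the same kind of raw string, and the number of `#`s on the delimiter only has to exceed whatever sequence appears in the content. A dependency-free sketch of that rule (the snapshot macros themselves are unchanged):

fn main() {
    // One `#` is enough when the payload only contains plain double quotes…
    let json = r#"{"indexes": {"catto": 1}}"#;
    // …but a payload containing `"#` forces extra `#`s on the delimiter.
    let tricky = r##"ends with "# inside"##;
    assert!(json.contains("catto"));
    assert!(tricky.contains("\"#"));
}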
#[test]

View File

@@ -45,9 +45,6 @@ pub fn upgrade_index_scheduler(
(1, 19, _) => 0,
(1, 20, _) => 0,
(1, 21, _) => 0,
(1, 22, _) => 0,
(1, 23, _) => 0,
(1, 24, _) => 0,
(major, minor, patch) => {
if major > current_major
|| (major == current_major && minor > current_minor)

View File

@@ -256,15 +256,14 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
use KindWithContent as K;
let mut index_uids = vec![];
match &mut task.kind {
K::DocumentAdditionOrUpdate { index_uid, .. }
| K::DocumentEdition { index_uid, .. }
| K::DocumentDeletion { index_uid, .. }
| K::DocumentDeletionByFilter { index_uid, .. }
| K::DocumentClear { index_uid }
| K::SettingsUpdate { index_uid, .. }
| K::IndexDeletion { index_uid }
| K::IndexCreation { index_uid, .. }
| K::IndexCompaction { index_uid, .. } => index_uids.push(index_uid),
K::DocumentAdditionOrUpdate { index_uid, .. } => index_uids.push(index_uid),
K::DocumentEdition { index_uid, .. } => index_uids.push(index_uid),
K::DocumentDeletion { index_uid, .. } => index_uids.push(index_uid),
K::DocumentDeletionByFilter { index_uid, .. } => index_uids.push(index_uid),
K::DocumentClear { index_uid } => index_uids.push(index_uid),
K::SettingsUpdate { index_uid, .. } => index_uids.push(index_uid),
K::IndexDeletion { index_uid } => index_uids.push(index_uid),
K::IndexCreation { index_uid, .. } => index_uids.push(index_uid),
K::IndexUpdate { index_uid, new_index_uid, .. } => {
index_uids.push(index_uid);
if let Some(new_uid) = new_index_uid {
@@ -619,13 +618,6 @@ impl crate::IndexScheduler {
Details::UpgradeDatabase { from: _, to: _ } => {
assert_eq!(kind.as_kind(), Kind::UpgradeDatabase);
}
Details::IndexCompaction {
index_uid: _,
pre_compaction_size: _,
post_compaction_size: _,
} => {
assert_eq!(kind.as_kind(), Kind::IndexCompaction);
}
}
}
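The arm-splitting in the hunk above is purely stylistic: Rust or-patterns let several variants share one arm as long as every alternative binds the same names with the same types. A minimal sketch with illustrative types (not the crate's real `KindWithContent`):

enum Kind {
    Create { index_uid: String },
    Delete { index_uid: String },
    Swap { lhs: String, rhs: String },
}

fn index_uids(kind: &mut Kind) -> Vec<&mut String> {
    match kind {
        // Or-pattern: both variants bind `index_uid`, so a single arm covers them.
        Kind::Create { index_uid } | Kind::Delete { index_uid } => vec![index_uid],
        Kind::Swap { lhs, rhs } => vec![lhs, rhs],
    }
}

fn main() {
    let mut kind = Kind::Create { index_uid: "catto".into() };
    for uid in index_uids(&mut kind) {
        let renamed = uid.replace("catto", "doggo");
        *uid = renamed;
    }
}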

View File

@@ -271,10 +271,9 @@ macro_rules! json_string {
#[cfg(test)]
mod tests {
use uuid::Uuid;
use crate as meili_snap;
use crate::UUID_IN_MESSAGE_RE;
use uuid::Uuid;
#[test]
fn snap() {

View File

@@ -109,7 +109,6 @@ impl HeedAuthStore {
Action::IndexesGet,
Action::IndexesUpdate,
Action::IndexesSwap,
Action::IndexesCompact,
]
.iter(),
);

View File

@@ -5,7 +5,6 @@ use actix_web::{self as aweb, HttpResponseBuilder};
use aweb::http::header;
use aweb::rt::task::JoinError;
use convert_case::Casing;
use milli::cellulite;
use milli::heed::{Error as HeedError, MdbError};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
@@ -240,7 +239,6 @@ InconsistentDocumentChangeHeaders , InvalidRequest , BAD_REQU
InvalidDocumentFilter , InvalidRequest , BAD_REQUEST ;
InvalidDocumentSort , InvalidRequest , BAD_REQUEST ;
InvalidDocumentGeoField , InvalidRequest , BAD_REQUEST ;
InvalidDocumentGeojsonField , InvalidRequest , BAD_REQUEST ;
InvalidHeaderValue , InvalidRequest , BAD_REQUEST ;
InvalidVectorDimensions , InvalidRequest , BAD_REQUEST ;
InvalidVectorsType , InvalidRequest , BAD_REQUEST ;
@@ -503,9 +501,7 @@ impl ErrorCode for milli::Error {
Code::InvalidFacetSearchFacetName
}
UserError::CriterionError(_) => Code::InvalidSettingsRankingRules,
UserError::InvalidGeoField { .. } | UserError::GeoJsonError(_) => {
Code::InvalidDocumentGeoField
}
UserError::InvalidGeoField { .. } => Code::InvalidDocumentGeoField,
UserError::InvalidVectorDimensions { .. }
| UserError::InvalidIndexingVectorDimensions { .. } => {
Code::InvalidVectorDimensions
@@ -529,17 +525,6 @@ impl ErrorCode for milli::Error {
| UserError::DocumentEditionCompilationError(_) => {
Code::EditDocumentsByFunctionError
}
UserError::CelluliteError(err) => match err {
cellulite::Error::BuildCanceled
| cellulite::Error::VersionMismatchOnBuild(_)
| cellulite::Error::DatabaseDoesntExists
| cellulite::Error::Heed(_)
| cellulite::Error::InvalidGeometry(_)
| cellulite::Error::InternalDocIdMissing(_, _)
| cellulite::Error::CannotConvertLineToCell(_, _, _) => Code::Internal,
cellulite::Error::InvalidGeoJson(_) => Code::InvalidDocumentGeojsonField,
},
UserError::MalformedGeojson(_) => Code::InvalidDocumentGeojsonField,
}
}
}

View File

@@ -380,9 +380,6 @@ pub enum Action {
#[serde(rename = "webhooks.*")]
#[deserr(rename = "webhooks.*")]
WebhooksAll,
#[serde(rename = "indexes.compact")]
#[deserr(rename = "indexes.compact")]
IndexesCompact,
}
impl Action {
@@ -401,7 +398,6 @@ impl Action {
INDEXES_UPDATE => Some(Self::IndexesUpdate),
INDEXES_DELETE => Some(Self::IndexesDelete),
INDEXES_SWAP => Some(Self::IndexesSwap),
INDEXES_COMPACT => Some(Self::IndexesCompact),
TASKS_ALL => Some(Self::TasksAll),
TASKS_CANCEL => Some(Self::TasksCancel),
TASKS_DELETE => Some(Self::TasksDelete),
@@ -466,7 +462,6 @@ impl Action {
IndexesUpdate => false,
IndexesDelete => false,
IndexesSwap => false,
IndexesCompact => false,
TasksCancel => false,
TasksDelete => false,
TasksGet => true,
@@ -518,7 +513,6 @@ pub mod actions {
pub const INDEXES_UPDATE: u8 = IndexesUpdate.repr();
pub const INDEXES_DELETE: u8 = IndexesDelete.repr();
pub const INDEXES_SWAP: u8 = IndexesSwap.repr();
pub const INDEXES_COMPACT: u8 = IndexesCompact.repr();
pub const TASKS_ALL: u8 = TasksAll.repr();
pub const TASKS_CANCEL: u8 = TasksCancel.repr();
pub const TASKS_DELETE: u8 = TasksDelete.repr();
@@ -620,7 +614,6 @@ pub(crate) mod test {
assert!(WebhooksDelete.repr() == 47 && WEBHOOKS_DELETE == 47);
assert!(WebhooksCreate.repr() == 48 && WEBHOOKS_CREATE == 48);
assert!(WebhooksAll.repr() == 49 && WEBHOOKS_ALL == 49);
assert!(IndexesCompact.repr() == 50 && INDEXES_COMPACT == 50);
}
#[test]

View File

@@ -1,5 +1,3 @@
#![allow(clippy::result_large_err)]
pub mod batch_view;
pub mod batches;
pub mod compression;

View File

@@ -142,11 +142,6 @@ pub struct DetailsView {
pub old_index_uid: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub new_index_uid: Option<String>,
// index compaction
#[serde(skip_serializing_if = "Option::is_none")]
pub pre_compaction_size: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub post_compaction_size: Option<String>,
}
impl DetailsView {
@@ -319,24 +314,6 @@ impl DetailsView {
// We should never be able to batch multiple renames at the same time.
(Some(left), Some(_right)) => Some(left),
},
pre_compaction_size: match (
self.pre_compaction_size.clone(),
other.pre_compaction_size.clone(),
) {
(None, None) => None,
(None, Some(size)) | (Some(size), None) => Some(size),
// We should never be able to batch multiple compactions at the same time.
(Some(left), Some(_right)) => Some(left),
},
post_compaction_size: match (
self.post_compaction_size.clone(),
other.post_compaction_size.clone(),
) {
(None, None) => None,
(None, Some(size)) | (Some(size), None) => Some(size),
// We should never be able to batch multiple compactions at the same time.
(Some(left), Some(_right)) => Some(left),
},
}
}
}
@@ -438,15 +415,6 @@ impl From<Details> for DetailsView {
upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)),
..Default::default()
},
Details::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
DetailsView {
pre_compaction_size: pre_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
post_compaction_size: post_compaction_size
.map(|size| size.get_appropriate_unit(UnitType::Both).to_string()),
..Default::default()
}
}
}
}
}
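Every removed merge arm has the same shape: keep whichever side is `Some` and prefer the left when both are, which is exactly the behavior of `Option::or` in the standard library. A dependency-free sketch:

// Sketch: the merge shape used above, written once generically.
fn merge<T>(left: Option<T>, right: Option<T>) -> Option<T> {
    match (left, right) {
        (None, None) => None,
        (None, Some(v)) | (Some(v), None) => Some(v),
        (Some(l), Some(_)) => Some(l),
    }
}

fn main() {
    assert_eq!(merge(None, Some(2)), Some(2));
    assert_eq!(merge(Some(1), Some(2)), Some(1)); // left wins, like the batched details
    assert_eq!(merge::<u8>(None, None), None);
    // Equivalent via the standard library:
    assert_eq!(Some(1).or(Some(2)), Some(1));
}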

View File

@@ -67,8 +67,7 @@ impl Task {
| SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid }
| IndexCompaction { index_uid } => Some(index_uid),
| IndexDeletion { index_uid } => Some(index_uid),
}
}
@@ -95,8 +94,7 @@ impl Task {
| KindWithContent::DumpCreation { .. }
| KindWithContent::SnapshotCreation
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::IndexCompaction { .. } => None,
| KindWithContent::UpgradeDatabase { .. } => None,
}
}
}
@@ -172,9 +170,6 @@ pub enum KindWithContent {
UpgradeDatabase {
from: (u32, u32, u32),
},
IndexCompaction {
index_uid: String,
},
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
@@ -211,7 +206,6 @@ impl KindWithContent {
KindWithContent::SnapshotCreation => Kind::SnapshotCreation,
KindWithContent::Export { .. } => Kind::Export,
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
}
}
@@ -232,8 +226,7 @@ impl KindWithContent {
| DocumentClear { index_uid }
| SettingsUpdate { index_uid, .. }
| IndexCreation { index_uid, .. }
| IndexDeletion { index_uid }
| IndexCompaction { index_uid } => vec![index_uid],
| IndexDeletion { index_uid } => vec![index_uid],
IndexUpdate { index_uid, new_index_uid, .. } => {
let mut indexes = vec![index_uid.as_str()];
if let Some(new_uid) = new_index_uid {
@@ -332,11 +325,6 @@ impl KindWithContent {
versioning::VERSION_PATCH,
),
}),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
}
}
@@ -419,11 +407,6 @@ impl KindWithContent {
versioning::VERSION_PATCH,
),
}),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
}
}
}
@@ -486,11 +469,6 @@ impl From<&KindWithContent> for Option<Details> {
versioning::VERSION_PATCH,
),
}),
KindWithContent::IndexCompaction { index_uid } => Some(Details::IndexCompaction {
index_uid: index_uid.clone(),
pre_compaction_size: None,
post_compaction_size: None,
}),
}
}
}
@@ -601,7 +579,6 @@ pub enum Kind {
SnapshotCreation,
Export,
UpgradeDatabase,
IndexCompaction,
}
impl Kind {
@@ -613,8 +590,7 @@ impl Kind {
| Kind::SettingsUpdate
| Kind::IndexCreation
| Kind::IndexDeletion
| Kind::IndexUpdate
| Kind::IndexCompaction => true,
| Kind::IndexUpdate => true,
Kind::IndexSwap
| Kind::TaskCancelation
| Kind::TaskDeletion
@@ -642,7 +618,6 @@ impl Display for Kind {
Kind::SnapshotCreation => write!(f, "snapshotCreation"),
Kind::Export => write!(f, "export"),
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
Kind::IndexCompaction => write!(f, "indexCompaction"),
}
}
}
@@ -678,8 +653,6 @@ impl FromStr for Kind {
Ok(Kind::Export)
} else if kind.eq_ignore_ascii_case("upgradeDatabase") {
Ok(Kind::UpgradeDatabase)
} else if kind.eq_ignore_ascii_case("indexCompaction") {
Ok(Kind::IndexCompaction)
} else {
Err(ParseTaskKindError(kind.to_owned()))
}
@@ -765,11 +738,6 @@ pub enum Details {
from: (u32, u32, u32),
to: (u32, u32, u32),
},
IndexCompaction {
index_uid: String,
pre_compaction_size: Option<Byte>,
post_compaction_size: Option<Byte>,
},
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
@@ -832,10 +800,6 @@ impl Details {
Self::ClearAll { deleted_documents } => *deleted_documents = Some(0),
Self::TaskCancelation { canceled_tasks, .. } => *canceled_tasks = Some(0),
Self::TaskDeletion { deleted_tasks, .. } => *deleted_tasks = Some(0),
Self::IndexCompaction { pre_compaction_size, post_compaction_size, .. } => {
*pre_compaction_size = None;
*post_compaction_size = None;
}
Self::SettingsUpdate { .. }
| Self::IndexInfo { .. }
| Self::Dump { .. }

View File

@@ -91,7 +91,7 @@ time = { version = "0.3.41", features = [
] }
tokio = { version = "1.45.1", features = ["full"] }
toml = "0.8.23"
uuid = { version = "1.18.0", features = ["serde", "v4", "v7"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }
serde_urlencoded = "0.7.1"
termcolor = "1.4.1"
url = { version = "2.5.4", features = ["serde"] }
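Dropping the `v7` feature is consistent with removing the `Uuid::now_v7()` call sites elsewhere in this compare: in the `uuid` 1.x crate, `now_v7()` is gated behind the `v7` feature just as `new_v4()` is behind `v4`. A small sketch assuming both features are enabled:

// Cargo.toml (sketch): uuid = { version = "1", features = ["v4", "v7"] }
use uuid::Uuid;

fn main() {
    let random_id = Uuid::new_v4();   // requires the `v4` feature
    let sortable_id = Uuid::now_v7(); // requires the `v7` feature; timestamp-ordered
    println!("{random_id} / {sortable_id}");
}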

View File

@@ -205,8 +205,6 @@ struct Infos {
experimental_no_snapshot_compaction: bool,
experimental_no_edition_2024_for_dumps: bool,
experimental_no_edition_2024_for_settings: bool,
experimental_no_edition_2024_for_prefix_post_processing: bool,
experimental_no_edition_2024_for_facet_post_processing: bool,
experimental_vector_store_setting: bool,
gpu_enabled: bool,
db_path: bool,
@@ -298,8 +296,6 @@ impl Infos {
skip_index_budget: _,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = indexer_options;
let RuntimeTogglableFeatures {
@@ -369,8 +365,6 @@ impl Infos {
ssl_resumption,
ssl_tickets,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
}
}
}

View File

@@ -55,10 +55,6 @@ const MEILI_EXPERIMENTAL_ENABLE_LOGS_ROUTE: &str = "MEILI_EXPERIMENTAL_ENABLE_LO
const MEILI_EXPERIMENTAL_CONTAINS_FILTER: &str = "MEILI_EXPERIMENTAL_CONTAINS_FILTER";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_SETTINGS";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING";
const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS";
const MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE: &str = "MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE";
const MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER: &str = "MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER";
@@ -776,22 +772,6 @@ pub struct IndexerOpts {
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS)]
#[serde(default)]
pub experimental_no_edition_2024_for_dumps: bool,
/// Experimental: disable the edition-2024 algorithm for prefix post-processing. For more
/// information, see: <https://github.com/orgs/meilisearch/discussions/862>
///
/// Enables the experimental flag that skips the edition-2024 prefix computation.
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING)]
#[serde(default)]
pub experimental_no_edition_2024_for_prefix_post_processing: bool,
/// Experimental: disable the edition-2024 algorithm for facet post-processing. For more
/// information, see: <https://github.com/orgs/meilisearch/discussions/862>
///
/// Enables the experimental flag that skips the edition-2024 facet computation.
#[clap(long, env = MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING)]
#[serde(default)]
pub experimental_no_edition_2024_for_facet_post_processing: bool,
}
impl IndexerOpts {
@@ -803,8 +783,6 @@ impl IndexerOpts {
skip_index_budget: _,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = self;
if let Some(max_indexing_memory) = max_indexing_memory.0 {
export_to_env_if_not_present(
@@ -830,18 +808,6 @@ impl IndexerOpts {
experimental_no_edition_2024_for_dumps.to_string(),
);
}
if experimental_no_edition_2024_for_prefix_post_processing {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_PREFIX_POST_PROCESSING,
experimental_no_edition_2024_for_prefix_post_processing.to_string(),
);
}
if experimental_no_edition_2024_for_facet_post_processing {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_FACET_POST_PROCESSING,
experimental_no_edition_2024_for_facet_post_processing.to_string(),
);
}
}
}
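The deleted branches follow the file's recurring pattern: flags set on the CLI are mirrored into the environment unless the variable is already present. A sketch that only assumes the helper's observable behavior (the real `export_to_env_if_not_present` lives elsewhere in this module):

// Stand-in for the "export only when absent" helper used above.
fn export_to_env_if_not_present(key: &str, value: String) {
    if std::env::var_os(key).is_none() {
        // Process-global, so this is only safe to call during single-threaded startup
        // (and set_var is an unsafe fn under the 2024 edition).
        std::env::set_var(key, value);
    }
}

fn main() {
    export_to_env_if_not_present("MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS", "true".into());
    assert!(std::env::var("MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS").is_ok());
}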
@@ -867,10 +833,6 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
chunk_compression_level: Default::default(),
documents_chunk_size: Default::default(),
max_nb_chunks: Default::default(),
experimental_no_edition_2024_for_prefix_post_processing: other
.experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing: other
.experimental_no_edition_2024_for_facet_post_processing,
})
}
}

View File

@@ -1,84 +0,0 @@
use actix_web::web::{self, Data};
use actix_web::{HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::keys::actions;
use meilisearch_types::tasks::KindWithContent;
use tracing::debug;
use utoipa::OpenApi;
use super::ActionPolicy;
use crate::analytics::Analytics;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::SummarizedTaskView;
#[derive(OpenApi)]
#[openapi(
paths(compact),
tags(
(
name = "Compact an index",
description = "The /compact route uses compacts the database to reorganize and make it smaller and more efficient.",
external_docs(url = "https://www.meilisearch.com/docs/reference/api/compact"),
),
),
)]
pub struct CompactApi;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(compact))));
}
/// Compact an index
#[utoipa::path(
post,
path = "{indexUid}/compact",
tag = "Compact an index",
security(("Bearer" = ["search", "*"])),
params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),
responses(
(status = ACCEPTED, description = "Task successfully enqueued", body = SummarizedTaskView, content_type = "application/json", example = json!(
{
"taskUid": 147,
"indexUid": null,
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "2024-08-08T17:05:55.791772Z"
}
)),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "The Authorization header is missing. It must use the bearer authorization method.",
"code": "missing_authorization_header",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#missing_authorization_header"
}
)),
)
)]
pub async fn compact(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_COMPACT }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
req: HttpRequest,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
analytics.publish(IndexCompacted::default(), &req);
let task = KindWithContent::IndexCompaction { index_uid: index_uid.to_string() };
let task =
match tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false))
.await?
{
Ok(task) => task,
Err(e) => return Err(e.into()),
};
debug!(returns = ?task, "Compact the {index_uid} index");
Ok(HttpResponse::Accepted().json(SummarizedTaskView::from(task)))
}
crate::empty_analytics!(IndexCompacted, "Index Compacted");
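For reference, the removed route registered `POST /indexes/{indexUid}/compact` and replied `202 Accepted` with a summarized task view. A hedged client sketch (reqwest with its `blocking` feature is an assumption; the base URL and key are placeholders):

// Sketch: enqueue a compaction task against a local instance.
fn main() -> Result<(), reqwest::Error> {
    let client = reqwest::blocking::Client::new();
    let response = client
        .post("http://localhost:7700/indexes/movies/compact")
        .bearer_auth("MEILI_MASTER_KEY") // placeholder key
        .send()?;
    // A successful call returns 202 Accepted with a SummarizedTaskView body.
    println!("{}", response.status());
    println!("{}", response.text()?);
    Ok(())
}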

View File

@@ -28,7 +28,6 @@ use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::is_dry_run;
use crate::Opt;
pub mod compact;
pub mod documents;
mod enterprise_edition;
pub mod facet_search;
@@ -50,9 +49,8 @@ pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TAS
(path = "/", api = facet_search::FacetSearchApi),
(path = "/", api = similar::SimilarApi),
(path = "/", api = settings::SettingsApi),
(path = "/", api = compact::CompactApi),
),
paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats, compact::compact),
paths(list_indexes, create_index, get_index, update_index, delete_index, get_index_stats),
tags(
(
name = "Indexes",
@@ -82,8 +80,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
.service(web::scope("/search").configure(search::configure))
.service(web::scope("/facet-search").configure(facet_search::configure))
.service(web::scope("/similar").configure(similar::configure))
.service(web::scope("/settings").configure(settings::configure))
.service(web::scope("/compact").configure(compact::configure)),
.service(web::scope("/settings").configure(settings::configure)),
);
}

View File

@@ -13,7 +13,6 @@ use meilisearch_types::serde_cs::vec::CS;
use serde_json::Value;
use tracing::debug;
use utoipa::{IntoParams, OpenApi};
use uuid::Uuid;
use crate::analytics::Analytics;
use crate::error::MeilisearchHttpError;
@@ -22,12 +21,11 @@ use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::metrics::MEILISEARCH_DEGRADED_SEARCH_REQUESTS;
use crate::routes::indexes::search_analytics::{SearchAggregator, SearchGET, SearchPOST};
use crate::routes::parse_include_metadata_header;
use crate::search::{
add_search_rules, perform_search, HybridQuery, MatchingStrategy, RankingScoreThreshold,
RetrieveVectors, SearchKind, SearchParams, SearchQuery, SearchResult, SemanticRatio,
DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG,
DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO,
RetrieveVectors, SearchKind, SearchQuery, SearchResult, SemanticRatio, DEFAULT_CROP_LENGTH,
DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG,
DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO,
};
use crate::search_queue::SearchQueue;
@@ -327,8 +325,7 @@ pub async fn search_with_url_query(
req: HttpRequest,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let request_uid = Uuid::now_v7();
debug!(request_uid = ?request_uid, parameters = ?params, "Search get");
debug!(parameters = ?params, "Search get");
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let mut query: SearchQuery = params.into_inner().try_into()?;
@@ -346,20 +343,14 @@ pub async fn search_with_url_query(
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
let permit = search_queue.try_get_search_permit().await?;
let include_metadata = parse_include_metadata_header(&req);
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
SearchParams {
index_uid: index_uid.to_string(),
query,
search_kind,
retrieve_vectors: retrieve_vector,
features: index_scheduler.features(),
request_uid,
include_metadata,
},
index_uid.to_string(),
&index,
query,
search_kind,
retrieve_vector,
index_scheduler.features(),
)
})
.await;
@@ -372,7 +363,7 @@ pub async fn search_with_url_query(
let search_result = search_result?;
debug!(request_uid = ?request_uid, returns = ?search_result, "Search get");
debug!(returns = ?search_result, "Search get");
Ok(HttpResponse::Ok().json(search_result))
}
@@ -441,10 +432,9 @@ pub async fn search_with_post(
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let request_uid = Uuid::now_v7();
let mut query = params.into_inner();
debug!(request_uid = ?request_uid, parameters = ?query, "Search post");
debug!(parameters = ?query, "Search post");
// Tenant token search_rules.
if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
@@ -459,21 +449,15 @@ pub async fn search_with_post(
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
let include_metadata = parse_include_metadata_header(&req);
let permit = search_queue.try_get_search_permit().await?;
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
SearchParams {
index_uid: index_uid.to_string(),
query,
search_kind,
retrieve_vectors,
features: index_scheduler.features(),
request_uid,
include_metadata,
},
index_uid.to_string(),
&index,
query,
search_kind,
retrieve_vectors,
index_scheduler.features(),
)
})
.await;
@@ -489,7 +473,7 @@ pub async fn search_with_post(
let search_result = search_result?;
debug!(request_uid = ?request_uid, returns = ?search_result, "Search post");
debug!(returns = ?search_result, "Search post");
Ok(HttpResponse::Ok().json(search_result))
}

View File

@@ -234,8 +234,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
facet_stats: _,
degraded,
used_negative_operator,
request_uid: _,
metadata: _,
} = result;
self.total_succeeded = self.total_succeeded.saturating_add(1);

View File

@@ -45,7 +45,6 @@ use crate::routes::webhooks::{WebhookResults, WebhookSettings, WebhookWithMetada
use crate::search::{
FederatedSearch, FederatedSearchResult, Federation, FederationOptions, MergeFacets,
SearchQueryWithIndex, SearchResultWithIndex, SimilarQuery, SimilarResult,
INCLUDE_METADATA_HEADER,
};
use crate::search_queue::SearchQueue;
use crate::Opt;
@@ -185,18 +184,6 @@ pub fn is_dry_run(req: &HttpRequest, opt: &Opt) -> Result<bool, ResponseError> {
.is_some_and(|s| s.to_lowercase() == "true"))
}
/// Parse the `Meili-Include-Metadata` header from an HTTP request.
///
/// Returns `true` if the header is present and set to "true" or "1" (case-insensitive).
/// Returns `false` if the header is not present or has any other value.
pub fn parse_include_metadata_header(req: &HttpRequest) -> bool {
req.headers()
.get(INCLUDE_METADATA_HEADER)
.and_then(|h| h.to_str().ok())
.map(|v| matches!(v.to_lowercase().as_str(), "true" | "1"))
.unwrap_or(false)
}
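The removed helper treats only `true` and `1` (case-insensitively) as opt-in; anything else, including an absent header, means `false`. The same decision, restated as a dependency-free sketch:

// Sketch: the accepted spellings of the removed header, per the parser above.
fn accepts(value: &str) -> bool {
    matches!(value.to_lowercase().as_str(), "true" | "1")
}

fn main() {
    assert!(accepts("true") && accepts("TRUE") && accepts("1"));
    assert!(!accepts("yes") && !accepts("0") && !accepts(""));
}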
#[derive(Debug, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct SummarizedTaskView {

View File

@@ -9,7 +9,6 @@ use meilisearch_types::keys::actions;
use serde::Serialize;
use tracing::debug;
use utoipa::{OpenApi, ToSchema};
use uuid::Uuid;
use super::multi_search_analytics::MultiSearchAggregator;
use crate::analytics::Analytics;
@@ -18,11 +17,10 @@ use crate::extractors::authentication::policies::ActionPolicy;
use crate::extractors::authentication::{AuthenticationError, GuardedData};
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::search::search_kind;
use crate::routes::parse_include_metadata_header;
use crate::search::{
add_search_rules, perform_federated_search, perform_search, FederatedSearch,
FederatedSearchResult, RetrieveVectors, SearchParams, SearchQueryWithIndex,
SearchResultWithIndex, PROXY_SEARCH_HEADER, PROXY_SEARCH_HEADER_VALUE,
FederatedSearchResult, RetrieveVectors, SearchQueryWithIndex, SearchResultWithIndex,
PROXY_SEARCH_HEADER, PROXY_SEARCH_HEADER_VALUE,
};
use crate::search_queue::SearchQueue;
@@ -153,7 +151,6 @@ pub async fn multi_search_with_post(
// Since we don't want to process half of the search requests and then get a permit refused
// we're going to get one permit for the whole duration of the multi-search request.
let permit = search_queue.try_get_search_permit().await?;
let request_uid = Uuid::now_v7();
let federated_search = params.into_inner();
@@ -189,31 +186,16 @@ pub async fn multi_search_with_post(
err
})?;
let include_metadata = parse_include_metadata_header(&req);
let response = match federation {
Some(federation) => {
debug!(
request_uid = ?request_uid,
federation = ?federation,
parameters = ?queries,
"Federated-search"
);
// check remote header
let is_proxy = req
.headers()
.get(PROXY_SEARCH_HEADER)
.is_some_and(|value| value.as_bytes() == PROXY_SEARCH_HEADER_VALUE.as_bytes());
let search_result = perform_federated_search(
&index_scheduler,
queries,
federation,
features,
is_proxy,
request_uid,
include_metadata,
)
.await;
let search_result =
perform_federated_search(&index_scheduler, queries, federation, features, is_proxy)
.await;
permit.drop().await;
if search_result.is_ok() {
@@ -221,13 +203,6 @@ pub async fn multi_search_with_post(
}
analytics.publish(multi_aggregate, &req);
debug!(
request_uid = ?request_uid,
returns = ?search_result,
"Federated-search"
);
HttpResponse::Ok().json(search_result?)
}
None => {
@@ -241,12 +216,7 @@ pub async fn multi_search_with_post(
.map(SearchQueryWithIndex::into_index_query_federation)
.enumerate()
{
debug!(
request_uid = ?request_uid,
on_index = query_index,
parameters = ?query,
"Multi-search"
);
debug!(on_index = query_index, parameters = ?query, "Multi-search");
if federation_options.is_some() {
return Err((
@@ -282,16 +252,12 @@ pub async fn multi_search_with_post(
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
SearchParams {
index_uid: index_uid_str.clone(),
query,
search_kind,
retrieve_vectors: retrieve_vector,
features,
request_uid,
include_metadata,
},
index_uid_str.clone(),
&index,
query,
search_kind,
retrieve_vector,
features,
)
})
.await
@@ -320,11 +286,7 @@ pub async fn multi_search_with_post(
err
})?;
debug!(
request_uid = ?request_uid,
returns = ?search_results,
"Multi-search"
);
debug!(returns = ?search_results, "Multi-search");
HttpResponse::Ok().json(SearchResults { results: search_results })
}

View File

@@ -226,14 +226,14 @@ mod tests {
{
let params = "types=createIndex";
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
snapshot!(meili_snap::json_string!(err), @r###"
snapshot!(meili_snap::json_string!(err), @r#"
{
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
}
"###);
"#);
}
}
#[test]

View File

@@ -1,4 +1,3 @@
use core::convert::Infallible;
use std::collections::BTreeMap;
use std::str::FromStr;
@@ -8,6 +7,7 @@ use actix_http::header::{
};
use actix_web::web::{self, Data, Path};
use actix_web::{HttpRequest, HttpResponse};
use core::convert::Infallible;
use deserr::actix_web::AwebJson;
use deserr::{DeserializeError, Deserr, ValuePointerRef};
use index_scheduler::IndexScheduler;
@@ -24,12 +24,12 @@ use tracing::debug;
use url::Url;
use utoipa::{OpenApi, ToSchema};
use uuid::Uuid;
use WebhooksError::*;
use crate::analytics::{Aggregate, Analytics};
use crate::extractors::authentication::policies::ActionPolicy;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use WebhooksError::*;
#[derive(OpenApi)]
#[openapi(

View File

@@ -17,13 +17,11 @@ use meilisearch_types::milli::vector::Embedding;
use meilisearch_types::milli::{self, DocumentId, OrderBy, TimeBudget, DEFAULT_VALUES_PER_FACET};
use roaring::RoaringBitmap;
use tokio::task::JoinHandle;
use uuid::Uuid;
use super::super::ranking_rules::{self, RankingRules};
use super::super::{
compute_facet_distribution_stats, prepare_search, AttributesFormat, ComputedFacets, HitMaker,
HitsInfo, RetrieveVectors, SearchHit, SearchKind, SearchMetadata, SearchQuery,
SearchQueryWithIndex,
HitsInfo, RetrieveVectors, SearchHit, SearchKind, SearchQuery, SearchQueryWithIndex,
};
use super::proxy::{proxy_search, ProxySearchError, ProxySearchParams};
use super::types::{
@@ -41,58 +39,32 @@ pub async fn perform_federated_search(
federation: Federation,
features: RoFeatures,
is_proxy: bool,
request_uid: Uuid,
include_metadata: bool,
) -> Result<FederatedSearchResult, ResponseError> {
if is_proxy {
features.check_network("Performing a remote federated search")?;
}
let before_search = std::time::Instant::now();
let timeout = std::env::var("MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS")
.ok()
.map(|p| p.parse().unwrap())
.unwrap_or(25);
let deadline = before_search + std::time::Duration::from_secs(timeout);
let deadline = before_search + std::time::Duration::from_secs(9);
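The replaced lines read the deadline from `MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS` and `unwrap()` the parse, so an unparsable value would panic at search time. A sketch of the same lookup with a fallback on both missing and malformed values (the default of 25 seconds is taken from the removed code):

use std::time::{Duration, Instant};

// Sketch: read an override from the environment, falling back to a default on
// both "unset" and "unparsable" instead of panicking.
fn remote_search_deadline(start: Instant, default_secs: u64) -> Instant {
    let secs = std::env::var("MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS")
        .ok()
        .and_then(|raw| raw.parse().ok())
        .unwrap_or(default_secs);
    start + Duration::from_secs(secs)
}

fn main() {
    let deadline = remote_search_deadline(Instant::now(), 25);
    assert!(deadline > Instant::now());
}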
let required_hit_count = federation.limit + federation.offset;
let retrieve_vectors = queries.iter().any(|q| q.retrieve_vectors);
let network = index_scheduler.network();
// Preconstruct metadata, keeping the original query order for later metadata building
let precomputed_query_metadata: Option<Vec<_>> = include_metadata.then(|| {
queries
.iter()
.map(|q| {
(
q.index_uid.to_string(),
q.federation_options.as_ref().and_then(|o| o.remote.clone()),
)
})
.collect()
});
// this implementation partitions the queries by index to guarantee an important property:
// - all the queries to a particular index use the same read transaction.
// This is an important property, otherwise we cannot guarantee the self-consistency of the results.
// 1. partition queries by host and index
let mut partitioned_queries = PartitionedQueries::new();
for (query_index, federated_query) in queries.into_iter().enumerate() {
partitioned_queries.partition(federated_query, query_index, &network, features)?
}
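As the comment above explains, grouping queries per index is what lets every query against that index run under a single read transaction. A dependency-free sketch of that grouping step, with illustrative types (not the crate's `PartitionedQueries`):

use std::collections::BTreeMap;

struct FederatedQuery { index_uid: String, q: String }

// Sketch: group queries by index so each group can later share one read transaction.
fn partition_by_index(queries: Vec<FederatedQuery>) -> BTreeMap<String, Vec<(usize, String)>> {
    let mut by_index: BTreeMap<String, Vec<(usize, String)>> = BTreeMap::new();
    for (query_index, query) in queries.into_iter().enumerate() {
        // Remember the original position so merged hits can be traced back.
        by_index.entry(query.index_uid).or_default().push((query_index, query.q));
    }
    by_index
}

fn main() {
    let grouped = partition_by_index(vec![
        FederatedQuery { index_uid: "movies".into(), q: "glass".into() },
        FederatedQuery { index_uid: "books".into(), q: "dune".into() },
        FederatedQuery { index_uid: "movies".into(), q: "wall".into() },
    ]);
    assert_eq!(grouped["movies"].len(), 2);
}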
// 2. perform queries, merge and make hits index by index
// 2.1. start remote queries
let remote_search = RemoteSearch::start(
partitioned_queries.remote_queries_by_host,
&federation,
deadline,
include_metadata,
);
let remote_search =
RemoteSearch::start(partitioned_queries.remote_queries_by_host, &federation, deadline);
// 2.2. concurrently execute local queries
let params = SearchByIndexParams {
@@ -134,25 +106,11 @@ pub async fn perform_federated_search(
let after_waiting_remote_results = std::time::Instant::now();
// 3. merge hits and metadata across indexes and hosts
// 3.1. Build metadata in the same order as the original queries
let query_metadata = precomputed_query_metadata.map(|precomputed_query_metadata| {
// If a remote is present, set the local remote name
let local_remote_name = network.local.clone().filter(|_| partitioned_queries.has_remote);
build_query_metadata(
precomputed_query_metadata,
local_remote_name,
&remote_results,
&results_by_index,
)
});
// 3.2. merge federation metadata
// 3.1. merge metadata
let (estimated_total_hits, degraded, used_negative_operator, facets, max_remote_duration) =
merge_metadata(&mut results_by_index, &remote_results);
// 3.3. merge hits
// 3.2. merge hits
let merged_hits: Vec<_> = merge_index_global_results(results_by_index, &mut remote_results)
.skip(federation.offset)
.take(federation.limit)
@@ -167,7 +125,7 @@ pub async fn perform_federated_search(
.map(|hit| hit.hit())
.collect();
// 3.4. merge query vectors
// 3.3. merge query vectors
let query_vectors = if retrieve_vectors {
for remote_results in remote_results.iter_mut() {
if let Some(remote_vectors) = remote_results.query_vectors.take() {
@@ -186,7 +144,7 @@ pub async fn perform_federated_search(
None
};
// 3.5. merge facets
// 3.4. merge facets
let (facet_distribution, facet_stats, facets_by_index) =
facet_order.merge(federation.merge_facets, remote_results, facets);
@@ -212,8 +170,6 @@ pub async fn perform_federated_search(
facet_stats,
facets_by_index,
remote_errors: partitioned_queries.has_remote.then_some(remote_errors),
request_uid: Some(request_uid),
metadata: query_metadata,
})
}
@@ -437,7 +393,6 @@ struct SearchHitByIndex {
struct SearchResultByIndex {
index: String,
primary_key: Option<String>,
hits: Vec<SearchHitByIndex>,
estimated_total_hits: usize,
degraded: bool,
@@ -445,61 +400,6 @@ struct SearchResultByIndex {
facets: Option<ComputedFacets>,
}
/// Builds query metadata for federated search results.
///
/// This function creates metadata for each query in the same order as the original queries,
/// combining information from both local and remote search results. It handles the mapping
/// of primary keys to their respective indexes and remotes to prevent collisions when
/// multiple remotes have the same index_uid but different primary keys.
fn build_query_metadata(
precomputed_query_metadata: Vec<(String, Option<String>)>,
local_remote_name: Option<String>,
remote_results: &[FederatedSearchResult],
results_by_index: &[SearchResultByIndex],
) -> Vec<SearchMetadata> {
// Create a map of (remote, index_uid) -> primary_key for quick lookup
// This prevents collisions when multiple remotes have the same index_uid but different primary keys
let mut primary_key_per_index = std::collections::HashMap::new();
// Build metadata for remote results
for remote_result in remote_results {
if let Some(remote_metadata) = &remote_result.metadata {
for remote_meta in remote_metadata {
if let SearchMetadata {
remote: Some(remote_name),
index_uid,
primary_key: Some(primary_key),
..
} = remote_meta
{
let key = (Some(remote_name), index_uid);
primary_key_per_index.insert(key, primary_key);
}
}
}
}
// Build metadata for local results
for local_meta in results_by_index {
if let SearchResultByIndex { index, primary_key: Some(primary_key), .. } = local_meta {
let key = (None, index);
primary_key_per_index.insert(key, primary_key);
}
}
// Build metadata in the same order as the original queries
let mut query_metadata = Vec::new();
for (index_uid, remote) in precomputed_query_metadata {
let primary_key =
primary_key_per_index.get(&(remote.as_ref(), &index_uid)).map(|pk| pk.to_string());
let query_uid = Uuid::now_v7();
// if the remote is not set, use the local remote name
let remote = remote.or_else(|| local_remote_name.clone());
query_metadata.push(SearchMetadata { query_uid, primary_key, index_uid, remote });
}
query_metadata
}
fn merge_metadata(
results_by_index: &mut Vec<SearchResultByIndex>,
remote_results: &Vec<FederatedSearchResult>,
@@ -511,7 +411,6 @@ fn merge_metadata(
let mut max_remote_duration = Duration::ZERO;
for SearchResultByIndex {
index,
primary_key: _,
hits: _,
estimated_total_hits: estimated_total_hits_by_index,
facets: facets_by_index,
@@ -540,8 +439,6 @@ fn merge_metadata(
degraded: degraded_for_host,
used_negative_operator: host_used_negative_operator,
remote_errors: _,
metadata: _,
request_uid: _,
} in remote_results
{
let this_remote_duration = Duration::from_millis(*processing_time_ms as u64);
@@ -669,12 +566,7 @@ struct RemoteSearch {
}
impl RemoteSearch {
fn start(
queries: RemoteQueriesByHost,
federation: &Federation,
deadline: Instant,
include_metadata: bool,
) -> Self {
fn start(queries: RemoteQueriesByHost, federation: &Federation, deadline: Instant) -> Self {
let mut in_flight_remote_queries = BTreeMap::new();
let client = reqwest::ClientBuilder::new()
.connect_timeout(std::time::Duration::from_millis(200))
@@ -694,10 +586,7 @@ impl RemoteSearch {
// never merge distant facets
proxy_federation.merge_facets = None;
let params = params.clone();
async move {
proxy_search(&node, queries, proxy_federation, &params, include_metadata)
.await
}
async move { proxy_search(&node, queries, proxy_federation, &params).await }
}),
);
}
@@ -741,13 +630,6 @@ impl RemoteSearch {
continue 'remote_queries;
}
// Add remote name to metadata
if let Some(metadata) = res.metadata.as_mut() {
for meta in metadata {
meta.remote = Some(node_name.clone());
}
}
federation.insert(
FEDERATION_REMOTE.to_string(),
serde_json::Value::String(node_name.clone()),
@@ -843,7 +725,6 @@ impl SearchByIndex {
}
};
let rtxn = index.read_txn()?;
let primary_key = index.primary_key(&rtxn)?.map(|pk| pk.to_string());
let criteria = index.criteria(&rtxn)?;
let dictionary = index.dictionary(&rtxn)?;
let dictionary: Option<Vec<_>> =
@@ -870,12 +751,6 @@ impl SearchByIndex {
return Err(error);
}
let mut results_by_query = Vec::with_capacity(queries.len());
// all queries for an index share the same budget
let time_budget = match cutoff {
Some(cutoff) => TimeBudget::new(Duration::from_millis(cutoff)),
None => TimeBudget::default(),
};
for QueryByIndex { query, weight, query_index } in queries {
// use an immediately invoked lambda to capture the result without returning from the function
@@ -945,13 +820,17 @@ impl SearchByIndex {
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
let time_budget = match cutoff {
Some(cutoff) => TimeBudget::new(Duration::from_millis(cutoff)),
None => TimeBudget::default(),
};
let (mut search, _is_finite_pagination, _max_total_hits, _offset) = prepare_search(
&index,
&rtxn,
&query,
&search_kind,
// clones of `TimeBudget` share the budget rather than restart it
time_budget.clone(),
time_budget,
params.features,
)?;
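The removed comment states the key property: clones of `TimeBudget` share the budget rather than restart it, so all queries against one index compete for the same allowance. A sketch of a budget with that cloning behavior (illustrative only, not milli's actual `TimeBudget`):

use std::time::{Duration, Instant};

// Sketch: a budget whose clones share the same deadline instead of granting
// each clone a fresh allowance.
#[derive(Clone)]
struct SharedBudget { deadline: Instant }

impl SharedBudget {
    fn new(budget: Duration) -> Self {
        SharedBudget { deadline: Instant::now() + budget }
    }
    fn exceeded(&self) -> bool {
        Instant::now() >= self.deadline
    }
}

fn main() {
    let budget = SharedBudget::new(Duration::from_millis(150));
    let per_query = budget.clone(); // same deadline, not a fresh 150ms
    assert_eq!(budget.exceeded(), per_query.exceeded());
}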
@@ -1098,7 +977,6 @@ impl SearchByIndex {
})?;
self.results_by_index.push(SearchResultByIndex {
index: index_uid,
primary_key,
hits: merged_result,
estimated_total_hits,
degraded,

View File

@@ -7,7 +7,7 @@ use serde::de::DeserializeOwned;
use serde_json::Value;
use super::types::{FederatedSearch, FederatedSearchResult, Federation};
use crate::search::{SearchQueryWithIndex, INCLUDE_METADATA_HEADER};
use crate::search::SearchQueryWithIndex;
pub const PROXY_SEARCH_HEADER: &str = "Meili-Proxy-Search";
pub const PROXY_SEARCH_HEADER_VALUE: &str = "true";
@@ -98,7 +98,6 @@ pub async fn proxy_search(
queries: Vec<SearchQueryWithIndex>,
federation: Federation,
params: &ProxySearchParams,
include_metadata: bool,
) -> Result<FederatedSearchResult, ProxySearchError> {
let url = format!("{}/multi-search", node.url);
@@ -106,12 +105,7 @@ pub async fn proxy_search(
let search_api_key = node.search_api_key.as_deref();
let timeout = std::env::var("MEILI_EXPERIMENTAL_REMOTE_SEARCH_TIMEOUT_SECONDS")
.ok()
.map(|p| p.parse().unwrap())
.unwrap_or(25);
let max_deadline = std::time::Instant::now() + std::time::Duration::from_secs(timeout);
let max_deadline = std::time::Instant::now() + std::time::Duration::from_secs(5);
let deadline = if let Some(deadline) = params.deadline {
std::time::Instant::min(deadline, max_deadline)
@@ -120,16 +114,7 @@ pub async fn proxy_search(
};
for i in 0..params.try_count {
match try_proxy_search(
&url,
search_api_key,
&federated,
&params.client,
deadline,
include_metadata,
)
.await
{
match try_proxy_search(&url, search_api_key, &federated, &params.client, deadline).await {
Ok(response) => return Ok(response),
Err(retry) => {
let duration = retry.into_duration(i)?;
@@ -137,7 +122,7 @@ pub async fn proxy_search(
}
}
}
try_proxy_search(&url, search_api_key, &federated, &params.client, deadline, include_metadata)
try_proxy_search(&url, search_api_key, &federated, &params.client, deadline)
.await
.map_err(Retry::into_error)
}
@@ -148,7 +133,6 @@ async fn try_proxy_search(
federated: &FederatedSearch,
client: &Client,
deadline: std::time::Instant,
include_metadata: bool,
) -> Result<FederatedSearchResult, Retry> {
let timeout = deadline.saturating_duration_since(std::time::Instant::now());
@@ -159,8 +143,6 @@ async fn try_proxy_search(
request
};
let request = request.header(PROXY_SEARCH_HEADER, PROXY_SEARCH_HEADER_VALUE);
let request =
if include_metadata { request.header(INCLUDE_METADATA_HEADER, "true") } else { request };
let response = request.send().await;
let response = match response {

View File

@@ -16,9 +16,6 @@ use meilisearch_types::milli::order_by_map::OrderByMap;
use meilisearch_types::milli::OrderBy;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use uuid::Uuid;
use crate::search::SearchMetadata;
use super::super::{ComputedFacets, FacetStats, HitsInfo, SearchHit, SearchQueryWithIndex};
use crate::milli::vector::Embedding;
@@ -134,10 +131,6 @@ pub struct FederatedSearchResult {
pub facet_stats: Option<BTreeMap<String, FacetStats>>,
#[serde(default, skip_serializing_if = "FederatedFacets::is_empty")]
pub facets_by_index: FederatedFacets,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub request_uid: Option<Uuid>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metadata: Option<Vec<SearchMetadata>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub remote_errors: Option<BTreeMap<String, ResponseError>>,
@@ -163,8 +156,6 @@ impl fmt::Debug for FederatedSearchResult {
facet_stats,
facets_by_index,
remote_errors,
request_uid,
metadata,
} = self;
let mut debug = f.debug_struct("SearchResult");
@@ -197,12 +188,6 @@ impl fmt::Debug for FederatedSearchResult {
if let Some(remote_errors) = remote_errors {
debug.field("remote_errors", &remote_errors);
}
if let Some(request_uid) = request_uid {
debug.field("request_uid", &request_uid);
}
if let Some(metadata) = metadata {
debug.field("metadata", &metadata);
}
debug.finish()
}

View File

@@ -36,7 +36,6 @@ use serde_json::{json, Value};
#[cfg(test)]
mod mod_test;
use utoipa::ToSchema;
use uuid::Uuid;
use crate::error::MeilisearchHttpError;
@@ -57,7 +56,6 @@ pub const DEFAULT_CROP_MARKER: fn() -> String = || "…".to_string();
pub const DEFAULT_HIGHLIGHT_PRE_TAG: fn() -> String = || "<em>".to_string();
pub const DEFAULT_HIGHLIGHT_POST_TAG: fn() -> String = || "</em>".to_string();
pub const DEFAULT_SEMANTIC_RATIO: fn() -> SemanticRatio = || SemanticRatio(0.5);
pub const INCLUDE_METADATA_HEADER: &str = "Meili-Include-Metadata";
#[derive(Clone, Default, PartialEq, Deserr, ToSchema)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
@@ -837,18 +835,6 @@ pub struct SearchHit {
pub ranking_score_details: Option<serde_json::Map<String, serde_json::Value>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, ToSchema)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct SearchMetadata {
pub query_uid: Uuid,
pub index_uid: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub primary_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub remote: Option<String>,
}
#[derive(Serialize, Clone, PartialEq, ToSchema)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
@@ -865,10 +851,6 @@ pub struct SearchResult {
pub facet_distribution: Option<BTreeMap<String, IndexMap<String, u64>>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub facet_stats: Option<BTreeMap<String, FacetStats>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub request_uid: Option<Uuid>,
#[serde(skip_serializing_if = "Option::is_none")]
pub metadata: Option<SearchMetadata>,
#[serde(skip_serializing_if = "Option::is_none")]
pub semantic_hit_count: Option<u32>,
@@ -890,8 +872,6 @@ impl fmt::Debug for SearchResult {
hits_info,
facet_distribution,
facet_stats,
request_uid,
metadata,
semantic_hit_count,
degraded,
used_negative_operator,
@@ -921,12 +901,6 @@ impl fmt::Debug for SearchResult {
if let Some(semantic_hit_count) = semantic_hit_count {
debug.field("semantic_hit_count", &semantic_hit_count);
}
if let Some(request_uid) = request_uid {
debug.field("request_uid", &request_uid);
}
if let Some(metadata) = metadata {
debug.field("metadata", &metadata);
}
debug.finish()
}
@@ -1139,28 +1113,15 @@ pub fn prepare_search<'t>(
Ok((search, is_finite_pagination, max_total_hits, offset))
}
pub struct SearchParams {
pub index_uid: String,
pub query: SearchQuery,
pub search_kind: SearchKind,
pub retrieve_vectors: RetrieveVectors,
pub features: RoFeatures,
pub request_uid: Uuid,
pub include_metadata: bool,
}
pub fn perform_search(params: SearchParams, index: &Index) -> Result<SearchResult, ResponseError> {
let SearchParams {
index_uid,
query,
search_kind,
retrieve_vectors,
features,
request_uid,
include_metadata,
} = params;
pub fn perform_search(
index_uid: String,
index: &Index,
query: SearchQuery,
search_kind: SearchKind,
retrieve_vectors: RetrieveVectors,
features: RoFeatures,
) -> Result<SearchResult, ResponseError> {
let before_search = Instant::now();
let index_uid_for_metadata = index_uid.clone();
let rtxn = index.read_txn()?;
let time_budget = match index.search_cutoff(&rtxn)? {
Some(cutoff) => TimeBudget::new(Duration::from_millis(cutoff)),
@@ -1181,20 +1142,7 @@ pub fn perform_search(params: SearchParams, index: &Index) -> Result<SearchResul
query_vector,
},
semantic_hit_count,
) = search_from_kind(index_uid.clone(), search_kind, search)?;
let metadata = if include_metadata {
let query_uid = Uuid::now_v7();
let primary_key = index.primary_key(&rtxn)?.map(|pk| pk.to_string());
Some(SearchMetadata {
query_uid,
index_uid: index_uid_for_metadata,
primary_key,
remote: None, // Local searches don't have a remote
})
} else {
None
};
) = search_from_kind(index_uid, search_kind, search)?;
let SearchQuery {
q,
@@ -1277,6 +1225,7 @@ pub fn perform_search(params: SearchParams, index: &Index) -> Result<SearchResul
.transpose()?
.map(|ComputedFacets { distribution, stats }| (distribution, stats))
.unzip();
let result = SearchResult {
hits: documents,
hits_info,
@@ -1288,8 +1237,6 @@ pub fn perform_search(params: SearchParams, index: &Index) -> Result<SearchResul
degraded,
used_negative_operator,
semantic_hit_count,
request_uid: Some(request_uid),
metadata,
};
Ok(result)
}

View File

@@ -419,14 +419,14 @@ async fn error_add_api_key_invalid_parameters_actions() {
let (response, code) = server.add_api_key(content).await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r###"
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r#"
{
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`, `indexes.compact`",
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
}
"###);
"#);
}
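The only change to this test is the raw-string delimiter on the snapshot literal: `r###"…"###` versus `r#"…"#`. In Rust, the hashes around a raw string merely have to outnumber any `"#` sequence inside it, so a single hash is enough for these JSON bodies:

```rust
fn main() {
    // One `#` suffices when the literal only contains bare quotes.
    let one_hash = r#"a "quoted" value"#;
    // Extra hashes are needed only if the text could contain `"#`.
    let three_hashes = r###"holds "# without ending the literal"###;
    assert!(one_hash.contains('"'));
    assert!(three_hashes.contains("\"#"));
}
```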
#[actix_rt::test]

View File

@@ -91,14 +91,14 @@ async fn create_api_key_bad_actions() {
// can't parse
let (response, code) = server.add_api_key(json!({ "actions": ["doggo"] })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
snapshot!(json_string!(response), @r#"
{
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`, `indexes.compact`",
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`, `*.get`, `webhooks.get`, `webhooks.update`, `webhooks.delete`, `webhooks.create`, `webhooks.*`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
}
"###);
"#);
}
#[actix_rt::test]

View File

@@ -40,14 +40,14 @@ async fn batch_bad_types() {
let (response, code) = server.batches_filter("types=doggo").await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
}
"###);
"#);
}
#[actix_rt::test]

View File

@@ -516,18 +516,6 @@ impl<State> Index<'_, State> {
self.service.post_encoded(url, query, self.encoder).await
}
pub async fn search_with_headers(
&self,
query: Value,
headers: Vec<(&str, &str)>,
) -> (Value, StatusCode) {
let url = format!("/indexes/{}/search", urlencode(self.uid.as_ref()));
let body = serde_json::to_string(&query).unwrap();
let mut all_headers = vec![("content-type", "application/json")];
all_headers.extend(headers);
self.service.post_str(url, body, all_headers).await
}
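With `search_with_headers` removed, the header-assembly logic it contained goes too. For reference, the deleted body prepended the JSON content type before appending caller-supplied pairs; a standalone sketch of that list building:

```rust
fn main() {
    let extra = vec![("Meili-Include-Metadata", "true")];
    // The default content type always comes first...
    let mut all_headers = vec![("content-type", "application/json")];
    // ...and caller-supplied headers are appended after it.
    all_headers.extend(extra);
    assert_eq!(
        all_headers,
        vec![
            ("content-type", "application/json"),
            ("Meili-Include-Metadata", "true"),
        ]
    );
}
```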
pub async fn search_get(&self, query: &str) -> (Value, StatusCode) {
let url = format!("/indexes/{}/search{}", urlencode(self.uid.as_ref()), query);
self.service.get(url).await

View File

@@ -522,26 +522,6 @@ pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared
.await
}
pub async fn shared_index_geojson_documents() -> &'static Index<'static, Shared> {
static INDEX: OnceCell<Index<'static, Shared>> = OnceCell::const_new();
INDEX
.get_or_init(|| async {
// Retrieved from https://gitlab-forge.din.developpement-durable.gouv.fr/pub/geomatique/descartes/d-map/-/blob/main/demo/examples/commons/countries.geojson?ref_type=heads
let server = Server::new_shared();
let index = server._index("SHARED_GEOJSON_DOCUMENTS").to_shared();
let countries = include_str!("../documents/geojson/assets/countries.json");
let lille = serde_json::from_str::<serde_json::Value>(countries).unwrap();
let (response, _code) = index._add_documents(Value(lille), Some("name")).await;
server.wait_task(response.uid()).await.succeeded();
let (response, _code) =
index._update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
server.wait_task(response.uid()).await.succeeded();
index
})
.await
}
pub async fn shared_index_for_fragments() -> Index<'static, Shared> {
static INDEX: OnceCell<(Server<Shared>, String)> = OnceCell::const_new();
let (server, uid) = INDEX

View File

@@ -390,17 +390,6 @@ impl<State> Server<State> {
self.service.post("/multi-search", queries).await
}
pub async fn multi_search_with_headers(
&self,
queries: Value,
headers: Vec<(&str, &str)>,
) -> (Value, StatusCode) {
let body = serde_json::to_string(&queries).unwrap();
let mut all_headers = vec![("content-type", "application/json")];
all_headers.extend(headers);
self.service.post_str("/multi-search", body, all_headers).await
}
pub async fn list_indexes_raw(&self, parameters: &str) -> (Value, StatusCode) {
self.service.get(format!("/indexes{parameters}")).await
}
@@ -501,8 +490,6 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
max_indexing_threads: MaxThreads::from_str("2").unwrap(),
experimental_no_edition_2024_for_settings: false,
experimental_no_edition_2024_for_dumps: false,
experimental_no_edition_2024_for_prefix_post_processing: false,
experimental_no_edition_2024_for_facet_post_processing: false,
},
experimental_enable_metrics: false,
..Parser::parse_from(None as Option<&str>)
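Besides dropping the two `experimental_no_edition_2024_for_*` flags, this hunk leaves the struct-update idiom intact: a few fields are set explicitly and the rest come from the parsed defaults. A minimal, self-contained sketch of that pattern (stand-in types, not the real `Opt`):

```rust
struct Opt {
    experimental_enable_metrics: bool,
    max_indexing_threads: usize,
}

impl Default for Opt {
    fn default() -> Self {
        Opt { experimental_enable_metrics: true, max_indexing_threads: 8 }
    }
}

fn main() {
    // Override one field, inherit the remainder from the base value.
    let opt = Opt { experimental_enable_metrics: false, ..Opt::default() };
    assert!(!opt.experimental_enable_metrics);
    assert_eq!(opt.max_indexing_threads, 8);
}
```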

View File

@@ -1,3 +1,6 @@
use crate::common::encoder::Encoder;
use crate::common::{default_settings, GetAllDocumentsOptions, Server, Value};
use crate::json;
use actix_web::test;
use meili_snap::{json_string, snapshot};
use meilisearch::Opt;
@@ -5,10 +8,6 @@ use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use uuid::Uuid;
use crate::common::encoder::Encoder;
use crate::common::{default_settings, GetAllDocumentsOptions, Server, Value};
use crate::json;
/// This is the basic usage of our API and every other tests uses the content-type application/json
#[actix_rt::test]
async fn add_documents_test_json_content_types() {
@@ -1853,7 +1852,7 @@ async fn add_documents_with_geo_field() {
.await;
snapshot!(code, @"200 OK");
// we are expecting docs 4 and 3 first as they have geo
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
@r###"
{
"hits": [
@@ -1885,8 +1884,7 @@ async fn add_documents_with_geo_field() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);
}
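The comment above relies on geo-sort semantics: documents carrying a `_geo` field are ordered by distance to the requested `_geoPoint`, and documents without one trail behind. A self-contained sketch of that comparator, with made-up distances:

```rust
fn main() {
    // (document id, optional distance to the _geoPoint target)
    let mut docs: Vec<(&str, Option<f64>)> = vec![
        ("1", None),
        ("2", None),
        ("3", Some(2.0)), // illustrative distance, not computed
        ("4", Some(1.0)),
    ];
    // Documents with a distance sort ascending; None sorts after Some.
    docs.sort_by(|a, b| match (a.1, b.1) {
        (Some(x), Some(y)) => x.total_cmp(&y),
        (Some(_), None) => std::cmp::Ordering::Less,
        (None, Some(_)) => std::cmp::Ordering::Greater,
        (None, None) => std::cmp::Ordering::Equal,
    });
    // Geo-bearing docs 4 and 3 come first, the rest keep their order.
    assert_eq!(docs.iter().map(|d| d.0).collect::<Vec<_>>(), ["4", "3", "1", "2"]);
}
```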
@@ -1941,7 +1939,7 @@ async fn update_documents_with_geo_field() {
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
snapshot!(code, @"200 OK");
// we are expecting docs 4 and 3 first as they have geo
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
@r###"
{
"hits": [
@@ -1973,8 +1971,7 @@ async fn update_documents_with_geo_field() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);
@@ -2046,7 +2043,7 @@ async fn update_documents_with_geo_field() {
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
snapshot!(code, @"200 OK");
// the search response should not have changed: we are expecting docs 4 and 3 first as they have geo
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }),
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
@r###"
{
"hits": [
@@ -2079,8 +2076,7 @@ async fn update_documents_with_geo_field() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);
}

View File

@@ -134,14 +134,14 @@ async fn get_all_documents_bad_filter() {
let (response, code) = index.get_all_documents_raw("?filter=doggo").await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
snapshot!(json_string!(response), @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `doggo`.\n1:6 doggo",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `doggo`.\n1:6 doggo",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
}
"#);
"###);
let (response, code) = index.get_all_documents_raw("?filter=doggo=bernese").await;
snapshot!(code, @"400 Bad Request");
@@ -523,14 +523,14 @@ async fn delete_document_by_filter() {
// send bad filter
let (response, code) = index.delete_document_by_filter(json!({ "filter": "hello"})).await;
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r#"
snapshot!(response, @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `hello`.\n1:6 hello",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `hello`.\n1:6 hello",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
}
"#);
"###);
// send empty filter
let (response, code) = index.delete_document_by_filter(json!({ "filter": ""})).await;
@@ -724,14 +724,14 @@ async fn fetch_document_by_filter() {
let (response, code) = index.fetch_documents(json!({ "filter": "cool doggo" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r#"
snapshot!(response, @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `cool doggo`.\n1:11 cool doggo",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `cool doggo`.\n1:11 cool doggo",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
}
"#);
"###);
let (response, code) = index.fetch_documents(json!({ "filter": "doggo = bernese" })).await;
snapshot!(code, @"400 Bad Request");

File diff suppressed because one or more lines are too long

View File

@@ -1,547 +0,0 @@
{
"type": "Polygon",
"coordinates": [
[
[
3.11681,
50.63646
],
[
3.11945,
50.63488
],
[
3.12134,
50.63504
],
[
3.12064,
50.63127
],
[
3.12203,
50.62785
],
[
3.12389,
50.6262
],
[
3.12161,
50.62358
],
[
3.12547,
50.62114
],
[
3.12447,
50.61874
],
[
3.12288,
50.61988
],
[
3.12054,
50.61846
],
[
3.11846,
50.61754
],
[
3.11482,
50.6207
],
[
3.11232,
50.6188
],
[
3.10936,
50.61727
],
[
3.10822,
50.61765
],
[
3.10603,
50.61536
],
[
3.1041,
50.61596
],
[
3.10017,
50.6186
],
[
3.09688,
50.61714
],
[
3.09575,
50.61795
],
[
3.0891,
50.61532
],
[
3.08625,
50.61792
],
[
3.07948,
50.61428
],
[
3.07146,
50.6066
],
[
3.06819,
50.60918
],
[
3.06502,
50.61046
],
[
3.06223,
50.61223
],
[
3.05925,
50.60659
],
[
3.05463,
50.60077
],
[
3.04906,
50.6008
],
[
3.04726,
50.6035
],
[
3.04328,
50.60667
],
[
3.04155,
50.60417
],
[
3.03767,
50.60456
],
[
3.03528,
50.60538
],
[
3.03239,
50.60725
],
[
3.0254,
50.6111
],
[
3.02387,
50.6125
],
[
3.0248,
50.61344
],
[
3.02779,
50.61418
],
[
3.02414,
50.6169
],
[
3.02312,
50.61975
],
[
3.02172,
50.62082
],
[
3.01953,
50.62484
],
[
3.01811,
50.62529
],
[
3.01313,
50.62558
],
[
3.01385,
50.62695
],
[
3.00844,
50.62717
],
[
3.0056,
50.6267
],
[
3.00229,
50.62557
],
[
3.00119,
50.62723
],
[
2.99769,
50.62901
],
[
2.99391,
50.62732
],
[
2.98971,
50.63036
],
[
2.9862,
50.63328
],
[
2.98178,
50.63404
],
[
2.97917,
50.63499
],
[
2.97284,
50.63429
],
[
2.97174,
50.63365
],
[
2.97002,
50.63366
],
[
2.96956,
50.63506
],
[
2.97046,
50.6365
],
[
2.96878,
50.63833
],
[
2.97039,
50.6395
],
[
2.97275,
50.64183
],
[
2.97225,
50.64381
],
[
2.9745,
50.64442
],
[
2.97474,
50.64648
],
[
2.97091,
50.65108
],
[
2.96975,
50.65361
],
[
2.97061,
50.65513
],
[
2.96929,
50.65739
],
[
2.97072,
50.6581
],
[
2.97973,
50.66048
],
[
2.98369,
50.66123
],
[
2.9865,
50.65959
],
[
2.9896,
50.65845
],
[
2.9963,
50.65666
],
[
2.99903,
50.65552
],
[
3.00274,
50.65235
],
[
3.00714,
50.64887
],
[
3.01088,
50.64845
],
[
3.01318,
50.64541
],
[
3.01974,
50.63972
],
[
3.02317,
50.63813
],
[
3.02639,
50.63613
],
[
3.029,
50.63521
],
[
3.03414,
50.6382
],
[
3.03676,
50.63888
],
[
3.03686,
50.64147
],
[
3.03791,
50.64379
],
[
3.0409,
50.64577
],
[
3.04582,
50.64807
],
[
3.05132,
50.64866
],
[
3.05055,
50.64949
],
[
3.05244,
50.65055
],
[
3.05784,
50.64927
],
[
3.0596,
50.65105
],
[
3.06414,
50.65041
],
[
3.06705,
50.64936
],
[
3.07023,
50.64706
],
[
3.07203,
50.64355
],
[
3.07526,
50.64188
],
[
3.0758,
50.64453
],
[
3.07753,
50.64381
],
[
3.07861,
50.64542
],
[
3.08299,
50.64725
],
[
3.08046,
50.64912
],
[
3.08349,
50.65082
],
[
3.08354,
50.65155
],
[
3.08477,
50.65312
],
[
3.08542,
50.65654
],
[
3.08753,
50.65687
],
[
3.09032,
50.65602
],
[
3.09018,
50.65142
],
[
3.09278,
50.65086
],
[
3.09402,
50.64982
],
[
3.09908,
50.65146
],
[
3.10316,
50.65227
],
[
3.09726,
50.64723
],
[
3.09387,
50.64358
],
[
3.09357,
50.64095
],
[
3.09561,
50.64133
],
[
3.09675,
50.64018
],
[
3.09454,
50.63891
],
[
3.09627,
50.63693
],
[
3.09795,
50.63713
],
[
3.09919,
50.63576
],
[
3.10324,
50.6351
],
[
3.10613,
50.63532
],
[
3.10649,
50.63434
],
[
3.1109,
50.63525
],
[
3.11502,
50.63504
],
[
3.11681,
50.63646
]
]
]
}

View File

@@ -1,421 +0,0 @@
use meili_snap::{json_string, snapshot};
use crate::common::{shared_index_geojson_documents, Server};
use crate::json;
const LILLE: &str = include_str!("assets/lille.geojson");
#[actix_rt::test]
async fn basic_add_settings_and_geojson_documents() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
}
"###);
let lille: serde_json::Value = serde_json::from_str(LILLE).unwrap();
let documents = json!([
{
"id": "missing",
},
{
"id": "point",
"_geojson": { "type": "Point", "coordinates": [1, 1] },
},
{
"id": "lille",
"_geojson": lille,
},
]);
let (task, _status_code) = index.add_documents(documents, None).await;
let response = server.wait_task(task.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 3,
"indexedDocuments": 3
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
let (response, code) = index.get_all_documents_raw("?ids=missing,point").await;
snapshot!(code, @"200 OK");
snapshot!(response,
@r#"
{
"results": [
{
"id": "missing"
},
{
"id": "point",
"_geojson": {
"type": "Point",
"coordinates": [
1,
1
]
}
}
],
"offset": 0,
"limit": 20,
"total": 2
}
"#);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [
{
"id": "point",
"_geojson": {
"type": "Point",
"coordinates": [
1,
1
]
}
}
],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
}
#[actix_rt::test]
async fn basic_add_geojson_documents_and_settings() {
let server = Server::new_shared();
let index = server.unique_index();
let lille: serde_json::Value = serde_json::from_str(LILLE).unwrap();
let documents = json!([
{
"id": "missing",
},
{
"id": "point",
"_geojson": { "type": "Point", "coordinates": [1, 1] },
},
{
"id": "lille",
"_geojson": lille,
},
]);
let (task, _status_code) = index.add_documents(documents, None).await;
let response = server.wait_task(task.uid()).await.succeeded();
snapshot!(response,
@r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 3,
"indexedDocuments": 3
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
snapshot!(response,
@r#"
{
"message": "Index `[uuid]`: Attribute `_geojson` is not filterable. This index does not have configured filterable attributes.\n14:15 _geoPolygon([0,0],[0,2],[2,2],[2,0])",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
}
"#);
let (task, _status_code) =
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }),
@r###"
{
"hits": [
{
"id": "point",
"_geojson": {
"type": "Point",
"coordinates": [
1,
1
]
}
}
],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
}
#[actix_rt::test]
async fn add_and_remove_geojson() {
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
let documents = json!([
{
"id": "missing",
},
{
"id": 0,
"_geojson": { "type": "Point", "coordinates": [1, 1] },
}
]);
let (task, _status_code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) =
index.search_get("?filter=_geoPolygon([0,0],[0,0.9],[0.9,0.9],[0.9,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 0);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 1);
let (task, _) = index.delete_document(0).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) =
index.search_get("?filter=_geoPolygon([0,0],[0,0.9],[0.9,0.9],[0.9,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 0);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 0);
// add it back
let documents = json!([
{
"id": 0,
"_geojson": { "type": "Point", "coordinates": [1, 1] },
}
]);
let (task, _status_code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) =
index.search_get("?filter=_geoPolygon([0,0],[0,0.9],[0.9,0.9],[0.9,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 0);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 1);
}
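These assertions boil down to plain containment: the point (1,1) lies inside the square polygon spanning (0,0) to (2,2) but outside the smaller one capped at (0.9,0.9). Since the test polygons are axis-aligned, a simple box check captures the geometry:

```rust
fn inside(p: (f64, f64), min: (f64, f64), max: (f64, f64)) -> bool {
    p.0 >= min.0 && p.0 <= max.0 && p.1 >= min.1 && p.1 <= max.1
}

fn main() {
    let point = (1.0, 1.0);
    // Matches _geoPolygon([0,0],[0,2],[2,2],[2,0])...
    assert!(inside(point, (0.0, 0.0), (2.0, 2.0)));
    // ...but not _geoPolygon([0,0],[0,0.9],[0.9,0.9],[0.9,0]).
    assert!(!inside(point, (0.0, 0.0), (0.9, 0.9)));
}
```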
#[actix_rt::test]
async fn partial_update_geojson() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _) = index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
server.wait_task(task.uid()).await.succeeded();
let documents = json!([
{
"id": 0,
"_geojson": { "type": "Point", "coordinates": [1, 1] },
}
]);
let (task, _status_code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) =
index.search_get("?filter=_geoPolygon([0,0],[0,0.9],[0.9,0.9],[0.9,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 0);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 1);
let documents = json!([
{
"id": 0,
"_geojson": { "type": "Point", "coordinates": [0.5, 0.5] },
}
]);
let (task, _status_code) = index.update_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) =
index.search_get("?filter=_geoPolygon([0,0],[0,0.9],[0.9,0.9],[0.9,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 1);
let (response, _code) = index.search_get("?filter=_geoPolygon([0,0],[0,2],[2,2],[2,0])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 1);
let (response, _code) =
index.search_get("?filter=_geoPolygon([0.9,0.9],[0.9,2],[2,2],[2,0.9])").await;
assert_eq!(response.get("hits").unwrap().as_array().unwrap().len(), 0);
}
#[actix_rt::test]
async fn geo_bounding_box() {
let index = shared_index_geojson_documents().await;
// The bounding box is a polygon over middle Europe
let (response, code) =
index.search_get("?filter=_geoBoundingBox([50.53987503447863,21.43443989912143],[43.76393151539099,0.54979129195425])&attributesToRetrieve=name").await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"name": "Austria"
},
{
"name": "Belgium"
},
{
"name": "Bosnia_and_Herzegovina"
},
{
"name": "Switzerland"
},
{
"name": "Czech_Republic"
},
{
"name": "Germany"
},
{
"name": "France"
},
{
"name": "Croatia"
},
{
"name": "Hungary"
},
{
"name": "Italy"
},
{
"name": "Luxembourg"
},
{
"name": "Netherlands"
},
{
"name": "Poland"
},
{
"name": "Romania"
},
{
"name": "Republic_of_Serbia"
},
{
"name": "Slovakia"
},
{
"name": "Slovenia"
}
],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 17,
"requestUid": "[uuid]"
}
"###);
// Between Russia and Alaska
let (response, code) = index
.search_get("?filter=_geoBoundingBox([70,-148],[63,152])&attributesToRetrieve=name")
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"name": "Canada"
},
{
"name": "Russia"
},
{
"name": "United_States_of_America"
}
],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
}
"###);
}
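The second query is the interesting one: in `_geoBoundingBox([70,-148],[63,152])` the eastern edge (-148) is numerically smaller than the western edge (152), so the box crosses the antimeridian, which is how it matches Canada, Russia, and the United States at once. A sketch of wrap-around longitude containment (illustrative, not Meilisearch's actual implementation):

```rust
fn lng_in_box(lng: f64, west: f64, east: f64) -> bool {
    if west <= east {
        (west..=east).contains(&lng)
    } else {
        // Box crosses the antimeridian: accept both sides of the seam.
        lng >= west || lng <= east
    }
}

fn main() {
    // Bering Strait longitudes fall inside the wrapped range [152, -148]:
    assert!(lng_in_box(170.0, 152.0, -148.0));
    assert!(lng_in_box(-170.0, 152.0, -148.0));
    // Greenwich does not:
    assert!(!lng_in_box(0.0, 152.0, -148.0));
}
```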
#[actix_rt::test]
async fn bug_5904() {
// https://github.com/meilisearch/meilisearch/issues/5904
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) =
index.update_settings(json!({"filterableAttributes": ["_geojson"]})).await;
server.wait_task(response.uid()).await.succeeded();
let geojson = json!({
"id": 1,
"_geojson": {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
4.23914,
48.382893
]
},
"properties": {}
}
]
}
});
let (response, _code) = index.add_documents(geojson, Some("id")).await;
server.wait_task(response.uid()).await.succeeded();
}

View File

@@ -1,6 +1,5 @@
mod add_documents;
mod delete_documents;
mod errors;
mod geojson;
mod get_documents;
mod update_documents;

View File

@@ -1,4 +1,5 @@
use meili_snap::snapshot;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

View File

@@ -642,14 +642,14 @@ async fn filter_invalid_syntax_object() {
&json!({"filterableAttributes": ["title"]}),
&json!({"filter": "title & Glass"}),
|response, code| {
snapshot!(response, @r#"
snapshot!(response, @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `title & Glass`.\n1:14 title & Glass",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
}
"#);
"###);
snapshot!(code, @"400 Bad Request");
},
)
@@ -663,14 +663,14 @@ async fn filter_invalid_syntax_array() {
&json!({"filterableAttributes": ["title"]}),
&json!({"filter": ["title & Glass"]}),
|response, code| {
snapshot!(response, @r#"
snapshot!(response, @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `title & Glass`.\n1:14 title & Glass",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
}
"#);
"###);
snapshot!(code, @"400 Bad Request");
},
)

View File

@@ -742,7 +742,7 @@ async fn vector_filter_all_embedders() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -762,10 +762,9 @@ async fn vector_filter_all_embedders() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);
"#);
}
#[actix_rt::test]
@@ -840,7 +839,7 @@ async fn vector_filter_specific_embedder() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -860,10 +859,9 @@ async fn vector_filter_specific_embedder() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);
"#);
}
#[actix_rt::test]
@@ -876,7 +874,7 @@ async fn vector_filter_user_provided() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -887,10 +885,9 @@ async fn vector_filter_user_provided() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
"#);
}
#[actix_rt::test]
@@ -903,7 +900,7 @@ async fn vector_filter_specific_fragment() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -917,10 +914,9 @@ async fn vector_filter_specific_fragment() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
"#);
let (value, _code) = index
.search_post(json!({
@@ -928,7 +924,7 @@ async fn vector_filter_specific_fragment() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -945,10 +941,9 @@ async fn vector_filter_specific_fragment() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
"#);
}
#[actix_rt::test]
@@ -981,17 +976,16 @@ async fn vector_filter_document_template_but_fragments_used() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [],
"query": "",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
"#);
}
#[actix_rt::test]
@@ -1029,7 +1023,7 @@ async fn vector_filter_document_template() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -1046,10 +1040,9 @@ async fn vector_filter_document_template() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
"#);
}
#[actix_rt::test]
@@ -1082,7 +1075,7 @@ async fn vector_filter_negation() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -1099,10 +1092,9 @@ async fn vector_filter_negation() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
"#);
}
#[actix_rt::test]
@@ -1115,7 +1107,7 @@ async fn vector_filter_or_combination() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -1132,10 +1124,9 @@ async fn vector_filter_or_combination() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
"#);
}
#[actix_rt::test]
@@ -1148,7 +1139,7 @@ async fn vector_filter_regenerate() {
"attributesToRetrieve": ["name"]
}))
.await;
snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(value, @r#"
{
"hits": [
{
@@ -1165,8 +1156,7 @@ async fn vector_filter_regenerate() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
"#);
}

View File

@@ -33,7 +33,7 @@ async fn geo_bounding_box_with_string_and_number() {
}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -63,8 +63,7 @@ async fn geo_bounding_box_with_string_and_number() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
},
@@ -85,7 +84,7 @@ async fn bug_4640() {
}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -124,8 +123,7 @@ async fn bug_4640() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
},
@@ -149,7 +147,7 @@ async fn geo_asc_with_words() {
&json!({"q": "jean"}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -181,8 +179,7 @@ async fn geo_asc_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
},
@@ -195,7 +192,7 @@ async fn geo_asc_with_words() {
&json!({"q": "bob"}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -219,8 +216,7 @@ async fn geo_asc_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
},
@@ -233,7 +229,7 @@ async fn geo_asc_with_words() {
&json!({"q": "intel"}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -249,8 +245,7 @@ async fn geo_asc_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
},
@@ -274,7 +269,7 @@ async fn geo_sort_with_words() {
&json!({"q": "jean", "sort": ["_geoPoint(0.0, 0.0):asc"]}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -309,8 +304,7 @@ async fn geo_sort_with_words() {
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
},

View File

@@ -1,4 +1,4 @@
use meili_snap::{json_string, snapshot};
use meili_snap::snapshot;
use once_cell::sync::Lazy;
use crate::common::index::Index;
@@ -148,7 +148,7 @@ async fn simple_search() {
)
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r#"
{
"hits": [
{
@@ -209,10 +209,9 @@ async fn simple_search() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"semanticHitCount": 0
}
"###);
"#);
snapshot!(response["semanticHitCount"], @"0");
let (response, code) = index
@@ -221,7 +220,7 @@ async fn simple_search() {
)
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r#"
{
"hits": [
{
@@ -285,10 +284,9 @@ async fn simple_search() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"semanticHitCount": 2
}
"###);
"#);
snapshot!(response["semanticHitCount"], @"2");
let (response, code) = index
@@ -297,7 +295,7 @@ async fn simple_search() {
)
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r#"
{
"hits": [
{
@@ -361,10 +359,9 @@ async fn simple_search() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"semanticHitCount": 3
}
"###);
"#);
snapshot!(response["semanticHitCount"], @"3");
}

View File

@@ -104,7 +104,7 @@ async fn simple_search() {
// english
index
.search(json!({"q": "Atta", "attributesToRetrieve": ["id"]}), |response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -115,8 +115,7 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -126,7 +125,7 @@ async fn simple_search() {
// japanese
index
.search(json!({"q": "進撃", "attributesToRetrieve": ["id"]}), |response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -137,8 +136,7 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -149,7 +147,7 @@ async fn simple_search() {
.search(
json!({"q": "進撃", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r#"
{
"hits": [
{
@@ -160,10 +158,9 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
"#);
snapshot!(code, @"200 OK");
},
)
@@ -172,7 +169,7 @@ async fn simple_search() {
// chinese
index
.search(json!({"q": "进击", "attributesToRetrieve": ["id"]}), |response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r#"
{
"hits": [
{
@@ -183,10 +180,9 @@ async fn simple_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
"#);
snapshot!(code, @"200 OK");
})
.await;
@@ -226,7 +222,7 @@ async fn force_locales() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -237,8 +233,7 @@ async fn force_locales() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -251,7 +246,7 @@ async fn force_locales() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -262,8 +257,7 @@ async fn force_locales() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -306,7 +300,7 @@ async fn force_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -317,8 +311,7 @@ async fn force_locales_with_pattern() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -331,7 +324,7 @@ async fn force_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -342,8 +335,7 @@ async fn force_locales_with_pattern() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -384,15 +376,14 @@ async fn force_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -405,7 +396,7 @@ async fn force_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -416,8 +407,7 @@ async fn force_locales_with_pattern_nested() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -461,15 +451,14 @@ async fn force_different_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -482,7 +471,7 @@ async fn force_different_locales_with_pattern() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -493,8 +482,7 @@ async fn force_different_locales_with_pattern() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -541,15 +529,14 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -562,7 +549,7 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"], "attributesToSearchOn": ["name_zh", "description_zh"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -573,8 +560,7 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -616,7 +602,7 @@ async fn auto_infer_locales_at_search() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -627,8 +613,7 @@ async fn auto_infer_locales_at_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -640,21 +625,20 @@ async fn auto_infer_locales_at_search() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
"id": 853
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
},
)
@@ -664,7 +648,7 @@ async fn auto_infer_locales_at_search() {
.search(
json!({"q": "\"进击的巨人\"", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -675,8 +659,7 @@ async fn auto_infer_locales_at_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -719,15 +702,14 @@ async fn force_different_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -740,21 +722,20 @@ async fn force_different_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
"id": 852
}
],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
},
)
@@ -765,7 +746,7 @@ async fn force_different_locales_with_pattern_nested() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["ja"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -776,8 +757,7 @@ async fn force_different_locales_with_pattern_nested() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
@@ -819,15 +799,14 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -840,15 +819,14 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -884,15 +862,14 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["cmn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -905,15 +882,14 @@ async fn settings_change() {
.search(
json!({"q": "\"进击的巨人\"", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"query": "\"进击的巨人\"",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
snapshot!(code, @"200 OK");
@@ -1188,7 +1164,7 @@ async fn swedish_search() {
// infer swedish
index
.search(json!({"q": "trä", "attributesToRetrieve": ["product"]}), |response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -1202,8 +1178,7 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
snapshot!(code, @"200 OK");
@@ -1212,7 +1187,7 @@ async fn swedish_search() {
index
.search(json!({"q": "tra", "attributesToRetrieve": ["product"]}), |response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -1226,8 +1201,7 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
snapshot!(code, @"200 OK");
@@ -1239,7 +1213,7 @@ async fn swedish_search() {
.search(
json!({"q": "trä", "locales": ["swe"], "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -1253,8 +1227,7 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
snapshot!(code, @"200 OK");
@@ -1265,7 +1238,7 @@ async fn swedish_search() {
.search(
json!({"q": "tra", "locales": ["swe"], "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -1279,8 +1252,7 @@ async fn swedish_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
snapshot!(code, @"200 OK");
@@ -1315,21 +1287,20 @@ async fn german_search() {
.search(
json!({"q": "kulturalität", "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
"product": "Interkulturalität"
}
],
"query": "kulturalität",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");
},
)
@@ -1339,7 +1310,7 @@ async fn german_search() {
.search(
json!({"q": "organisation", "attributesToRetrieve": ["product"]}),
|response, code| {
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -1350,8 +1321,7 @@ async fn german_search() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
snapshot!(code, @"200 OK");

View File

@@ -1,387 +0,0 @@
use meili_snap::{json_string, snapshot};
use crate::common::{shared_index_with_documents, Server, DOCUMENTS};
use crate::json;
#[actix_rt::test]
async fn search_without_metadata_header() {
let index = shared_index_with_documents().await;
// Test that metadata is not included by default
index
.search(json!({"q": "glass"}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"title": "Gläss",
"id": "450465",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
})
.await;
}
#[actix_rt::test]
async fn search_with_metadata_header() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
// Test with Meili-Include-Metadata header
let (response, code) = index
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
{
"hits": [
{
"title": "Gläss",
"id": "450465",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]",
"metadata": {
"queryUid": "[uuid]",
"indexUid": "[uuid]",
"primaryKey": "id"
}
}
"###);
}
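This deleted test pinned down the opt-in behavior: metadata appears only when the `Meili-Include-Metadata` header asks for it. An illustrative gate, not the actual handler code:

```rust
// Hypothetical gating logic: attach metadata only on explicit opt-in.
fn include_metadata(header: Option<&str>) -> bool {
    matches!(header, Some(v) if v.eq_ignore_ascii_case("true"))
}

fn main() {
    assert!(include_metadata(Some("true")));
    assert!(!include_metadata(Some("false")));
    assert!(!include_metadata(None));
}
```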
#[actix_rt::test]
async fn search_with_metadata_header_and_primary_key() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _code) = index.add_documents(documents, Some("id")).await;
server.wait_task(task.uid()).await.succeeded();
// Test with Meili-Include-Metadata header
let (response, code) = index
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
{
"hits": [
{
"id": "450465",
"title": "Gläss",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]",
"metadata": {
"queryUid": "[uuid]",
"indexUid": "[uuid]",
"primaryKey": "id"
}
}
"###);
}
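The `queryUid` asserted here was minted with `Uuid::now_v7()` in the `perform_search` hunk earlier in this compare, so every query gets a time-ordered identifier. A minimal sketch of that property, assuming the `uuid` crate with its `v7` feature enabled:

```rust
use std::{thread, time::Duration};
use uuid::Uuid;

fn main() {
    let first = Uuid::now_v7();
    thread::sleep(Duration::from_millis(2));
    let second = Uuid::now_v7();
    // v7 UUIDs embed a millisecond timestamp, so later ones sort higher.
    assert!(first < second);
}
```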
#[actix_rt::test]
async fn multi_search_without_metadata_header() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
// Test multi-search without metadata header
let (response, code) = server
.multi_search(json!({
"queries": [
{"indexUid": index.uid, "q": "glass"},
{"indexUid": index.uid, "q": "dragon"}
]
}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".results[0].processingTimeMs" => "[duration]", ".results[0].requestUid" => "[uuid]", ".results[1].processingTimeMs" => "[duration]", ".results[1].requestUid" => "[uuid]" }), @r###"
{
"results": [
{
"indexUid": "[uuid]",
"hits": [
{
"title": "Gläss",
"id": "450465",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
},
{
"indexUid": "[uuid]",
"hits": [
{
"title": "How to Train Your Dragon: The Hidden World",
"id": "166428",
"color": [
"green",
"red"
]
}
],
"query": "dragon",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
]
}
"###);
}
#[actix_rt::test]
async fn multi_search_with_metadata_header() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _code) = index.add_documents(documents, Some("id")).await;
server.wait_task(task.uid()).await.succeeded();
// Test multi-search with metadata header
let (response, code) = server
.multi_search_with_headers(
json!({
"queries": [
{"indexUid": index.uid, "q": "glass"},
{"indexUid": index.uid, "q": "dragon"}
]
}),
vec![("Meili-Include-Metadata", "true")],
)
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".results[0].processingTimeMs" => "[duration]", ".results[0].requestUid" => "[uuid]", ".results[0].metadata.queryUid" => "[uuid]", ".results[1].processingTimeMs" => "[duration]", ".results[1].requestUid" => "[uuid]", ".results[1].metadata.queryUid" => "[uuid]" }), @r###"
{
"results": [
{
"indexUid": "[uuid]",
"hits": [
{
"id": "450465",
"title": "Gläss",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]",
"metadata": {
"queryUid": "[uuid]",
"indexUid": "[uuid]",
"primaryKey": "id"
}
},
{
"indexUid": "[uuid]",
"hits": [
{
"id": "166428",
"title": "How to Train Your Dragon: The Hidden World",
"color": [
"green",
"red"
]
}
],
"query": "dragon",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]",
"metadata": {
"queryUid": "[uuid]",
"indexUid": "[uuid]",
"primaryKey": "id"
}
}
]
}
"###);
}
#[actix_rt::test]
async fn search_metadata_header_false_value() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
// Test with header set to false
let (response, code) = index
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "false")])
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"title": "Gläss",
"id": "450465",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
}
"###);
}
#[actix_rt::test]
async fn search_metadata_uuid_format() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = index
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
{
"hits": [
{
"title": "Gläss",
"id": "450465",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]",
"metadata": {
"queryUid": "[uuid]",
"indexUid": "[uuid]",
"primaryKey": "id"
}
}
"###);
}
#[actix_rt::test]
async fn search_metadata_consistency_across_requests() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _code) = index.add_documents(documents, Some("id")).await;
server.wait_task(task.uid()).await.succeeded();
// Make multiple requests and check that metadata is consistent
for _i in 0..3 {
let (response, code) = index
.search_with_headers(json!({"q": "glass"}), vec![("Meili-Include-Metadata", "true")])
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]", ".metadata.queryUid" => "[uuid]" }), @r###"
{
"hits": [
{
"id": "450465",
"title": "Gläss",
"color": [
"blue",
"red"
]
}
],
"query": "glass",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]",
"metadata": {
"queryUid": "[uuid]",
"indexUid": "[uuid]",
"primaryKey": "id"
}
}
"###);
}
}

View File

@@ -11,7 +11,6 @@ mod hybrid;
#[cfg(not(feature = "chinese-pinyin"))]
mod locales;
mod matching_strategy;
mod metadata;
mod multi;
mod pagination;
mod restrict_searchable;
@@ -1045,7 +1044,7 @@ async fn test_degraded_score_details() {
}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1104,8 +1103,7 @@ async fn test_degraded_score_details() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
},
@@ -2129,102 +2127,3 @@ async fn simple_search_changing_unrelated_settings() {
})
.await;
}
#[actix_rt::test]
async fn ranking_score_bug_with_sort() {
let server = Server::new_shared();
let index = server.unique_index();
// Create documents with a "created" field for sorting
let documents = json!([
{
"id": "1",
"title": "Coffee Mug",
"created": "2023-01-01T00:00:00Z"
},
{
"id": "2",
"title": "Water Bottle",
"created": "2023-01-02T00:00:00Z"
},
{
"id": "3",
"title": "Tumbler Cup",
"created": "2023-01-03T00:00:00Z"
},
{
"id": "4",
"title": "Stainless Steel Tumbler",
"created": "2023-01-04T00:00:00Z"
}
]);
// Add documents
let (task, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202, "{task}");
server.wait_task(task.uid()).await.succeeded();
// Configure sortable attributes
let (task, code) = index
.update_settings(json!({
"sortableAttributes": ["created"]
}))
.await;
assert_eq!(code, 202, "{task}");
server.wait_task(task.uid()).await.succeeded();
// Test 1: Search without sort - should have proper ranking scores
index
.search(
json!({
"q": "tumbler",
"showRankingScore": true,
"rankingScoreThreshold": 0.0,
"attributesToRetrieve": ["title"]
}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "Tumbler Cup",
"_rankingScore": 0.9848484848484848
},
{
"title": "Stainless Steel Tumbler",
"_rankingScore": 0.8787878787878788
}
]
"###);
},
)
.await;
// Test 2: Search with sort - this is where the bug occurs
index
.search(
json!({
"q": "tumbler",
"showRankingScore": true,
"rankingScoreThreshold": 0.0,
"sort": ["created:desc"],
"attributesToRetrieve": ["title"]
}),
|response, code| {
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "Tumbler Cup",
"_rankingScore": 0.9848484848484848
},
{
"title": "Stainless Steel Tumbler",
"_rankingScore": 0.8787878787878788
}
]
"###);
},
)
.await;
}

View File

@@ -93,14 +93,13 @@ async fn federation_empty_list() {
let (response, code) = server.multi_search(json!({"federation": {}, "queries": []})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]"
"estimatedTotalHits": 0
}
"###);
}
@@ -165,7 +164,7 @@ async fn simple_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
[
{
"indexUid": "SHARED_DOCUMENTS",
@@ -183,8 +182,7 @@ async fn simple_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
},
{
"indexUid": "SHARED_DOCUMENTS",
@@ -202,8 +200,7 @@ async fn simple_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
]
"###);
@@ -220,7 +217,7 @@ async fn federation_single_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -240,8 +237,7 @@ async fn federation_single_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}
"###);
}
@@ -260,7 +256,7 @@ async fn federation_multiple_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -312,8 +308,7 @@ async fn federation_multiple_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]"
"estimatedTotalHits": 5
}
"###);
}
@@ -330,7 +325,7 @@ async fn federation_two_search_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -363,8 +358,7 @@ async fn federation_two_search_single_index() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
"###);
}
@@ -463,7 +457,7 @@ async fn simple_search_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response["results"], { ".**.processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
[
{
"indexUid": "SHARED_DOCUMENTS",
@@ -481,8 +475,7 @@ async fn simple_search_two_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
},
{
"indexUid": "SHARED_NESTED_DOCUMENTS",
@@ -523,8 +516,7 @@ async fn simple_search_two_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]"
"estimatedTotalHits": 2
}
]
"###);
@@ -543,7 +535,7 @@ async fn federation_two_search_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -604,8 +596,7 @@ async fn federation_two_search_two_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -635,7 +626,7 @@ async fn federation_multiple_search_multiple_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -804,8 +795,7 @@ async fn federation_multiple_search_multiple_indexes() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -1111,7 +1101,7 @@ async fn federation_filter() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(response, @r###"
{
"hits": [
{
@@ -1150,8 +1140,7 @@ async fn federation_filter() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -1188,7 +1177,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1277,8 +1266,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);
@@ -1290,7 +1278,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1365,8 +1353,7 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -1462,7 +1449,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1551,8 +1538,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]"
"estimatedTotalHits": 4
}
"###);
@@ -1565,7 +1551,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1640,8 +1626,7 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -1719,7 +1704,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1846,8 +1831,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
"estimatedTotalHits": 10
}
"###);
@@ -1860,7 +1844,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -1931,8 +1915,7 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 6,
"requestUid": "[uuid]"
"estimatedTotalHits": 6
}
"###);
}
@@ -1953,7 +1936,7 @@ async fn federation_sort_different_ranking_rules() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -2080,8 +2063,7 @@ async fn federation_sort_different_ranking_rules() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
"estimatedTotalHits": 10
}
"###);
@@ -2160,7 +2142,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -2287,8 +2269,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 10,
"requestUid": "[uuid]"
"estimatedTotalHits": 10
}
"###);
@@ -2301,7 +2282,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -2372,8 +2353,7 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 6,
"requestUid": "[uuid]"
"estimatedTotalHits": 6
}
"###);
}
@@ -2444,7 +2424,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2547,8 +2527,7 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2570,7 +2549,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2585,8 +2564,7 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 1,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2608,7 +2586,7 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2695,8 +2673,7 @@ async fn federation_limit_offset() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 2,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2718,14 +2695,13 @@ async fn federation_limit_offset() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 12,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2755,7 +2731,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2885,8 +2861,7 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2908,7 +2883,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -2923,8 +2898,7 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 1,
"offset": 0,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -2946,7 +2920,7 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3033,8 +3007,7 @@ async fn federation_formatting() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 2,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -3056,14 +3029,13 @@ async fn federation_formatting() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [],
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 12,
"estimatedTotalHits": 12,
"requestUid": "[uuid]"
"estimatedTotalHits": 12
}
"###);
}
@@ -3126,7 +3098,7 @@ async fn federation_null_weight() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -3165,8 +3137,7 @@ async fn federation_null_weight() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
}
@@ -3273,7 +3244,7 @@ async fn federation_federated_contains_facets() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -3309,8 +3280,7 @@ async fn federation_federated_contains_facets() {
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]"
"estimatedTotalHits": 3
}
"###);
@@ -3518,7 +3488,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3562,8 +3532,7 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 4,
"requestUid": "[uuid]"
"semanticHitCount": 4
}
"###);
@@ -3576,7 +3545,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3620,8 +3589,7 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 4,
"requestUid": "[uuid]"
"semanticHitCount": 4
}
"###);
@@ -3635,7 +3603,7 @@ async fn federation_vector_single_index() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r###"
{
"hits": [
{
@@ -3683,8 +3651,7 @@ async fn federation_vector_single_index() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"semanticHitCount": 3,
"requestUid": "[uuid]"
"semanticHitCount": 3
}
"###);
}
@@ -3736,7 +3703,7 @@ async fn federation_vector_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r#"
{
"hits": [
{
@@ -3955,10 +3922,9 @@ async fn federation_vector_two_indexes() {
0.6
]
},
"semanticHitCount": 6,
"requestUid": "[uuid]"
"semanticHitCount": 6
}
"###);
"#);
// hybrid search, distinct embedder
let (response, code) = server
@@ -3968,7 +3934,7 @@ async fn federation_vector_two_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]", ".**.requestUid" => "[uuid]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".**._rankingScore" => "[score]" }), @r#"
{
"hits": [
{
@@ -4195,8 +4161,7 @@ async fn federation_vector_two_indexes() {
0.6
]
},
"semanticHitCount": 8,
"requestUid": "[uuid]"
"semanticHitCount": 8
}
"#);
}
@@ -4244,7 +4209,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4415,8 +4380,7 @@ async fn federation_facets_different_indexes_same_facet() {
},
"stats": {}
}
},
"requestUid": "[uuid]"
}
}
"###);
@@ -4435,7 +4399,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4577,8 +4541,7 @@ async fn federation_facets_different_indexes_same_facet() {
"Shazam!": 1
}
},
"facetStats": {},
"requestUid": "[uuid]"
"facetStats": {}
}
"###);
@@ -4598,7 +4561,7 @@ async fn federation_facets_different_indexes_same_facet() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4723,8 +4686,7 @@ async fn federation_facets_different_indexes_same_facet() {
"distribution": {},
"stats": {}
}
},
"requestUid": "[uuid]"
}
}
"###);
}
@@ -4786,7 +4748,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4844,8 +4806,7 @@ async fn federation_facets_same_indexes() {
}
}
}
},
"requestUid": "[uuid]"
}
}
"###);
@@ -4861,7 +4822,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -4947,8 +4908,7 @@ async fn federation_facets_same_indexes() {
}
}
}
},
"requestUid": "[uuid]"
}
}
"###);
@@ -4965,7 +4925,7 @@ async fn federation_facets_same_indexes() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -5027,8 +4987,7 @@ async fn federation_facets_same_indexes() {
"min": 2.0,
"max": 6.0
}
},
"requestUid": "[uuid]"
}
}
"###);
}
@@ -5081,7 +5040,7 @@ async fn federation_inconsistent_merge_order() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -5258,8 +5217,7 @@ async fn federation_inconsistent_merge_order() {
},
"stats": {}
}
},
"requestUid": "[uuid]"
}
}
"###);
@@ -5306,7 +5264,7 @@ async fn federation_inconsistent_merge_order() {
]}))
.await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@@ -5446,8 +5404,7 @@ async fn federation_inconsistent_merge_order() {
"Batman Returns": 1
}
},
"facetStats": {},
"requestUid": "[uuid]"
"facetStats": {}
}
"###);
}

View File

@@ -2,7 +2,8 @@ use std::sync::Arc;
use actix_http::StatusCode;
use meili_snap::{json_string, snapshot};
use wiremock::matchers::{method, path, AnyMatcher};
use wiremock::matchers::method;
use wiremock::matchers::{path, AnyMatcher};
use wiremock::{Mock, MockServer, Request, ResponseTemplate};
use crate::common::{Server, Value, SCORE_DOCUMENTS};
@@ -229,7 +230,7 @@ async fn remote_sharding() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -287,13 +288,12 @@ async fn remote_sharding() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -351,13 +351,12 @@ async fn remote_sharding() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, _status_code) = ms2.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -415,7 +414,6 @@ async fn remote_sharding() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -597,7 +595,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -622,10 +620,9 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
"#);
// multi vector search: two local queries, one remote
@@ -673,7 +670,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -698,7 +695,6 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"#);
@@ -749,7 +745,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r#"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -774,7 +770,6 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"#);
@@ -825,7 +820,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -845,10 +840,9 @@ async fn remote_sharding_retrieve_vectors() {
]
},
"semanticHitCount": 0,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
"#);
// multi vector search: no local queries, all remote
@@ -896,7 +890,7 @@ async fn remote_sharding_retrieve_vectors() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
{
"hits": [],
"processingTimeMs": "[time]",
@@ -920,10 +914,9 @@ async fn remote_sharding_retrieve_vectors() {
0.2
]
},
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
"#);
}
#[actix_rt::test]
@@ -1141,7 +1134,7 @@ async fn error_no_weighted_score() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -1169,7 +1162,6 @@ async fn error_no_weighted_score() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote hit does not contain `._federation.weightedScoreValues`\n - hint: check that the remote instance is a Meilisearch instance running the same version",
@@ -1281,7 +1273,7 @@ async fn error_bad_response() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -1309,7 +1301,6 @@ async fn error_bad_response() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "could not parse response from the remote host as a federated search response:\n - response from remote: <html>Returning an HTML page</html>\n - hint: check that the remote instance is a Meilisearch instance running the same version",
@@ -1414,7 +1405,7 @@ async fn error_bad_request() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -1442,7 +1433,6 @@ async fn error_bad_request() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.queries[1]`: Index `nottest` not found.\",\"code\":\"index_not_found\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#index_not_found\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -1552,7 +1542,7 @@ async fn error_bad_request_facets_by_index() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -1586,7 +1576,6 @@ async fn error_bad_request_facets_by_index() {
"stats": {}
}
},
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.federation.facetsByIndex.test0`: Index `test0` not found.\\n - Note: index `test0` is not used in queries\",\"code\":\"index_not_found\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#index_not_found\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -1699,7 +1688,7 @@ async fn error_bad_request_facets_by_index_facet() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -1738,7 +1727,6 @@ async fn error_bad_request_facets_by_index_facet() {
"stats": {}
}
},
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.federation.facetsByIndex.test`: Invalid facet distribution: Attribute `id` is not filterable. This index does not have configured filterable attributes.\\n - Note: index `test` used in `.queries[1]`\",\"code\":\"invalid_multi_search_facets\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#invalid_multi_search_facets\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -2048,7 +2036,7 @@ async fn error_remote_404() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2076,7 +2064,6 @@ async fn error_remote_404() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 404:\n - response from remote: null\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
@@ -2089,7 +2076,7 @@ async fn error_remote_404() {
"###);
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2127,7 +2114,6 @@ async fn error_remote_404() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2259,7 +2245,7 @@ async fn error_remote_sharding_auth() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2287,7 +2273,6 @@ async fn error_remote_sharding_auth() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1-notsearch": {
"message": "could not authenticate against the remote host\n - hint: check that the remote instance was registered with a valid API key having the `search` action",
@@ -2421,7 +2406,7 @@ async fn remote_sharding_auth() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2469,7 +2454,6 @@ async fn remote_sharding_auth() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 4,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2572,7 +2556,7 @@ async fn error_remote_500() {
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2600,7 +2584,6 @@ async fn error_remote_500() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2,
"requestUid": "[uuid]",
"remoteErrors": {
"ms1": {
"message": "remote host responded with code 500:\n - response from remote: {\"error\":\"provoked error\",\"code\":\"test_error\",\"link\":\"https://docs.meilisearch.com/errors#test_error\"}",
@@ -2614,7 +2597,7 @@ async fn error_remote_500() {
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
// the response is full because we queried the instance that works
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2652,7 +2635,6 @@ async fn error_remote_500() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2756,7 +2738,7 @@ async fn error_remote_500_once() {
// Meilisearch is tolerant to a single failure
let (response, _status_code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2794,13 +2776,12 @@ async fn error_remote_500_once() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, _status_code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
@@ -2838,7 +2819,6 @@ async fn error_remote_500_once() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 3,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
@@ -2866,8 +2846,7 @@ async fn error_remote_timeout() {
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
"remotes": {},
"sharding": false
"remotes": {}
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
@@ -2875,8 +2854,7 @@ async fn error_remote_timeout() {
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
"remotes": {},
"sharding": false
"remotes": {}
}
"###);
@@ -2897,7 +2875,7 @@ async fn error_remote_timeout() {
let rms0 = LocalMeili::new(ms0.clone()).await;
let rms1 = LocalMeili::with_params(
ms1.clone(),
LocalMeiliParams { delay: Some(std::time::Duration::from_secs(31)), ..Default::default() },
LocalMeiliParams { delay: Some(std::time::Duration::from_secs(6)), ..Default::default() },
)
.await;

View File

@@ -1,8 +1,7 @@
use meili_snap::{json_string, snapshot};
use super::shared_index_with_documents;
use crate::common::Server;
use crate::json;
use meili_snap::{json_string, snapshot};
#[actix_rt::test]
async fn default_search_should_return_estimated_total_hit() {
@@ -175,8 +174,7 @@ async fn test_issue_5274() {
snapshot!(json_string!(rep, {
".processingTimeMs" => "[ignored]",
".requestUid" => "[uuid]"
}), @r###"
}), @r#"
{
"hits": [
{
@@ -190,8 +188,7 @@ async fn test_issue_5274() {
"hitsPerPage": 1,
"page": 1,
"totalPages": 1,
"totalHits": 1,
"requestUid": "[uuid]"
"totalHits": 1
}
"###);
"#);
}

View File

@@ -1,7 +1,6 @@
use meili_snap::{json_string, snapshot};
use crate::common::Server;
use crate::json;
use meili_snap::{json_string, snapshot};
#[actix_rt::test]
async fn set_reset_chat_issue_5772() {

View File

@@ -339,14 +339,14 @@ async fn filter_invalid_syntax_object() {
index
.similar(json!({"id": 287947, "filter": "title & Glass", "embedder": "manual"}), |response, code| {
snapshot!(response, @r#"
snapshot!(response, @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `title & Glass`.\n1:14 title & Glass",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_similar_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_similar_filter"
}
"#);
"###);
snapshot!(code, @"400 Bad Request");
})
.await;
@@ -377,14 +377,14 @@ async fn filter_invalid_syntax_array() {
index
.similar(json!({"id": 287947, "filter": ["title & Glass"], "embedder": "manual"}), |response, code| {
snapshot!(response, @r#"
snapshot!(response, @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, `_geoBoundingBox` or `_geoPolygon` at `title & Glass`.\n1:14 title & Glass",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `CONTAINS`, `NOT CONTAINS`, `STARTS WITH`, `NOT STARTS WITH`, `_geoRadius`, or `_geoBoundingBox` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_similar_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_similar_filter"
}
"#);
"###);
snapshot!(code, @"400 Bad Request");
})
.await;

View File

@@ -97,7 +97,7 @@ async fn task_bad_types() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -108,7 +108,7 @@ async fn task_bad_types() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -119,7 +119,7 @@ async fn task_bad_types() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"

View File

@@ -43,7 +43,7 @@ async fn version_too_old() {
std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.24.0");
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.21.0");
}
#[actix_rt::test]
@@ -58,7 +58,7 @@ async fn version_requires_downgrade() {
std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.24.1 is higher than the Meilisearch version 1.24.0. Downgrade is not supported");
snapshot!(err, @"Database version 1.21.1 is higher than the Meilisearch version 1.21.0. Downgrade is not supported");
}
#[actix_rt::test]

View File

@@ -1,5 +1,6 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
"hits": [
@@ -20,6 +21,5 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 1,
"requestUid": "[uuid]"
"estimatedTotalHits": 1
}

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.21.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -294,7 +294,7 @@ async fn check_the_index_features(server: &Server) {
let (results, _status) =
kefir.search_post(json!({ "sort": ["age:asc"], "filter": "surname = kefirounet" })).await;
snapshot!(json_string!(results, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), name: "search_with_sort_and_filter");
snapshot!(results, name: "search_with_sort_and_filter");
// ensuring we can get the vectors and their `regenerate` is still good.
let (results, _status) = kefir.search_post(json!({"retrieveVectors": true})).await;

View File

@@ -323,7 +323,7 @@ async fn binary_quantize_clear_documents() {
// Make sure the vector DB has been cleared
let (documents, _code) =
index.search_post(json!({ "hybrid": { "embedder": "manual" }, "vector": [1, 1, 1] })).await;
snapshot!(json_string!(documents, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
snapshot!(documents, @r#"
{
"hits": [],
"query": "",
@@ -331,10 +331,9 @@ async fn binary_quantize_clear_documents() {
"limit": 20,
"offset": 0,
"estimatedTotalHits": 0,
"requestUid": "[uuid]",
"semanticHitCount": 0
}
"###);
"#);
}
#[actix_rt::test]

View File

@@ -257,7 +257,7 @@ async fn search_with_vector() {
 json!({"vector": [1.0, 1.0, 1.0], "hybrid": {"semanticRatio": 1.0, "embedder": "rest"}, "limit": 1}
 )).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
+snapshot!(value, @r#"
 {
 "hits": [
 {
@@ -270,10 +270,9 @@ async fn search_with_vector() {
 "limit": 1,
 "offset": 0,
 "estimatedTotalHits": 4,
-"requestUid": "[uuid]",
 "semanticHitCount": 1
 }
-"###);
+"#);
 }
 #[actix_rt::test]
@@ -289,7 +288,7 @@ async fn search_with_media() {
 ))
 .await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
+snapshot!(value, @r#"
 {
 "hits": [
 {
@@ -303,10 +302,9 @@ async fn search_with_media() {
 "limit": 1,
 "offset": 0,
 "estimatedTotalHits": 4,
-"requestUid": "[uuid]",
 "semanticHitCount": 1
 }
-"###);
+"#);
 }
 #[actix_rt::test]
@@ -392,7 +390,7 @@ async fn search_with_query() {
 ))
 .await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
+snapshot!(value, @r#"
 {
 "hits": [
 {
@@ -406,10 +404,9 @@ async fn search_with_query() {
 "limit": 1,
 "offset": 0,
 "estimatedTotalHits": 4,
-"requestUid": "[uuid]",
 "semanticHitCount": 1
 }
-"###);
+"#);
 }
 #[actix_rt::test]
@@ -2079,7 +2076,7 @@ async fn composite() {
 json!({"vector": [1.0, 1.0, 1.0], "hybrid": {"semanticRatio": 1.0, "embedder": "rest"}, "limit": 1}
 )).await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(value, { ".requestUid" => "[uuid]", ".processingTimeMs" => "[duration]" }), @r###"
+snapshot!(value, @r#"
 {
 "hits": [
 {
@@ -2092,10 +2089,9 @@ async fn composite() {
 "limit": 1,
 "offset": 0,
 "estimatedTotalHits": 4,
-"requestUid": "[uuid]",
 "semanticHitCount": 1
 }
-"###);
+"#);
 let (value, code) = index
 .search_post(
@@ -2104,7 +2100,7 @@ async fn composite() {
 )
 .await;
 snapshot!(code, @"200 OK");
-snapshot!(json_string!(value, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r#"
+snapshot!(value, @r#"
 {
 "hits": [
 {
@@ -2118,7 +2114,6 @@ async fn composite() {
 "limit": 1,
 "offset": 0,
 "estimatedTotalHits": 4,
-"requestUid": "[uuid]",
 "semanticHitCount": 1
 }
 "#);

View File

@@ -689,7 +689,7 @@ async fn clear_documents() {
 // Make sure the vector DB has been cleared
 let (documents, _code) =
 index.search_post(json!({ "vector": [1, 1, 1], "hybrid": {"embedder": "manual"} })).await;
-snapshot!(json_string!(documents, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
+snapshot!(documents, @r#"
 {
 "hits": [],
 "query": "",
@@ -697,10 +697,9 @@ async fn clear_documents() {
 "limit": 20,
 "offset": 0,
 "estimatedTotalHits": 0,
-"requestUid": "[uuid]",
 "semanticHitCount": 0
 }
-"###);
+"#);
 }
 #[actix_rt::test]
@@ -744,7 +743,7 @@ async fn add_remove_one_vector_4588() {
 json!({"vector": [1, 1, 1], "hybrid": {"semanticRatio": 1.0, "embedder": "manual"} }),
 )
 .await;
-snapshot!(json_string!(documents, { ".processingTimeMs" => "[duration]", ".requestUid" => "[uuid]" }), @r###"
+snapshot!(documents, @r#"
 {
 "hits": [
 {
@@ -757,10 +756,9 @@ async fn add_remove_one_vector_4588() {
 "limit": 20,
 "offset": 0,
 "estimatedTotalHits": 1,
-"requestUid": "[uuid]",
 "semanticHitCount": 1
 }
-"###);
+"#);
 let (documents, _code) = index
 .get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })

View File

@@ -1,5 +1,3 @@
-#![allow(clippy::result_large_err)]
 use std::fs::{read_dir, read_to_string, remove_file, File};
 use std::io::{BufWriter, Write as _};
 use std::path::PathBuf;
@@ -126,7 +124,7 @@ enum Command {
 /// before running the copy and compaction. This way the current indexation must finish before
 /// the compaction operation can start. Once the compaction is done, the big index is replaced
 /// by the compacted one and the mutable transaction is released.
-IndexCompaction { index_name: String },
+CompactIndex { index_name: String },
 /// Uses the hair dryer the dedicate pages hot in cache
 ///
@@ -165,7 +163,7 @@ fn main() -> anyhow::Result<()> {
 let target_version = parse_version(&target_version).context("While parsing `--target-version`. Make sure `--target-version` is in the format MAJOR.MINOR.PATCH")?;
 OfflineUpgrade { db_path, current_version: detected_version, target_version }.upgrade()
 }
-Command::IndexCompaction { index_name } => compact_index(db_path, &index_name),
+Command::CompactIndex { index_name } => compact_index(db_path, &index_name),
 Command::HairDryer { index_name, index_part } => {
 hair_dryer(db_path, &index_name, &index_part)
 }
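(Aside, not part of the diff: the doc comment above describes a lock-copy-swap sequence. A rough sketch of that ordering, assuming a heed `Env`; `copy_compacted` is a deliberate stub, not the real meilitool internals.)

use std::path::Path;
use heed::Env;

// Stub: heed can produce a compacted copy of an environment, but the exact
// call is intentionally left out here rather than guessed.
fn copy_compacted(_env: &Env, _dst: &Path) -> anyhow::Result<()> {
    todo!("write a compacted copy of the environment into `_dst`")
}

fn compact_index_sketch(env: &Env, index_dir: &Path) -> anyhow::Result<()> {
    // Taking the write transaction first blocks until any in-flight
    // indexation commits, so the copy never races a writer.
    let wtxn = env.write_txn()?;
    let tmp = index_dir.join("data.mdb.compacting");
    copy_compacted(env, &tmp)?;
    // The big index is replaced by the compacted one...
    std::fs::rename(&tmp, index_dir.join("data.mdb"))?;
    // ...and only then is the mutable transaction released.
    drop(wtxn);
    Ok(())
}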

View File

@@ -19,7 +19,6 @@ bstr = "1.12.0"
 bytemuck = { version = "1.23.1", features = ["extern_crate_alloc"] }
 byteorder = "1.5.0"
 charabia = { version = "0.9.7", default-features = false }
-cellulite = "0.3.1-nested-rtxns-2"
 concat-arrays = "0.1.2"
 convert_case = "0.8.0"
 crossbeam-channel = "0.5.15"
@@ -28,13 +27,12 @@ either = { version = "1.15.0", features = ["serde"] }
 flatten-serde-json = { path = "../flatten-serde-json" }
 fst = "0.4.7"
 fxhash = "0.2.1"
-geojson = "0.24.2"
 geoutils = "0.5.1"
 grenad = { version = "0.5.0", default-features = false, features = [
 "rayon",
 "tempfile",
 ] }
-heed = { version = "0.22.1-nested-rtxns", default-features = false, features = [
+heed = { version = "0.22.0", default-features = false, features = [
 "serde-json",
 "serde-bincode",
 ] }
@@ -89,8 +87,8 @@ rhai = { version = "1.22.2", features = [
 "no_time",
 "sync",
 ] }
-arroy = "0.6.4-nested-rtxns"
-hannoy = { version = "0.0.9-nested-rtxns-2", features = ["arroy"] }
+arroy = "0.6.3"
+hannoy = { version = "0.0.8", features = ["arroy"] }
 rand = "0.8.5"
 tracing = "0.1.41"
 ureq = { version = "2.12.1", features = ["json"] }
@@ -98,7 +96,7 @@ url = "2.5.4"
 hashbrown = "0.15.4"
 bumpalo = "3.18.1"
 bumparaw-collections = "0.1.4"
-steppe = { version = "0.4", default-features = false }
+steppe = { version = "0.4.0", default-features = false }
 thread_local = "1.1.9"
 allocator-api2 = "0.3.0"
 rustc-hash = "2.1.1"
@@ -118,8 +116,6 @@ twox-hash = { version = "2.1.1", default-features = false, features = [
 "xxhash3_64",
 "xxhash64",
 ] }
-geo-types = "0.7.16"
-zerometry = "0.3.0"
 [dev-dependencies]
 mimalloc = { version = "0.1.47", default-features = false }
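(Aside, not part of the diff: the `-` side pins heed, arroy, and hannoy to `-nested-rtxns` builds. Without speculating on that patched API, the baseline pattern it extends is one read transaction per worker, sketched here with stock heed 0.22 and rayon; all names are illustrative.)

use heed::types::{SerdeJson, Str};
use heed::{Database, Env};
use rayon::prelude::*;

// Read transactions are cheap to open but must not be shared across
// threads, so each parallel task opens its own.
fn parallel_lookups(
    env: &Env,
    db: Database<Str, SerdeJson<serde_json::Value>>,
    keys: &[String],
) -> heed::Result<()> {
    keys.par_iter().try_for_each(|key| {
        let rtxn = env.read_txn()?;
        let _value = db.get(&rtxn, key)?;
        Ok(())
    })
}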

View File

@@ -11,4 +11,3 @@ const fn parse_u32(s: &str) -> u32 {
 pub const RESERVED_VECTORS_FIELD_NAME: &str = "_vectors";
 pub const RESERVED_GEO_FIELD_NAME: &str = "_geo";
-pub const RESERVED_GEOJSON_FIELD_NAME: &str = "_geojson";

View File

@@ -48,7 +48,6 @@ pub enum PrimaryKey<'a> {
 Nested { name: &'a str },
 }
-#[allow(clippy::large_enum_variant)]
 pub enum DocumentIdExtractionError {
 InvalidDocumentId(UserError),
 MissingDocumentId,

Some files were not shown because too many files have changed in this diff.