Compare commits

1 commit

JannisK89 · 7b7a27a3b2 · removed unnecessary borrow call · 2023-08-10 12:02:07 +02:00
111 changed files with 1229 additions and 4004 deletions

View File

@@ -2,8 +2,8 @@ name: Create issue to upgrade dependencies
on:
schedule:
# Run the first of the month, every 6 months
- cron: '0 0 1 */6 *'
# Run the first of the month, every 3 months
- cron: '0 0 1 */3 *'
workflow_dispatch:
jobs:
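
A note on the schedule change above: in a five-field cron expression the fourth field is the month, and a `*/N` step matches every Nth value starting from the field's minimum. A minimal Rust sketch (mine, not part of the diff) of how the two expressions expand:

// Months matched by a `*/step` expression in the month field (1-12).
fn months_for_step(step: u32) -> Vec<u32> {
    (1u32..=12).filter(|m| (m - 1) % step == 0).collect()
}

fn main() {
    assert_eq!(months_for_step(6), vec![1, 7]);        // '0 0 1 */6 *'
    assert_eq!(months_for_step(3), vec![1, 4, 7, 10]); // '0 0 1 */3 *'
}

So the upgrade-dependencies issue gets opened four times a year instead of twice.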

View File

@@ -53,6 +53,5 @@ jobs:
uses: mislav/bump-homebrew-formula-action@v2
with:
formula-name: meilisearch
formula-path: Formula/m/meilisearch.rb
env:
COMMITTER_TOKEN: ${{ secrets.HOMEBREW_COMMITTER_TOKEN }}

View File

@@ -14,7 +14,6 @@ on:
env:
MEILI_MASTER_KEY: 'masterKey'
MEILI_NO_ANALYTICS: 'true'
DISABLE_COVERAGE: 'true'
jobs:
define-docker-image:
@@ -31,117 +30,6 @@ jobs:
if [[ $event == 'workflow_dispatch' ]]; then
echo "docker-image=${{ github.event.inputs.docker_image }}" >> $GITHUB_OUTPUT
fi
- name: Docker image is ${{ steps.define-image.outputs.docker-image }}
run: echo "Docker image is ${{ steps.define-image.outputs.docker-image }}"
##########
## SDKs ##
##########
meilisearch-dotnet-tests:
needs: define-docker-image
name: .NET SDK tests
runs-on: ubuntu-latest
env:
MEILISEARCH_VERSION: ${{ needs.define-docker-image.outputs.docker-image }}
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-dotnet
- name: Setup .NET Core
uses: actions/setup-dotnet@v3
with:
dotnet-version: "6.0.x"
- name: Install dependencies
run: dotnet restore
- name: Build
run: dotnet build --configuration Release --no-restore
- name: Meilisearch (latest version) setup with Docker
run: docker compose up -d
- name: Run tests
run: dotnet test --no-restore --verbosity normal
meilisearch-dart-tests:
needs: define-docker-image
name: Dart SDK tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-dart
- uses: dart-lang/setup-dart@v1
with:
sdk: 3.1.1
- name: Install dependencies
run: dart pub get
- name: Run integration tests
run: dart test --concurrency=4
meilisearch-go-tests:
needs: define-docker-image
name: Go SDK tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: stable
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-go
- name: Get dependencies
run: |
go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
- name: Run integration tests
run: go test -v ./...
meilisearch-java-tests:
needs: define-docker-image
name: Java SDK tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-java
- name: Set up Java
uses: actions/setup-java@v3
with:
java-version: 8
distribution: 'zulu'
cache: gradle
- name: Grant execute permission for gradlew
run: chmod +x gradlew
- name: Build and run unit and integration tests
run: ./gradlew build integrationTest
meilisearch-js-tests:
needs: define-docker-image
@@ -178,6 +66,33 @@ jobs:
- name: Run Browser env
run: yarn test:env:browser
instant-meilisearch-tests:
needs: define-docker-image
name: instant-meilisearch tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/instant-meilisearch
- name: Setup node
uses: actions/setup-node@v3
with:
cache: yarn
- name: Install dependencies
run: yarn install
- name: Run tests
run: yarn test
- name: Build all the playgrounds and the packages
run: yarn build
meilisearch-php-tests:
needs: define-docker-image
name: PHP SDK tests
@@ -196,6 +111,8 @@ jobs:
repository: meilisearch/meilisearch-php
- name: Install PHP
uses: shivammathur/setup-php@v2
with:
coverage: none
- name: Validate composer.json and composer.lock
run: composer validate
- name: Install dependencies
@@ -232,6 +149,36 @@ jobs:
- name: Test with pytest
run: pipenv run pytest
meilisearch-go-tests:
needs: define-docker-image
name: Go SDK tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: stable
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-go
- name: Get dependencies
run: |
go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
- name: Run integration tests
run: go test -v ./...
meilisearch-ruby-tests:
needs: define-docker-image
name: Ruby SDK tests
@@ -277,110 +224,3 @@ jobs:
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose
meilisearch-swift-tests:
needs: define-docker-image
name: Swift SDK tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-swift
- name: Run tests
run: swift test
########################
## FRONT-END PLUGINS ##
########################
meilisearch-js-plugins-tests:
needs: define-docker-image
name: meilisearch-js-plugins tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-js-plugins
- name: Setup node
uses: actions/setup-node@v3
with:
cache: yarn
- name: Install dependencies
run: yarn install
- name: Run tests
run: yarn test
- name: Build all the playgrounds and the packages
run: yarn build
########################
## BACK-END PLUGINS ###
########################
meilisearch-rails-tests:
needs: define-docker-image
name: meilisearch-rails tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-rails
- name: Set up Ruby 3
uses: ruby/setup-ruby@v1
with:
ruby-version: 3
bundler-cache: true
- name: Run tests
run: bundle exec rspec
meilisearch-symfony-tests:
needs: define-docker-image
name: meilisearch-symfony tests
runs-on: ubuntu-latest
services:
meilisearch:
image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
env:
MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-symfony
- name: Install PHP
uses: shivammathur/setup-php@v2
with:
tools: composer:v2, flex
- name: Validate composer.json and composer.lock
run: composer validate
- name: Install dependencies
run: composer install --prefer-dist --no-progress --quiet
- name: Remove doctrine/annotations
run: composer remove --dev doctrine/annotations
- name: Run test suite
run: composer test:unit

View File

@@ -43,7 +43,7 @@ jobs:
toolchain: nightly
override: true
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.6.2
uses: Swatinem/rust-cache@v2.5.1
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:
@@ -65,7 +65,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.6.2
uses: Swatinem/rust-cache@v2.5.1
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:
@@ -123,10 +123,7 @@ jobs:
override: true
- name: Run cargo tree without default features and check lindera is not present
run: |
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -vqz lindera; then
echo "lindera has been found in the sources and it shouldn't"
exit 1
fi
cargo tree -f '{p} {f}' -e normal --no-default-features | grep lindera -vqz
- name: Run cargo tree with default features and check lindera is present
run: |
cargo tree -f '{p} {f}' -e normal | grep lindera -qz
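
Both the removed block and the one-liner turn grep's exit status on the `cargo tree` output into a pass/fail signal for the step. A rough Rust sketch of the intended "fail if lindera shows up without default features" check (my sketch, assuming `cargo` is on PATH, not the workflow's actual implementation):

use std::process::Command;

fn main() {
    // Ask cargo for the dependency tree without default features.
    let output = Command::new("cargo")
        .args(["tree", "-f", "{p} {f}", "-e", "normal", "--no-default-features"])
        .output()
        .expect("failed to run cargo tree");
    let tree = String::from_utf8_lossy(&output.stdout);
    // Fail the step when the lindera tokenizer leaks into the build.
    if tree.contains("lindera") {
        eprintln!("lindera has been found in the sources and it shouldn't");
        std::process::exit(1);
    }
}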
@@ -149,7 +146,7 @@ jobs:
toolchain: stable
override: true
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.6.2
uses: Swatinem/rust-cache@v2.5.1
- name: Run tests in debug
uses: actions-rs/cargo@v1
with:
@@ -164,11 +161,11 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: 1.71.1
toolchain: 1.69.0
override: true
components: clippy
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.6.2
uses: Swatinem/rust-cache@v2.5.1
- name: Run cargo clippy
uses: actions-rs/cargo@v1
with:
@@ -187,7 +184,7 @@ jobs:
override: true
components: rustfmt
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.6.2
uses: Swatinem/rust-cache@v2.5.1
- name: Run cargo fmt
# Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
# Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate

Cargo.lock (generated, 666 changed lines)

File diff suppressed because it is too large.

View File

@@ -18,7 +18,7 @@ members = [
]
[workspace.package]
version = "1.4.0"
version = "1.3.0"
authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
description = "Meilisearch HTTP server"
homepage = "https://meilisearch.com"

View File

@@ -262,9 +262,6 @@ pub(crate) mod test {
sortable_attributes: Setting::Set(btreeset! { S("age") }),
ranking_rules: Setting::NotSet,
stop_words: Setting::NotSet,
non_separator_tokens: Setting::NotSet,
separator_tokens: Setting::NotSet,
dictionary: Setting::NotSet,
synonyms: Setting::NotSet,
distinct_attribute: Setting::NotSet,
typo_tolerance: Setting::NotSet,

View File

@@ -340,9 +340,6 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
}
},
stop_words: settings.stop_words.into(),
non_separator_tokens: v6::Setting::NotSet,
separator_tokens: v6::Setting::NotSet,
dictionary: v6::Setting::NotSet,
synonyms: settings.synonyms.into(),
distinct_attribute: settings.distinct_attribute.into(),
typo_tolerance: match settings.typo_tolerance {

View File

@@ -1,24 +0,0 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
"displayedAttributes": [
"*"
],
"searchableAttributes": [
"*"
],
"filterableAttributes": [],
"sortableAttributes": [],
"rankingRules": [
"typo",
"words",
"proximity",
"attribute",
"exactness"
],
"stopWords": [],
"synonyms": {},
"distinctAttribute": null
}

View File

@@ -1,38 +0,0 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
"displayedAttributes": [
"*"
],
"searchableAttributes": [
"*"
],
"filterableAttributes": [],
"sortableAttributes": [],
"rankingRules": [
"typo",
"words",
"proximity",
"attribute",
"exactness"
],
"stopWords": [],
"synonyms": {
"android": [
"phone",
"smartphone"
],
"iphone": [
"phone",
"smartphone"
],
"phone": [
"android",
"iphone",
"smartphone"
]
},
"distinctAttribute": null
}

View File

@@ -1,31 +0,0 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
"displayedAttributes": [
"*"
],
"searchableAttributes": [
"*"
],
"filterableAttributes": [
"genres",
"id"
],
"sortableAttributes": [
"genres",
"id"
],
"rankingRules": [
"typo",
"words",
"proximity",
"attribute",
"exactness",
"release_date:asc"
],
"stopWords": [],
"synonyms": {},
"distinctAttribute": null
}

View File

@@ -14,7 +14,6 @@ license.workspace = true
[dependencies]
nom = "7.1.3"
nom_locate = "4.1.0"
unescaper = "0.1.2"
[dev-dependencies]
insta = "1.29.0"

View File

@@ -62,7 +62,6 @@ pub enum ErrorKind<'a> {
MisusedGeoRadius,
MisusedGeoBoundingBox,
InvalidPrimary,
InvalidEscapedNumber,
ExpectedEof,
ExpectedValue(ExpectedValueKind),
MalformedValue,
@@ -148,9 +147,6 @@ impl<'a> Display for Error<'a> {
let text = if input.trim().is_empty() { "but instead got nothing.".to_string() } else { format!("at `{}`.", escaped_input) };
writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` {}", text)?
}
ErrorKind::InvalidEscapedNumber => {
writeln!(f, "Found an invalid escaped sequence number: `{}`.", escaped_input)?
}
ErrorKind::ExpectedEof => {
writeln!(f, "Found unexpected characters at the end of the filter: `{}`. You probably forgot an `OR` or an `AND` rule.", escaped_input)?
}

View File

@@ -545,8 +545,6 @@ impl<'a> std::fmt::Display for Token<'a> {
#[cfg(test)]
pub mod tests {
use FilterCondition as Fc;
use super::*;
/// Create a raw [Token]. You must specify the string that appears BEFORE your element, followed by your element
@@ -558,22 +556,14 @@ pub mod tests {
unsafe { Span::new_from_raw_offset(offset, lines as u32, value, "") }.into()
}
fn p(s: &str) -> impl std::fmt::Display + '_ {
Fc::parse(s).unwrap().unwrap()
}
#[test]
fn parse_escaped() {
insta::assert_display_snapshot!(p(r#"title = 'foo\\'"#), @r#"{title} = {foo\}"#);
insta::assert_display_snapshot!(p(r#"title = 'foo\\\\'"#), @r#"{title} = {foo\\}"#);
insta::assert_display_snapshot!(p(r#"title = 'foo\\\\\\'"#), @r#"{title} = {foo\\\}"#);
insta::assert_display_snapshot!(p(r#"title = 'foo\\\\\\\\'"#), @r#"{title} = {foo\\\\}"#);
// but it also works with other sequences
insta::assert_display_snapshot!(p(r#"title = 'foo\x20\n\t\"\'"'"#), @"{title} = {foo \n\t\"\'\"}");
}
#[test]
fn parse() {
use FilterCondition as Fc;
fn p(s: &str) -> impl std::fmt::Display + '_ {
Fc::parse(s).unwrap().unwrap()
}
// Test equal
insta::assert_display_snapshot!(p("channel = Ponce"), @"{channel} = {Ponce}");
insta::assert_display_snapshot!(p("subscribers = 12"), @"{subscribers} = {12}");

View File

@@ -171,24 +171,7 @@ pub fn parse_value(input: Span) -> IResult<Token> {
})
})?;
match unescaper::unescape(value.value()) {
Ok(content) => {
if content.len() != value.value().len() {
Ok((input, Token::new(value.original_span(), Some(content))))
} else {
Ok((input, value))
}
}
Err(unescaper::Error::IncompleteStr(_)) => Err(nom::Err::Incomplete(nom::Needed::Unknown)),
Err(unescaper::Error::ParseIntError { .. }) => Err(nom::Err::Error(Error::new_from_kind(
value.original_span(),
ErrorKind::InvalidEscapedNumber,
))),
Err(unescaper::Error::InvalidChar { .. }) => Err(nom::Err::Error(Error::new_from_kind(
value.original_span(),
ErrorKind::MalformedValue,
))),
}
Ok((input, value))
}
fn is_value_component(c: char) -> bool {
@@ -335,17 +318,17 @@ pub mod test {
("\"cha'nnel\"", "cha'nnel", false),
("I'm tamo", "I", false),
// escaped thing but not quote
(r#""\\""#, r#"\"#, true),
(r#""\\\\\\""#, r#"\\\"#, true),
(r#""aa\\aa""#, r#"aa\aa"#, true),
(r#""\\""#, r#"\\"#, false),
(r#""\\\\\\""#, r#"\\\\\\"#, false),
(r#""aa\\aa""#, r#"aa\\aa"#, false),
// with double quote
(r#""Hello \"world\"""#, r#"Hello "world""#, true),
(r#""Hello \\\"world\\\"""#, r#"Hello \"world\""#, true),
(r#""Hello \\\"world\\\"""#, r#"Hello \\"world\\""#, true),
(r#""I'm \"super\" tamo""#, r#"I'm "super" tamo"#, true),
(r#""\"\"""#, r#""""#, true),
// with simple quote
(r#"'Hello \'world\''"#, r#"Hello 'world'"#, true),
(r#"'Hello \\\'world\\\''"#, r#"Hello \'world\'"#, true),
(r#"'Hello \\\'world\\\''"#, r#"Hello \\'world\\'"#, true),
(r#"'I\'m "super" tamo'"#, r#"I'm "super" tamo"#, true),
(r#"'\'\''"#, r#"''"#, true),
];
@@ -367,14 +350,7 @@ pub mod test {
"Filter `{}` was not supposed to be escaped",
input
);
assert_eq!(
token.value(),
expected,
"Filter `{}` failed by giving `{}` instead of `{}`.",
input,
token.value(),
expected
);
assert_eq!(token.value(), expected, "Filter `{}` failed.", input);
}
}
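
The removed `unescaper` integration is what turned a written escape sequence into its character; after this change the parser keeps the quoted text as written, which is exactly what the rewritten test table above encodes (`"aa\\aa"` now yields `aa\\aa` instead of `aa\aa`, and the `escaped` flag flips to false). A toy sketch of the difference (my illustration, not the unescaper crate):

// Collapse backslash escapes: `\x` becomes `x`, so `\\` becomes `\`.
fn unescape_backslashes(s: &str) -> String {
    let mut out = String::new();
    let mut chars = s.chars();
    while let Some(c) = chars.next() {
        if c == '\\' {
            // Keep whatever follows the backslash, dropping the backslash itself.
            if let Some(next) = chars.next() {
                out.push(next);
            }
        } else {
            out.push(c);
        }
    }
    out
}

fn main() {
    let raw = r"aa\\aa";
    assert_eq!(unescape_backslashes(raw), r"aa\aa"); // with the removed unescaper step
    assert_eq!(raw, "aa\\\\aa");                     // without it, the value stays as written
}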

View File

@@ -13,7 +13,7 @@ license.workspace = true
[dependencies]
arbitrary = { version = "1.3.0", features = ["derive"] }
clap = { version = "4.3.0", features = ["derive"] }
fastrand = "2.0.0"
fastrand = "1.9.0"
milli = { path = "../milli" }
serde = { version = "1.0.160", features = ["derive"] }
serde_json = { version = "1.0.95", features = ["preserve_order"] }

View File

@@ -67,6 +67,10 @@ pub(crate) enum Batch {
op: IndexOperation,
must_create_index: bool,
},
IndexDocumentDeletionByFilter {
index_uid: String,
task: Task,
},
IndexCreation {
index_uid: String,
primary_key: Option<String>,
@@ -110,10 +114,6 @@ pub(crate) enum IndexOperation {
documents: Vec<Vec<String>>,
tasks: Vec<Task>,
},
IndexDocumentDeletionByFilter {
index_uid: String,
task: Task,
},
DocumentClear {
index_uid: String,
tasks: Vec<Task>,
@@ -155,6 +155,7 @@ impl Batch {
| Batch::TaskDeletion(task)
| Batch::Dump(task)
| Batch::IndexCreation { task, .. }
| Batch::IndexDocumentDeletionByFilter { task, .. }
| Batch::IndexUpdate { task, .. } => vec![task.uid],
Batch::SnapshotCreation(tasks) | Batch::IndexDeletion { tasks, .. } => {
tasks.iter().map(|task| task.uid).collect()
@@ -166,7 +167,6 @@ impl Batch {
| IndexOperation::DocumentClear { tasks, .. } => {
tasks.iter().map(|task| task.uid).collect()
}
IndexOperation::IndexDocumentDeletionByFilter { task, .. } => vec![task.uid],
IndexOperation::SettingsAndDocumentOperation {
document_import_tasks: tasks,
settings_tasks: other,
@@ -194,7 +194,8 @@ impl Batch {
IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid, .. } => Some(index_uid),
| IndexDeletion { index_uid, .. }
| IndexDocumentDeletionByFilter { index_uid, .. } => Some(index_uid),
}
}
}
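
The match arms above lean on or-patterns: several struct variants name the same field, so a single `|` arm binds it for all of them. A standalone sketch of the idiom (illustrative types, not the real scheduler):

enum Batch {
    IndexCreation { index_uid: String },
    IndexUpdate { index_uid: String },
    IndexDeletion { index_uid: String },
    IndexDocumentDeletionByFilter { index_uid: String },
    Dump,
}

fn index_uid(batch: &Batch) -> Option<&str> {
    use Batch::*;
    match batch {
        // One arm covers every variant that carries an `index_uid`.
        IndexCreation { index_uid }
        | IndexUpdate { index_uid }
        | IndexDeletion { index_uid }
        | IndexDocumentDeletionByFilter { index_uid } => Some(index_uid.as_str()),
        Dump => None,
    }
}

fn main() {
    let batch = Batch::IndexUpdate { index_uid: "movies".into() };
    assert_eq!(index_uid(&batch), Some("movies"));
}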
@@ -204,7 +205,6 @@ impl IndexOperation {
match self {
IndexOperation::DocumentOperation { index_uid, .. }
| IndexOperation::DocumentDeletion { index_uid, .. }
| IndexOperation::IndexDocumentDeletionByFilter { index_uid, .. }
| IndexOperation::DocumentClear { index_uid, .. }
| IndexOperation::Settings { index_uid, .. }
| IndexOperation::DocumentClearAndSetting { index_uid, .. }
@@ -239,12 +239,9 @@ impl IndexScheduler {
let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
match &task.kind {
KindWithContent::DocumentDeletionByFilter { index_uid, .. } => {
Ok(Some(Batch::IndexOperation {
op: IndexOperation::IndexDocumentDeletionByFilter {
index_uid: index_uid.clone(),
task,
},
must_create_index: false,
Ok(Some(Batch::IndexDocumentDeletionByFilter {
index_uid: index_uid.clone(),
task,
}))
}
_ => unreachable!(),
@@ -899,6 +896,51 @@ impl IndexScheduler {
Ok(tasks)
}
Batch::IndexDocumentDeletionByFilter { mut task, index_uid: _ } => {
let (index_uid, filter) =
if let KindWithContent::DocumentDeletionByFilter { index_uid, filter_expr } =
&task.kind
{
(index_uid, filter_expr)
} else {
unreachable!()
};
let index = {
let rtxn = self.env.read_txn()?;
self.index_mapper.index(&rtxn, index_uid)?
};
let deleted_documents = delete_document_by_filter(filter, index);
let original_filter = if let Some(Details::DocumentDeletionByFilter {
original_filter,
deleted_documents: _,
}) = task.details
{
original_filter
} else {
// In the case of a `documentDeleteByFilter` the details MUST be set
unreachable!();
};
match deleted_documents {
Ok(deleted_documents) => {
task.status = Status::Succeeded;
task.details = Some(Details::DocumentDeletionByFilter {
original_filter,
deleted_documents: Some(deleted_documents),
});
}
Err(e) => {
task.status = Status::Failed;
task.details = Some(Details::DocumentDeletionByFilter {
original_filter,
deleted_documents: Some(0),
});
task.error = Some(e.into());
}
}
Ok(vec![task])
}
Batch::IndexCreation { index_uid, primary_key, task } => {
let wtxn = self.env.write_txn()?;
if self.index_mapper.exists(&wtxn, &index_uid)? {
@@ -1257,47 +1299,6 @@ impl IndexScheduler {
Ok(tasks)
}
IndexOperation::IndexDocumentDeletionByFilter { mut task, index_uid: _ } => {
let filter =
if let KindWithContent::DocumentDeletionByFilter { filter_expr, .. } =
&task.kind
{
filter_expr
} else {
unreachable!()
};
let deleted_documents = delete_document_by_filter(index_wtxn, filter, index);
let original_filter = if let Some(Details::DocumentDeletionByFilter {
original_filter,
deleted_documents: _,
}) = task.details
{
original_filter
} else {
// In the case of a `documentDeleteByFilter` the details MUST be set
unreachable!();
};
match deleted_documents {
Ok(deleted_documents) => {
task.status = Status::Succeeded;
task.details = Some(Details::DocumentDeletionByFilter {
original_filter,
deleted_documents: Some(deleted_documents),
});
}
Err(e) => {
task.status = Status::Failed;
task.details = Some(Details::DocumentDeletionByFilter {
original_filter,
deleted_documents: Some(0),
});
task.error = Some(e.into());
}
}
Ok(vec![task])
}
IndexOperation::Settings { index_uid: _, settings, mut tasks } => {
let indexer_config = self.index_mapper.indexer_config();
let mut builder = milli::update::Settings::new(index_wtxn, index, indexer_config);
@@ -1497,22 +1498,23 @@ impl IndexScheduler {
}
}
fn delete_document_by_filter<'a>(
wtxn: &mut RwTxn<'a, '_>,
filter: &serde_json::Value,
index: &'a Index,
) -> Result<u64> {
fn delete_document_by_filter(filter: &serde_json::Value, index: Index) -> Result<u64> {
let filter = Filter::from_json(filter)?;
Ok(if let Some(filter) = filter {
let candidates = filter.evaluate(wtxn, index).map_err(|err| match err {
let mut wtxn = index.write_txn()?;
let candidates = filter.evaluate(&wtxn, &index).map_err(|err| match err {
milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
Error::from(err).with_custom_error_code(Code::InvalidDocumentFilter)
}
e => e.into(),
})?;
let mut delete_operation = DeleteDocuments::new(wtxn, index)?;
let mut delete_operation = DeleteDocuments::new(&mut wtxn, &index)?;
delete_operation.delete_documents(&candidates);
delete_operation.execute().map(|result| result.deleted_documents)?
let deleted_documents =
delete_operation.execute().map(|result| result.deleted_documents)?;
wtxn.commit()?;
deleted_documents
} else {
0
})
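
Worth noting in the rewritten helper: it now receives the `Index` by value, opens its own write transaction, and commits before returning, where the previous version borrowed a transaction owned by the caller. A toy sketch of that ownership shift (stub types, not the milli API):

struct WriteTxn;
impl WriteTxn {
    // Committing consumes the transaction, so it cannot be reused afterwards.
    fn commit(self) -> Result<(), String> { Ok(()) }
}

struct Index;
impl Index {
    fn write_txn(&self) -> Result<WriteTxn, String> { Ok(WriteTxn) }
    fn delete_matching(&self, _txn: &mut WriteTxn, filter: &str) -> u64 {
        // Pretend the filter matched a fixed number of documents.
        if filter.is_empty() { 0 } else { 42 }
    }
}

fn delete_document_by_filter(filter: &str, index: Index) -> Result<u64, String> {
    let mut wtxn = index.write_txn()?; // the helper owns the transaction now
    let deleted = index.delete_matching(&mut wtxn, filter);
    wtxn.commit()?; // and commits before handing back the count
    Ok(deleted)
}

fn main() {
    assert_eq!(delete_document_by_filter("doggo = true", Index), Ok(42));
}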

View File

@@ -790,19 +790,10 @@ impl IndexScheduler {
let mut res = BTreeMap::new();
let processing_tasks = { self.processing_tasks.read().unwrap().processing.len() };
res.insert(
"statuses".to_string(),
enum_iterator::all::<Status>()
.map(|s| {
let tasks = self.get_status(&rtxn, s)?.len();
match s {
Status::Enqueued => Ok((s.to_string(), tasks - processing_tasks)),
Status::Processing => Ok((s.to_string(), processing_tasks)),
s => Ok((s.to_string(), tasks)),
}
})
.map(|s| Ok((s.to_string(), self.get_status(&rtxn, s)?.len())))
.collect::<Result<BTreeMap<String, u64>>>()?,
);
res.insert(
@@ -4140,154 +4131,4 @@ mod tests {
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
drop(rtxn);
}
#[test]
fn basic_get_stats() {
let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);
let kind = index_creation_task("catto", "mouse");
let _task = index_scheduler.register(kind).unwrap();
let kind = index_creation_task("doggo", "sheep");
let _task = index_scheduler.register(kind).unwrap();
let kind = index_creation_task("whalo", "fish");
let _task = index_scheduler.register(kind).unwrap();
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
{
"indexes": {
"catto": 1,
"doggo": 1,
"whalo": 1
},
"statuses": {
"canceled": 0,
"enqueued": 3,
"failed": 0,
"processing": 0,
"succeeded": 0
},
"types": {
"documentAdditionOrUpdate": 0,
"documentDeletion": 0,
"dumpCreation": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
"taskDeletion": 0
}
}
"###);
handle.advance_till([Start, BatchCreated]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
{
"indexes": {
"catto": 1,
"doggo": 1,
"whalo": 1
},
"statuses": {
"canceled": 0,
"enqueued": 2,
"failed": 0,
"processing": 1,
"succeeded": 0
},
"types": {
"documentAdditionOrUpdate": 0,
"documentDeletion": 0,
"dumpCreation": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
"taskDeletion": 0
}
}
"###);
handle.advance_till([
InsideProcessBatch,
InsideProcessBatch,
ProcessBatchSucceeded,
AfterProcessing,
Start,
BatchCreated,
]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
{
"indexes": {
"catto": 1,
"doggo": 1,
"whalo": 1
},
"statuses": {
"canceled": 0,
"enqueued": 1,
"failed": 0,
"processing": 1,
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 0,
"documentDeletion": 0,
"dumpCreation": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
"taskDeletion": 0
}
}
"###);
// now we make one more batch, the started_at field of the new tasks will be past `second_start_time`
handle.advance_till([
InsideProcessBatch,
InsideProcessBatch,
ProcessBatchSucceeded,
AfterProcessing,
Start,
BatchCreated,
]);
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
{
"indexes": {
"catto": 1,
"doggo": 1,
"whalo": 1
},
"statuses": {
"canceled": 0,
"enqueued": 0,
"failed": 0,
"processing": 1,
"succeeded": 2
},
"types": {
"documentAdditionOrUpdate": 0,
"documentDeletion": 0,
"dumpCreation": 0,
"indexCreation": 3,
"indexDeletion": 0,
"indexSwap": 0,
"indexUpdate": 0,
"settingsUpdate": 0,
"snapshotCreation": 0,
"taskCancelation": 0,
"taskDeletion": 0
}
}
"###);
}
}
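
The removed special-casing matters because tasks currently being processed are still stored under `enqueued`; the old arms subtracted them from the enqueued count and reported them separately, which is what the removed `basic_get_stats` snapshots above show (3 enqueued becomes 2 enqueued + 1 processing once a batch starts). A small sketch of that accounting, with toy numbers mirroring the snapshots:

fn main() {
    // What get_status(Enqueued) returns: every stored enqueued task,
    // including the one currently inside a batch.
    let stored_enqueued: u64 = 3;
    // Length of the in-memory processing set once a batch has started.
    let processing: u64 = 1;
    let reported_enqueued = stored_enqueued - processing;
    assert_eq!((reported_enqueued, processing), (2, 1));
}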

View File

@@ -167,9 +167,7 @@ macro_rules! snapshot {
let (settings, snap_name, _) = $crate::default_snapshot_settings_for_test(test_name, Some(&snap_name));
settings.bind(|| {
let snap = format!("{}", $value);
insta::allow_duplicates! {
meili_snap::insta::assert_snapshot!(format!("{}", snap_name), snap);
}
meili_snap::insta::assert_snapshot!(format!("{}", snap_name), snap);
});
};
($value:expr, @$inline:literal) => {
@@ -178,9 +176,7 @@ macro_rules! snapshot {
let (settings, _, _) = $crate::default_snapshot_settings_for_test("", Some("_dummy_argument"));
settings.bind(|| {
let snap = format!("{}", $value);
insta::allow_duplicates! {
meili_snap::insta::assert_snapshot!(snap, @$inline);
}
meili_snap::insta::assert_snapshot!(snap, @$inline);
});
};
($value:expr) => {
@@ -198,9 +194,7 @@ macro_rules! snapshot {
let (settings, snap_name, _) = $crate::default_snapshot_settings_for_test(test_name, None);
settings.bind(|| {
let snap = format!("{}", $value);
insta::allow_duplicates! {
meili_snap::insta::assert_snapshot!(format!("{}", snap_name), snap);
}
meili_snap::insta::assert_snapshot!(format!("{}", snap_name), snap);
});
};
}

View File

@@ -129,9 +129,6 @@ impl HeedAuthStore {
Action::DumpsAll => {
actions.insert(Action::DumpsCreate);
}
Action::SnapshotsAll => {
actions.insert(Action::SnapshotsCreate);
}
Action::TasksAll => {
actions.extend([Action::TasksGet, Action::TasksDelete, Action::TasksCancel]);
}
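
The `match` above expands wildcard actions into the concrete actions they imply before they are stored. A condensed sketch with a trimmed-down action set (not the full enum):

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Action { DumpsAll, DumpsCreate, TasksAll, TasksGet, TasksDelete, TasksCancel }

fn expand(action: Action, actions: &mut HashSet<Action>) {
    actions.insert(action);
    match action {
        // `dumps.*` implies `dumps.create`.
        Action::DumpsAll => { actions.insert(Action::DumpsCreate); }
        // `tasks.*` implies get, delete, and cancel.
        Action::TasksAll => {
            actions.extend([Action::TasksGet, Action::TasksDelete, Action::TasksCancel]);
        }
        _ => (),
    }
}

fn main() {
    let mut actions = HashSet::new();
    expand(Action::TasksAll, &mut actions);
    assert!(actions.contains(&Action::TasksGet));
}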

View File

@@ -15,13 +15,13 @@ actix-web = { version = "4.3.1", default-features = false }
anyhow = "1.0.70"
convert_case = "0.6.0"
csv = "1.2.1"
deserr = { version = "0.6.0", features = ["actix-web"]}
deserr = "0.5.0"
either = { version = "1.8.1", features = ["serde"] }
enum-iterator = "1.4.0"
file-store = { path = "../file-store" }
flate2 = "1.0.25"
fst = "0.4.7"
memmap2 = "0.7.1"
memmap2 = "0.5.10"
milli = { path = "../milli" }
roaring = { version = "0.10.1", features = ["serde"] }
serde = { version = "1.0.160", features = ["derive"] }

View File

@@ -259,9 +259,6 @@ InvalidSettingsRankingRules , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSearchableAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSortableAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsStopWords , InvalidRequest , BAD_REQUEST ;
InvalidSettingsNonSeparatorTokens , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSeparatorTokens , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDictionary , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSynonyms , InvalidRequest , BAD_REQUEST ;
InvalidSettingsTypoTolerance , InvalidRequest , BAD_REQUEST ;
InvalidState , Internal , INTERNAL_SERVER_ERROR ;

View File

@@ -257,12 +257,6 @@ pub enum Action {
#[serde(rename = "dumps.create")]
#[deserr(rename = "dumps.create")]
DumpsCreate,
#[serde(rename = "snapshots.*")]
#[deserr(rename = "snapshots.*")]
SnapshotsAll,
#[serde(rename = "snapshots.create")]
#[deserr(rename = "snapshots.create")]
SnapshotsCreate,
#[serde(rename = "version")]
#[deserr(rename = "version")]
Version,
@@ -315,7 +309,6 @@ impl Action {
METRICS_GET => Some(Self::MetricsGet),
DUMPS_ALL => Some(Self::DumpsAll),
DUMPS_CREATE => Some(Self::DumpsCreate),
SNAPSHOTS_CREATE => Some(Self::SnapshotsCreate),
VERSION => Some(Self::Version),
KEYS_CREATE => Some(Self::KeysAdd),
KEYS_GET => Some(Self::KeysGet),
@@ -360,7 +353,6 @@ pub mod actions {
pub const METRICS_GET: u8 = MetricsGet.repr();
pub const DUMPS_ALL: u8 = DumpsAll.repr();
pub const DUMPS_CREATE: u8 = DumpsCreate.repr();
pub const SNAPSHOTS_CREATE: u8 = SnapshotsCreate.repr();
pub const VERSION: u8 = Version.repr();
pub const KEYS_CREATE: u8 = KeysAdd.repr();
pub const KEYS_GET: u8 = KeysGet.repr();
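
The removed variants follow this file's rename convention: each action serializes to its dotted string via `#[serde(rename = ...)]` (and the parallel `#[deserr(rename = ...)]`). A minimal sketch with serde alone, assuming the serde derive feature and serde_json that the workspace already depends on:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum Action {
    #[serde(rename = "dumps.create")]
    DumpsCreate,
    #[serde(rename = "snapshots.create")]
    SnapshotsCreate,
    #[serde(rename = "version")]
    Version,
}

fn main() {
    // Unit variants serialize to their renamed string.
    let json = serde_json::to_string(&Action::SnapshotsCreate).unwrap();
    assert_eq!(json, r#""snapshots.create""#);
}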

View File

@@ -171,15 +171,6 @@ pub struct Settings<T> {
#[deserr(default, error = DeserrJsonError<InvalidSettingsStopWords>)]
pub stop_words: Setting<BTreeSet<String>>,
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
#[deserr(default, error = DeserrJsonError<InvalidSettingsNonSeparatorTokens>)]
pub non_separator_tokens: Setting<BTreeSet<String>>,
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
#[deserr(default, error = DeserrJsonError<InvalidSettingsSeparatorTokens>)]
pub separator_tokens: Setting<BTreeSet<String>>,
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
#[deserr(default, error = DeserrJsonError<InvalidSettingsDictionary>)]
pub dictionary: Setting<BTreeSet<String>>,
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
#[deserr(default, error = DeserrJsonError<InvalidSettingsSynonyms>)]
pub synonyms: Setting<BTreeMap<String, Vec<String>>>,
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
@@ -210,9 +201,6 @@ impl Settings<Checked> {
ranking_rules: Setting::Reset,
stop_words: Setting::Reset,
synonyms: Setting::Reset,
non_separator_tokens: Setting::Reset,
separator_tokens: Setting::Reset,
dictionary: Setting::Reset,
distinct_attribute: Setting::Reset,
typo_tolerance: Setting::Reset,
faceting: Setting::Reset,
@@ -229,9 +217,6 @@ impl Settings<Checked> {
sortable_attributes,
ranking_rules,
stop_words,
non_separator_tokens,
separator_tokens,
dictionary,
synonyms,
distinct_attribute,
typo_tolerance,
@@ -247,9 +232,6 @@ impl Settings<Checked> {
sortable_attributes,
ranking_rules,
stop_words,
non_separator_tokens,
separator_tokens,
dictionary,
synonyms,
distinct_attribute,
typo_tolerance,
@@ -292,9 +274,6 @@ impl Settings<Unchecked> {
ranking_rules: self.ranking_rules,
stop_words: self.stop_words,
synonyms: self.synonyms,
non_separator_tokens: self.non_separator_tokens,
separator_tokens: self.separator_tokens,
dictionary: self.dictionary,
distinct_attribute: self.distinct_attribute,
typo_tolerance: self.typo_tolerance,
faceting: self.faceting,
@@ -356,28 +335,6 @@ pub fn apply_settings_to_builder(
Setting::NotSet => (),
}
match settings.non_separator_tokens {
Setting::Set(ref non_separator_tokens) => {
builder.set_non_separator_tokens(non_separator_tokens.clone())
}
Setting::Reset => builder.reset_non_separator_tokens(),
Setting::NotSet => (),
}
match settings.separator_tokens {
Setting::Set(ref separator_tokens) => {
builder.set_separator_tokens(separator_tokens.clone())
}
Setting::Reset => builder.reset_separator_tokens(),
Setting::NotSet => (),
}
match settings.dictionary {
Setting::Set(ref dictionary) => builder.set_dictionary(dictionary.clone()),
Setting::Reset => builder.reset_dictionary(),
Setting::NotSet => (),
}
match settings.synonyms {
Setting::Set(ref synonyms) => builder.set_synonyms(synonyms.clone().into_iter().collect()),
Setting::Reset => builder.reset_synonyms(),
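
The hunks in this file all revolve around the three-state `Setting` type: `Set` carries a value, `Reset` clears the stored one, and `NotSet` leaves it untouched so partial updates skip the field. A simplified stand-in (not the real meilisearch_types definition):

#[derive(Debug, PartialEq)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

fn apply(setting: Setting<Vec<String>>, current: &mut Vec<String>) {
    match setting {
        Setting::Set(tokens) => *current = tokens, // overwrite
        Setting::Reset => current.clear(),         // back to the default
        Setting::NotSet => (),                     // field absent from the update
    }
}

fn main() {
    let mut separators = vec!["|".to_string()];
    apply(Setting::NotSet, &mut separators);
    assert_eq!(separators, vec!["|".to_string()]);
    apply(Setting::Reset, &mut separators);
    assert!(separators.is_empty());
}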
@@ -502,14 +459,15 @@ pub fn settings(
})
.transpose()?
.unwrap_or_default();
let non_separator_tokens = index.non_separator_tokens(rtxn)?.unwrap_or_default();
let separator_tokens = index.separator_tokens(rtxn)?.unwrap_or_default();
let dictionary = index.dictionary(rtxn)?.unwrap_or_default();
let distinct_field = index.distinct_field(rtxn)?.map(String::from);
let synonyms = index.user_defined_synonyms(rtxn)?;
// in milli each word in the synonyms map was split on its separator. Since we lost
// this information we are going to put a space between words.
let synonyms = index
.synonyms(rtxn)?
.iter()
.map(|(key, values)| (key.join(" "), values.iter().map(|value| value.join(" ")).collect()))
.collect();
let min_typo_word_len = MinWordSizeTyposSetting {
one_typo: Setting::Set(index.min_word_len_one_typo(rtxn)?),
@@ -562,9 +520,6 @@ pub fn settings(
sortable_attributes: Setting::Set(sortable_attributes),
ranking_rules: Setting::Set(criteria.iter().map(|c| c.clone().into()).collect()),
stop_words: Setting::Set(stop_words),
non_separator_tokens: Setting::Set(non_separator_tokens),
separator_tokens: Setting::Set(separator_tokens),
dictionary: Setting::Set(dictionary),
distinct_attribute: match distinct_field {
Some(field) => Setting::Set(field),
None => Setting::Reset,
@@ -687,9 +642,6 @@ pub(crate) mod test {
sortable_attributes: Setting::NotSet,
ranking_rules: Setting::NotSet,
stop_words: Setting::NotSet,
non_separator_tokens: Setting::NotSet,
separator_tokens: Setting::NotSet,
dictionary: Setting::NotSet,
synonyms: Setting::NotSet,
distinct_attribute: Setting::NotSet,
typo_tolerance: Setting::NotSet,
@@ -711,9 +663,6 @@ pub(crate) mod test {
sortable_attributes: Setting::NotSet,
ranking_rules: Setting::NotSet,
stop_words: Setting::NotSet,
non_separator_tokens: Setting::NotSet,
separator_tokens: Setting::NotSet,
dictionary: Setting::NotSet,
synonyms: Setting::NotSet,
distinct_attribute: Setting::NotSet,
typo_tolerance: Setting::NotSet,

View File

@@ -39,7 +39,7 @@ byte-unit = { version = "4.0.19", default-features = false, features = [
bytes = "1.4.0"
clap = { version = "4.2.1", features = ["derive", "env"] }
crossbeam-channel = "0.5.8"
deserr = { version = "0.6.0", features = ["actix-web"]}
deserr = "0.5.0"
dump = { path = "../dump" }
either = "1.8.1"
env_logger = "0.10.0"
@@ -50,9 +50,9 @@ futures = "0.3.28"
futures-util = "0.3.28"
http = "0.2.9"
index-scheduler = { path = "../index-scheduler" }
indexmap = { version = "2.0.0", features = ["serde"] }
indexmap = { version = "1.9.3", features = ["serde-1"] }
is-terminal = "0.4.8"
itertools = "0.11.0"
itertools = "0.10.5"
jsonwebtoken = "8.3.0"
lazy_static = "1.4.0"
log = "0.4.17"
@@ -87,7 +87,7 @@ sha2 = "0.10.6"
siphasher = "0.3.10"
slice-group-by = "0.3.0"
static-files = { version = "0.2.3", optional = true }
sysinfo = "0.29.7"
sysinfo = "0.28.4"
tar = "0.4.38"
tempfile = "3.5.0"
thiserror = "1.0.40"

View File

@@ -20,7 +20,7 @@ pub struct SearchAggregator;
#[allow(dead_code)]
impl SearchAggregator {
pub fn from_query(_: &dyn Any, _: &dyn Any) -> Self {
Self
Self::default()
}
pub fn succeed(&mut self, _: &dyn Any) {}
@@ -32,7 +32,7 @@ pub struct MultiSearchAggregator;
#[allow(dead_code)]
impl MultiSearchAggregator {
pub fn from_queries(_: &dyn Any, _: &dyn Any) -> Self {
Self
Self::default()
}
pub fn succeed(&mut self) {}
@@ -44,7 +44,7 @@ pub struct FacetSearchAggregator;
#[allow(dead_code)]
impl FacetSearchAggregator {
pub fn from_query(_: &dyn Any, _: &dyn Any) -> Self {
Self
Self::default()
}
pub fn succeed(&mut self, _: &dyn Any) {}
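
Since these aggregators are unit structs, `Self` and `Self::default()` construct the same value, so the swap above is purely cosmetic. A tiny demonstration:

#[derive(Default, PartialEq, Debug)]
struct SearchAggregator;

impl SearchAggregator {
    fn one_way() -> Self { Self }                // the literal unit value
    fn other_way() -> Self { Self::default() }   // via the Default impl
}

fn main() {
    assert_eq!(SearchAggregator::one_way(), SearchAggregator::other_way());
}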

View File

@@ -1,5 +1,6 @@
mod mock_analytics;
#[cfg(feature = "analytics")]
// if we are in release mode and the analytics feature is enabled
#[cfg(all(not(debug_assertions), feature = "analytics"))]
mod segment_analytics;
use std::fs;
@@ -16,25 +17,26 @@ use serde_json::Value;
use crate::routes::indexes::documents::UpdateDocumentsQuery;
use crate::routes::tasks::TasksFilterQuery;
// if the analytics feature is disabled
// if we are in debug mode OR the analytics feature is disabled
// the `SegmentAnalytics` point to the mock instead of the real analytics
#[cfg(not(feature = "analytics"))]
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type SegmentAnalytics = mock_analytics::MockAnalytics;
#[cfg(not(feature = "analytics"))]
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type SearchAggregator = mock_analytics::SearchAggregator;
#[cfg(not(feature = "analytics"))]
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type MultiSearchAggregator = mock_analytics::MultiSearchAggregator;
#[cfg(not(feature = "analytics"))]
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type FacetSearchAggregator = mock_analytics::FacetSearchAggregator;
// if the feature analytics is enabled we use the real analytics
#[cfg(feature = "analytics")]
// if we are in release mode and the analytics feature is enabled
// we use the real analytics
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type SegmentAnalytics = segment_analytics::SegmentAnalytics;
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type SearchAggregator = segment_analytics::SearchAggregator;
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type MultiSearchAggregator = segment_analytics::MultiSearchAggregator;
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type FacetSearchAggregator = segment_analytics::FacetSearchAggregator;
/// The Meilisearch config dir:
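
The net effect of the cfg changes in this file: the type aliases resolve to the real segment analytics only in release builds with the `analytics` feature on, and to the mock everywhere else. A compile-time sketch of the gating (module bodies stubbed out):

mod mock_analytics {
    pub struct MockAnalytics;
}

#[cfg(all(not(debug_assertions), feature = "analytics"))]
mod segment_analytics {
    pub struct SegmentAnalytics;
}

// Debug build OR feature off: the alias points at the mock.
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type SegmentAnalytics = mock_analytics::MockAnalytics;

// Release build AND feature on: the alias points at the real implementation.
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type SegmentAnalytics = segment_analytics::SegmentAnalytics;

fn main() {}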

File diff suppressed because it is too large.

View File

@@ -28,7 +28,7 @@ const MEILI_DB_PATH: &str = "MEILI_DB_PATH";
const MEILI_HTTP_ADDR: &str = "MEILI_HTTP_ADDR";
const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY";
const MEILI_ENV: &str = "MEILI_ENV";
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS";
const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT";
const MEILI_SSL_CERT_PATH: &str = "MEILI_SSL_CERT_PATH";
@@ -159,7 +159,7 @@ pub struct Opt {
/// Meilisearch automatically collects data from all instances that do not opt out using this flag.
/// All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted
/// at any time.
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
#[serde(default)] // we can't send true
#[clap(long, env = MEILI_NO_ANALYTICS)]
pub no_analytics: bool,
@@ -390,7 +390,7 @@ impl Opt {
ignore_missing_dump: _,
ignore_dump_if_db_exists: _,
config_file_path: _,
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
no_analytics,
experimental_enable_metrics: enable_metrics_route,
experimental_reduce_indexing_memory_usage: reduce_indexing_memory_usage,
@@ -401,7 +401,7 @@ impl Opt {
export_to_env_if_not_present(MEILI_MASTER_KEY, master_key);
}
export_to_env_if_not_present(MEILI_ENV, env);
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
{
export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
}

View File

@@ -310,81 +310,6 @@ make_setting_route!(
}
);
make_setting_route!(
"/non-separator-tokens",
put,
std::collections::BTreeSet<String>,
meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsNonSeparatorTokens,
>,
non_separator_tokens,
"nonSeparatorTokens",
analytics,
|non_separator_tokens: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"nonSeparatorTokens Updated".to_string(),
json!({
"non_separator_tokens": {
"total": non_separator_tokens.as_ref().map(|non_separator_tokens| non_separator_tokens.len()),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/separator-tokens",
put,
std::collections::BTreeSet<String>,
meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsSeparatorTokens,
>,
separator_tokens,
"separatorTokens",
analytics,
|separator_tokens: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"separatorTokens Updated".to_string(),
json!({
"separator_tokens": {
"total": separator_tokens.as_ref().map(|separator_tokens| separator_tokens.len()),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/dictionary",
put,
std::collections::BTreeSet<String>,
meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsDictionary,
>,
dictionary,
"dictionary",
analytics,
|dictionary: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"dictionary Updated".to_string(),
json!({
"dictionary": {
"total": dictionary.as_ref().map(|dictionary| dictionary.len()),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/synonyms",
put,
@@ -541,9 +466,6 @@ generate_configure!(
searchable_attributes,
distinct_attribute,
stop_words,
separator_tokens,
non_separator_tokens,
dictionary,
synonyms,
ranking_rules,
typo_tolerance,

View File

@@ -24,7 +24,6 @@ pub mod features;
pub mod indexes;
mod metrics;
mod multi_search;
mod snapshot;
mod swap_indexes;
pub mod tasks;
@@ -33,7 +32,6 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
.service(web::resource("/health").route(web::get().to(get_health)))
.service(web::scope("/keys").configure(api_key::configure))
.service(web::scope("/dumps").configure(dump::configure))
.service(web::scope("/snapshots").configure(snapshot::configure))
.service(web::resource("/stats").route(web::get().to(get_stats)))
.service(web::resource("/version").route(web::get().to(get_version)))
.service(web::scope("/indexes").configure(indexes::configure))

View File

@@ -1,32 +0,0 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_types::error::ResponseError;
use meilisearch_types::tasks::KindWithContent;
use serde_json::json;
use crate::analytics::Analytics;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::SummarizedTaskView;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(create_snapshot))));
}
pub async fn create_snapshot(
index_scheduler: GuardedData<ActionPolicy<{ actions::SNAPSHOTS_CREATE }>, Data<IndexScheduler>>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.publish("Snapshot Created".to_string(), json!({}), Some(&req));
let task = KindWithContent::SnapshotCreation;
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}

View File

@@ -60,7 +60,8 @@ pub async fn swap_indexes(
}
let task = KindWithContent::IndexSwap { swaps };
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
let task = index_scheduler.register(task)?;
let task: SummarizedTaskView = task.into();
Ok(HttpResponse::Accepted().json(task))
}
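
The swap above trades `tokio::task::spawn_blocking` (run the synchronous `register` on a dedicated blocking thread, then await the handle) for a direct call on the handler's async thread. A sketch of the two shapes with a toy register function, assuming the tokio runtime the surrounding code already uses:

fn register(task: &str) -> Result<String, String> {
    Ok(format!("registered {task}"))
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let task = "indexSwap".to_string();

    // Direct call: runs on the current async thread.
    let direct = register(&task)?;

    // spawn_blocking: runs on tokio's blocking pool; awaiting the JoinHandle
    // yields Result<Result<_, _>, JoinError>, hence the double `?`.
    let spawned = tokio::task::spawn_blocking(move || register(&task))
        .await
        .map_err(|e| e.to_string())??;

    assert_eq!(direct, spawned);
    Ok(())
}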

View File

@@ -491,20 +491,6 @@ pub fn perform_search(
tokenizer_builder.allow_list(&script_lang_map);
}
let separators = index.allowed_separators(&rtxn)?;
let separators: Option<Vec<_>> =
separators.as_ref().map(|x| x.iter().map(String::as_str).collect());
if let Some(ref separators) = separators {
tokenizer_builder.separators(separators);
}
let dictionary = index.dictionary(&rtxn)?;
let dictionary: Option<Vec<_>> =
dictionary.as_ref().map(|x| x.iter().map(String::as_str).collect());
if let Some(ref dictionary) = dictionary {
tokenizer_builder.words_dict(dictionary);
}
let mut formatter_builder = MatcherBuilder::new(matching_words, tokenizer_builder.build());
formatter_builder.crop_marker(query.crop_marker);
formatter_builder.highlight_prefix(query.highlight_pre_tag);
@@ -680,7 +666,6 @@ fn compute_semantic_score(query: &[f32], vectors: Value) -> milli::Result<Option
.map_err(InternalError::SerdeJson)?;
Ok(vectors
.into_iter()
.flatten()
.map(|v| OrderedFloat(dot_product_similarity(query, &v)))
.max()
.map(OrderedFloat::into_inner))
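
The `.flatten()` removed here is the standard iterator adapter: it splices an iterator of nested collections into one stream of their elements, so the `.map` closure receives each inner element rather than each outer entry. A plain-f32 sketch of the adapter (OrderedFloat and the dot product left out for brevity):

fn main() {
    let vectors: Vec<Vec<f32>> = vec![vec![0.1, 0.9], vec![0.5]];
    let max = vectors
        .into_iter()
        .flatten() // yields 0.1, 0.9, 0.5 instead of the two inner Vecs
        .fold(f32::MIN, f32::max);
    assert_eq!(max, 0.9);
}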

View File

@@ -1,7 +1,8 @@
use std::{thread, time};
use crate::common::{Server, Value};
use crate::json;
use serde_json::{json, Value};
use crate::common::Server;
#[actix_rt::test]
async fn add_valid_api_key() {
@@ -161,7 +162,7 @@ async fn add_valid_api_key_null_description() {
server.use_api_key("MASTER_KEY");
let content = json!({
"description": json!(null),
"description": Value::Null,
"indexes": ["products"],
"actions": ["documents.add"],
"expiresAt": "2050-11-13T00:00:00"
@@ -364,7 +365,7 @@ async fn error_add_api_key_invalid_index_uids() {
server.use_api_key("MASTER_KEY");
let content = json!({
"description": json!(null),
"description": Value::Null,
"indexes": ["invalid index # / \\name with spaces"],
"actions": [
"documents.add"
@@ -421,7 +422,7 @@ async fn error_add_api_key_invalid_parameters_actions() {
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r###"
{
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`",
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
@@ -506,7 +507,7 @@ async fn error_add_api_key_invalid_parameters_uid() {
async fn error_add_api_key_parameters_uid_already_exist() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let content: Value = json!({
let content = json!({
"uid": "4bc0887a-0e41-4f3b-935d-0c451dcee9c8",
"indexes": ["products"],
"actions": ["search"],
@@ -1145,7 +1146,7 @@ async fn patch_api_key_description() {
meili_snap::snapshot!(code, @"200 OK");
// Remove the description
let content = json!({ "description": null });
let content = json!({ "description": serde_json::Value::Null });
let (response, code) = server.patch_api_key(&uid, content).await;
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]", ".uid" => "[ignored]", ".key" => "[ignored]" }), @r###"
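
Most of the churn in this test file (and the ones below) swaps between `json!(null)`, `Value::Null`, and `serde_json::Value::Null`, which all denote the same JSON null; the substantive change is importing `json`/`Value` from `serde_json` instead of the crate-local `crate::json` / `crate::common::Value` wrappers. A two-assert sketch (serde_json assumed, as in the imports above):

use serde_json::{json, Value};

fn main() {
    assert_eq!(json!(null), Value::Null);
    assert_eq!(Value::Null, serde_json::Value::Null);
}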

View File

@@ -3,10 +3,10 @@ use std::collections::{HashMap, HashSet};
use ::time::format_description::well_known::Rfc3339;
use maplit::{hashmap, hashset};
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use time::{Duration, OffsetDateTime};
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'static str>>> =
Lazy::new(|| {
@@ -54,7 +54,6 @@ pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'
("GET", "/indexes/products/stats") => hashset!{"stats.get", "stats.*", "*"},
("GET", "/stats") => hashset!{"stats.get", "stats.*", "*"},
("POST", "/dumps") => hashset!{"dumps.create", "dumps.*", "*"},
("POST", "/snapshots") => hashset!{"snapshots.create", "snapshots.*", "*"},
("GET", "/version") => hashset!{"version", "*"},
("GET", "/metrics") => hashset!{"metrics.get", "metrics.*", "*"},
("PATCH", "/keys/mykey/") => hashset!{"keys.update", "*"},

View File

@@ -1,8 +1,8 @@
use meili_snap::*;
use serde_json::json;
use uuid::Uuid;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn create_api_key_bad_description() {
@@ -90,7 +90,7 @@ async fn create_api_key_bad_actions() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`",
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"

View File

@@ -7,9 +7,9 @@ mod tenant_token;
mod tenant_token_multi_search;
use actix_web::http::StatusCode;
use serde_json::{json, Value};
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
impl Server {
pub fn use_api_key(&mut self, api_key: impl AsRef<str>) {

View File

@@ -3,11 +3,11 @@ use std::collections::HashMap;
use ::time::format_description::well_known::Rfc3339;
use maplit::hashmap;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use time::{Duration, OffsetDateTime};
use super::authorization::{ALL_ACTIONS, AUTHORIZATIONS};
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
fn generate_tenant_token(
parent_uid: impl AsRef<str>,
@@ -233,31 +233,31 @@ async fn search_authorized_simple_token() {
},
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!(null)
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"*": null}),
"exp" => json!(null)
"searchRules" => json!({"*": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!(null)
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": {}}),
"exp" => json!(null)
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": null}),
"exp" => json!(null)
"searchRules" => json!({"sales": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sales"]),
"exp" => json!(null)
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sa*"]),
"exp" => json!(null)
"exp" => Value::Null
},
];
@@ -386,7 +386,7 @@ async fn error_search_token_forbidden_parent_key() {
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": null}),
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -398,7 +398,7 @@ async fn error_search_token_forbidden_parent_key() {
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": null}),
"searchRules" => json!({"sales": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -428,15 +428,15 @@ async fn error_search_forbidden_token() {
},
hashmap! {
"searchRules" => json!({"products": {}}),
"exp" => json!(null)
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"products": null}),
"exp" => json!(null)
"searchRules" => json!({"products": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["products"]),
"exp" => json!(null)
"exp" => Value::Null
},
// expired token
hashmap! {
@@ -444,7 +444,7 @@ async fn error_search_forbidden_token() {
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": null}),
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -456,7 +456,7 @@ async fn error_search_forbidden_token() {
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": null}),
"searchRules" => json!({"sales": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {

View File

@@ -3,11 +3,11 @@ use std::collections::HashMap;
use ::time::format_description::well_known::Rfc3339;
use maplit::hashmap;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use time::{Duration, OffsetDateTime};
use super::authorization::ALL_ACTIONS;
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
fn generate_tenant_token(
parent_uid: impl AsRef<str>,
@@ -512,31 +512,31 @@ async fn single_search_authorized_simple_token() {
},
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"*": null}),
"exp" => json!(null),
"searchRules" => json!({"*": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": {}}),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": null}),
"exp" => json!(null),
"searchRules" => json!({"sales": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sales"]),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sa*"]),
"exp" => json!(null),
"exp" => Value::Null
},
];
@@ -564,31 +564,31 @@ async fn multi_search_authorized_simple_token() {
},
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"*": null}),
"exp" => json!(null),
"searchRules" => json!({"*": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": {}, "products": {}}),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": null, "products": null}),
"exp" => json!(null),
"searchRules" => json!({"sales": Value::Null, "products": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sales", "products"]),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sa*", "pro*"]),
"exp" => json!(null),
"exp" => Value::Null
},
];
@@ -823,7 +823,7 @@ async fn error_single_search_token_forbidden_parent_key() {
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": null}),
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -835,7 +835,7 @@ async fn error_single_search_token_forbidden_parent_key() {
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": null}),
"searchRules" => json!({"sales": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -864,7 +864,7 @@ async fn error_multi_search_token_forbidden_parent_key() {
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": null}),
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -876,7 +876,7 @@ async fn error_multi_search_token_forbidden_parent_key() {
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": null, "products": null}),
"searchRules" => json!({"sales": Value::Null, "products": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -919,15 +919,15 @@ async fn error_single_search_forbidden_token() {
},
hashmap! {
"searchRules" => json!({"products": {}}),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"products": null}),
"exp" => json!(null),
"searchRules" => json!({"products": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["products"]),
"exp" => json!(null),
"exp" => Value::Null
},
// expired token
hashmap! {
@@ -935,7 +935,7 @@ async fn error_single_search_forbidden_token() {
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": null}),
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -947,7 +947,7 @@ async fn error_single_search_forbidden_token() {
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": null}),
"searchRules" => json!({"sales": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -978,15 +978,15 @@ async fn error_multi_search_forbidden_token() {
},
hashmap! {
"searchRules" => json!({"products": {}}),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"products": null}),
"exp" => json!(null),
"searchRules" => json!({"products": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["products"]),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": {}}),
@@ -998,15 +998,15 @@ async fn error_multi_search_forbidden_token() {
},
hashmap! {
"searchRules" => json!({"sales": {}}),
"exp" => json!(null),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": null}),
"exp" => json!(null),
"searchRules" => json!({"sales": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sales"]),
"exp" => json!(null),
"exp" => Value::Null
},
// expired token
hashmap! {
@@ -1014,7 +1014,7 @@ async fn error_multi_search_forbidden_token() {
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": null}),
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
@@ -1026,7 +1026,7 @@ async fn error_multi_search_forbidden_token() {
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": null, "products": {}}),
"searchRules" => json!({"sales": Value::Null, "products": {}}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {

View File

@@ -3,13 +3,12 @@ use std::panic::{catch_unwind, resume_unwind, UnwindSafe};
use std::time::Duration;
use actix_web::http::StatusCode;
use serde_json::{json, Value};
use tokio::time::sleep;
use urlencoding::encode as urlencode;
use super::encoder::Encoder;
use super::service::Service;
use super::Value;
use crate::json;
pub struct Index<'a> {
pub uid: String,
@@ -243,9 +242,7 @@ impl Index<'_> {
pub async fn delete_batch(&self, ids: Vec<u64>) -> (Value, StatusCode) {
let url = format!("/indexes/{}/documents/delete-batch", urlencode(self.uid.as_ref()));
self.service
.post_encoded(url, serde_json::to_value(&ids).unwrap().into(), self.encoder)
.await
self.service.post_encoded(url, serde_json::to_value(&ids).unwrap(), self.encoder).await
}
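The removed variant's trailing .into() is the price of the newtype harness used by these tests: a helper that produces a raw serde_json::Value has to lift it into the wrapper type the service expects. A hedged one-line fragment of that conversion (assumes the common module's Value and its From<serde_json::Value> impl are in scope):

// From<serde_json::Value> lifts the raw value into the harness's Value
// newtype before it is handed to post_encoded.
let body: Value = serde_json::to_value(&[1u64, 2, 3]).unwrap().into();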
pub async fn delete_batch_raw(&self, body: Value) -> (Value, StatusCode) {

View File

@@ -3,83 +3,9 @@ pub mod index;
pub mod server;
pub mod service;
use std::fmt::{self, Display};
pub use index::{GetAllDocumentsOptions, GetDocumentOptions};
use meili_snap::json_string;
use serde::{Deserialize, Serialize};
pub use server::{default_settings, Server};
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct Value(pub serde_json::Value);
impl Value {
pub fn uid(&self) -> u64 {
if let Some(uid) = self["uid"].as_u64() {
uid
} else if let Some(uid) = self["taskUid"].as_u64() {
uid
} else {
panic!("Didn't find any task id in: {self}");
}
}
}
impl From<serde_json::Value> for Value {
fn from(value: serde_json::Value) -> Self {
Value(value)
}
}
impl std::ops::Deref for Value {
type Target = serde_json::Value;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl PartialEq<serde_json::Value> for Value {
fn eq(&self, other: &serde_json::Value) -> bool {
&self.0 == other
}
}
impl PartialEq<Value> for serde_json::Value {
fn eq(&self, other: &Value) -> bool {
self == &other.0
}
}
impl PartialEq<&str> for Value {
fn eq(&self, other: &&str) -> bool {
self.0.eq(other)
}
}
impl Display for Value {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
json_string!(self, { ".enqueuedAt" => "[date]", ".processedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" })
)
}
}
impl From<Vec<Value>> for Value {
fn from(value: Vec<Value>) -> Self {
Self(value.into_iter().map(|value| value.0).collect::<serde_json::Value>())
}
}
#[macro_export]
macro_rules! json {
($($json:tt)+) => {
$crate::common::Value(serde_json::json!($($json)+))
};
}
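Together, the wrapper and the crate-local json! macro keep assertions terse: Deref exposes serde_json's indexing on the newtype, uid() abstracts over "uid" vs "taskUid" in task responses, and the PartialEq impls allow direct comparison against plain values and string literals. A minimal usage sketch, assuming the items above are in scope (the payload is illustrative):

#[test]
fn value_wrapper_sketch() {
    let task = json!({ "taskUid": 12, "status": "enqueued" });
    // uid() falls back from "uid" to "taskUid", so this reads 12.
    assert_eq!(task.uid(), 12);
    // Indexing auto-derefs to the inner serde_json::Value, which in turn
    // compares directly against a string literal.
    assert_eq!(task["status"], "enqueued");
}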
/// Performs a search test on both post and get routes
#[macro_export]
macro_rules! test_post_get_search {

View File

@@ -11,14 +11,13 @@ use clap::Parser;
use meilisearch::option::{IndexerOpts, MaxMemory, Opt};
use meilisearch::{analytics, create_app, setup_meilisearch};
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use tempfile::TempDir;
use tokio::time::sleep;
use super::index::Index;
use super::service::Service;
use crate::common::encoder::Encoder;
use crate::common::Value;
use crate::json;
pub struct Server {
pub service: Service,
@@ -157,10 +156,6 @@ impl Server {
self.service.post("/dumps", json!(null)).await
}
pub async fn create_snapshot(&self) -> (Value, StatusCode) {
self.service.post("/snapshots", json!(null)).await
}
pub async fn index_swap(&self, value: Value) -> (Value, StatusCode) {
self.service.post("/swap-indexes", value).await
}
@@ -209,7 +204,7 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
db_path: dir.as_ref().join("db"),
dump_dir: dir.as_ref().join("dumps"),
env: "development".to_owned(),
#[cfg(feature = "analytics")]
#[cfg(all(not(debug_assertions), feature = "analytics"))]
no_analytics: true,
max_index_size: Byte::from_unit(100.0, ByteUnit::MiB).unwrap(),
max_task_db_size: Byte::from_unit(1.0, ByteUnit::GiB).unwrap(),

View File

@@ -7,9 +7,9 @@ use actix_web::test::TestRequest;
use index_scheduler::IndexScheduler;
use meilisearch::{analytics, create_app, Opt};
use meilisearch_auth::AuthController;
use serde_json::Value;
use crate::common::encoder::Encoder;
use crate::common::Value;
pub struct Service {
pub index_scheduler: Arc<IndexScheduler>,

View File

@@ -3,8 +3,9 @@
mod common;
use actix_web::test;
use serde_json::{json, Value};
use crate::common::{Server, Value};
use crate::common::Server;
enum HttpVerb {
Put,

View File

@@ -1,11 +1,11 @@
use actix_web::test;
use meili_snap::{json_string, snapshot};
use serde_json::{json, Value};
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::{GetAllDocumentsOptions, Server, Value};
use crate::json;
use crate::common::{GetAllDocumentsOptions, Server};
/// This is the basic usage of our API and every other tests uses the content-type application/json
#[actix_rt::test]

View File

@@ -1,7 +1,7 @@
use meili_snap::{json_string, snapshot};
use serde_json::json;
use crate::common::{GetAllDocumentsOptions, Server};
use crate::json;
#[actix_rt::test]
async fn delete_one_document_unexisting_index() {
@@ -154,19 +154,6 @@ async fn delete_document_by_filter() {
)
.await;
index.wait_task(1).await;
let (stats, _) = index.stats().await;
snapshot!(json_string!(stats), @r###"
{
"numberOfDocuments": 4,
"isIndexing": false,
"fieldDistribution": {
"color": 3,
"id": 4
}
}
"###);
let (response, code) =
index.delete_document_by_filter(json!({ "filter": "color = blue"})).await;
snapshot!(code, @"202 Accepted");
@@ -201,18 +188,6 @@ async fn delete_document_by_filter() {
}
"###);
let (stats, _) = index.stats().await;
snapshot!(json_string!(stats), @r###"
{
"numberOfDocuments": 2,
"isIndexing": false,
"fieldDistribution": {
"color": 1,
"id": 2
}
}
"###);
let (documents, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(documents), @r###"
@@ -266,18 +241,6 @@ async fn delete_document_by_filter() {
}
"###);
let (stats, _) = index.stats().await;
snapshot!(json_string!(stats), @r###"
{
"numberOfDocuments": 1,
"isIndexing": false,
"fieldDistribution": {
"color": 1,
"id": 1
}
}
"###);
let (documents, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(documents), @r###"

View File

@@ -1,8 +1,8 @@
use meili_snap::*;
use serde_json::json;
use urlencoding::encode;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn get_all_documents_bad_offset() {

View File

@@ -1,11 +1,11 @@
use actix_web::test;
use http::header::ACCEPT_ENCODING;
use meili_snap::*;
use serde_json::{json, Value};
use urlencoding::encode as urlencode;
use crate::common::encoder::Encoder;
use crate::common::{GetAllDocumentsOptions, GetDocumentOptions, Server, Value};
use crate::json;
use crate::common::{GetAllDocumentsOptions, GetDocumentOptions, Server};
// TODO: partial test since we are testing error, and error is not yet fully implemented in
// transplant
@@ -40,7 +40,7 @@ async fn get_document() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
let documents = json!([
let documents = serde_json::json!([
{
"id": 0,
"nested": { "content": "foobar" },
@@ -53,7 +53,7 @@ async fn get_document() {
assert_eq!(code, 200);
assert_eq!(
response,
json!({
serde_json::json!({
"id": 0,
"nested": { "content": "foobar" },
})
@@ -64,7 +64,7 @@ async fn get_document() {
assert_eq!(code, 200);
assert_eq!(
response,
json!({
serde_json::json!({
"id": 0,
})
);
@@ -75,7 +75,7 @@ async fn get_document() {
assert_eq!(code, 200);
assert_eq!(
response,
json!({
serde_json::json!({
"nested": { "content": "foobar" },
})
);
@@ -122,7 +122,7 @@ async fn get_all_documents_no_options() {
assert_eq!(code, 200);
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 20);
let first = json!({
let first = serde_json::json!({
"id":0,
"isActive":false,
"balance":"$2,668.55",

View File

@@ -1,8 +1,7 @@
use meili_snap::snapshot;
use serde_json::json;
use crate::common::encoder::Encoder;
use crate::common::{GetAllDocumentsOptions, Server};
use crate::json;
#[actix_rt::test]
async fn error_document_update_create_index_bad_uid() {
@@ -85,13 +84,7 @@ async fn update_document() {
let (response, code) = index.get_document(1, None).await;
assert_eq!(code, 200);
snapshot!(response, @r###"
{
"doc_id": 1,
"content": "foo",
"other": "bar"
}
"###);
assert_eq!(response.to_string(), r##"{"doc_id":1,"content":"foo","other":"bar"}"##);
}
#[actix_rt::test]
@@ -129,13 +122,7 @@ async fn update_document_gzip_encoded() {
let (response, code) = index.get_document(1, None).await;
assert_eq!(code, 200);
snapshot!(response, @r###"
{
"doc_id": 1,
"content": "foo",
"other": "bar"
}
"###);
assert_eq!(response.to_string(), r##"{"doc_id":1,"content":"foo","other":"bar"}"##);
}
#[actix_rt::test]

File diff suppressed because it is too large

View File

@@ -1,5 +1,6 @@
use serde_json::json;
use crate::common::Server;
use crate::json;
/// Feature name to test against.
/// This will have to be changed by a different one when that feature is stabilized.

View File

@@ -2,10 +2,10 @@ use actix_web::http::header::ContentType;
use actix_web::test;
use http::header::ACCEPT_ENCODING;
use meili_snap::{json_string, snapshot};
use serde_json::{json, Value};
use crate::common::encoder::Encoder;
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
#[actix_rt::test]
async fn create_index_no_primary_key() {
@@ -21,7 +21,7 @@ async fn create_index_no_primary_key() {
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], json!(null));
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
#[actix_rt::test]
@@ -38,7 +38,7 @@ async fn create_index_with_gzip_encoded_request() {
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], json!(null));
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
#[actix_rt::test]
@@ -86,7 +86,7 @@ async fn create_index_with_zlib_encoded_request() {
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], json!(null));
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
#[actix_rt::test]
@@ -103,7 +103,7 @@ async fn create_index_with_brotli_encoded_request() {
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], json!(null));
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
#[actix_rt::test]
@@ -136,7 +136,7 @@ async fn create_index_with_invalid_primary_key() {
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["primaryKey"], json!(null));
assert_eq!(response["primaryKey"], Value::Null);
}
#[actix_rt::test]

View File

@@ -1,5 +1,6 @@
use serde_json::json;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn create_and_delete_index() {

View File

@@ -1,7 +1,7 @@
use meili_snap::*;
use serde_json::json;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn get_indexes_bad_offset() {

View File

@@ -1,5 +1,6 @@
use serde_json::json;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn stats() {

View File

@@ -1,9 +1,9 @@
use serde_json::json;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn update_primary_key() {

View File

@@ -1,8 +1,8 @@
use meili_snap::*;
use serde_json::json;
use super::DOCUMENTS;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn search_unexisting_index() {

View File

@@ -1,8 +1,8 @@
use meili_snap::snapshot;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
pub(self) static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([

View File

@@ -1,8 +1,8 @@
use insta::{allow_duplicates, assert_json_snapshot};
use serde_json::json;
use super::*;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn formatted_contain_wildcard() {

View File

@@ -1,8 +1,7 @@
use meili_snap::{json_string, snapshot};
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
pub(self) static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
@@ -61,59 +60,3 @@ async fn geo_sort_with_geo_strings() {
)
.await;
}
#[actix_rt::test]
async fn geo_bounding_box_with_string_and_number() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["_geo"])).await;
index.update_settings_sortable_attributes(json!(["_geo"])).await;
index.add_documents(documents, None).await;
index.wait_task(2).await;
index
.search(
json!({
"filter": "_geoBoundingBox([89, 179], [-89, -179])",
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
{
"id": 1,
"name": "Taco Truck",
"address": "444 Salsa Street, Burritoville",
"type": "Mexican",
"rating": 9,
"_geo": {
"lat": 34.0522,
"lng": -118.2437
}
},
{
"id": 2,
"name": "La Bella Italia",
"address": "456 Elm Street, Townsville",
"type": "Italian",
"rating": 9,
"_geo": {
"lat": "45.4777599",
"lng": "9.1967508"
}
}
],
"query": "",
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
}
"###);
},
)
.await;
}

View File

@@ -10,9 +10,9 @@ mod pagination;
mod restrict_searchable;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
pub(self) static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
@@ -1104,59 +1104,3 @@ async fn camelcased_words() {
})
.await;
}
#[actix_rt::test]
async fn simple_search_with_strange_synonyms() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({ "synonyms": {"&": ["to"], "to": ["&"]} })).await;
let r = index.wait_task(0).await;
meili_snap::snapshot!(r["status"], @r###""succeeded""###);
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(json!({"q": "How to train"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
[
{
"title": "How to Train Your Dragon: The Hidden World",
"id": "166428"
}
]
"###);
})
.await;
index
.search(json!({"q": "How & train"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
[
{
"title": "How to Train Your Dragon: The Hidden World",
"id": "166428"
}
]
"###);
})
.await;
index
.search(json!({"q": "to"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
[
{
"title": "How to Train Your Dragon: The Hidden World",
"id": "166428"
}
]
"###);
})
.await;
}

View File

@@ -1,8 +1,8 @@
use meili_snap::{json_string, snapshot};
use serde_json::json;
use super::{DOCUMENTS, NESTED_DOCUMENTS};
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn search_empty_list() {

View File

@@ -1,5 +1,6 @@
use serde_json::json;
use crate::common::Server;
use crate::json;
use crate::search::DOCUMENTS;
#[actix_rt::test]

View File

@@ -1,9 +1,9 @@
use meili_snap::{json_string, snapshot};
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use crate::common::index::Index;
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
async fn index_with_documents<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
let index = server.index("test");

View File

@@ -1,5 +1,6 @@
use serde_json::json;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn set_and_reset_distinct_attribute() {

View File

@@ -1,7 +1,7 @@
use meili_snap::*;
use serde_json::json;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn settings_bad_displayed_attributes() {

View File

@@ -1,24 +1,21 @@
use std::collections::HashMap;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use crate::common::{Server, Value};
use crate::json;
use crate::common::Server;
static DEFAULT_SETTINGS_VALUES: Lazy<HashMap<&'static str, Value>> = Lazy::new(|| {
let mut map = HashMap::new();
map.insert("displayed_attributes", json!(["*"]));
map.insert("searchable_attributes", json!(["*"]));
map.insert("filterable_attributes", json!([]));
map.insert("distinct_attribute", json!(null));
map.insert("distinct_attribute", json!(Value::Null));
map.insert(
"ranking_rules",
json!(["words", "typo", "proximity", "attribute", "sort", "exactness"]),
);
map.insert("stop_words", json!([]));
map.insert("non_separator_tokens", json!([]));
map.insert("separator_tokens", json!([]));
map.insert("dictionary", json!([]));
map.insert("synonyms", json!({}));
map.insert(
"faceting",
@@ -54,7 +51,7 @@ async fn get_settings() {
let (response, code) = index.settings().await;
assert_eq!(code, 200);
let settings = response.as_object().unwrap();
assert_eq!(settings.keys().len(), 14);
assert_eq!(settings.keys().len(), 11);
assert_eq!(settings["displayedAttributes"], json!(["*"]));
assert_eq!(settings["searchableAttributes"], json!(["*"]));
assert_eq!(settings["filterableAttributes"], json!([]));
@@ -65,9 +62,6 @@ async fn get_settings() {
json!(["words", "typo", "proximity", "attribute", "sort", "exactness"])
);
assert_eq!(settings["stopWords"], json!([]));
assert_eq!(settings["nonSeparatorTokens"], json!([]));
assert_eq!(settings["separatorTokens"], json!([]));
assert_eq!(settings["dictionary"], json!([]));
assert_eq!(
settings["faceting"],
json!({
@@ -229,7 +223,7 @@ macro_rules! test_setting_routes {
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.$write_method(url, serde_json::Value::Null.into()).await;
let (response, code) = server.service.$write_method(url, serde_json::Value::Null).await;
assert_eq!(code, 202, "{}", response);
server.index("").wait_task(0).await;
let (response, code) = server.index("test").get().await;
@@ -278,9 +272,6 @@ test_setting_routes!(
searchable_attributes put,
distinct_attribute put,
stop_words put,
separator_tokens put,
non_separator_tokens put,
dictionary put,
ranking_rules put,
synonyms put,
pagination patch,

View File

@@ -1,4 +1,3 @@
mod distinct;
mod errors;
mod get_settings;
mod tokenizer_customization;

View File

@@ -1,467 +0,0 @@
use meili_snap::{json_string, snapshot};
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn set_and_reset() {
let server = Server::new().await;
let index = server.index("test");
let (_response, _code) = index
.update_settings(json!({
"nonSeparatorTokens": ["#", "&"],
"separatorTokens": ["&sep", "<br/>"],
"dictionary": ["J.R.R.", "J. R. R."],
}))
.await;
index.wait_task(0).await;
let (response, _) = index.settings().await;
snapshot!(json_string!(response["nonSeparatorTokens"]), @r###"
[
"#",
"&"
]
"###);
snapshot!(json_string!(response["separatorTokens"]), @r###"
[
"&sep",
"<br/>"
]
"###);
snapshot!(json_string!(response["dictionary"]), @r###"
[
"J. R. R.",
"J.R.R."
]
"###);
index
.update_settings(json!({
"nonSeparatorTokens": null,
"separatorTokens": null,
"dictionary": null,
}))
.await;
index.wait_task(1).await;
let (response, _) = index.settings().await;
snapshot!(json_string!(response["nonSeparatorTokens"]), @"[]");
snapshot!(json_string!(response["separatorTokens"]), @"[]");
snapshot!(json_string!(response["dictionary"]), @"[]");
}
#[actix_rt::test]
async fn set_and_search() {
let documents = json!([
{
"id": 1,
"content": "Mac & cheese",
},
{
"id": 2,
"content": "G#D#G#D#G#C#D#G#C#",
},
{
"id": 3,
"content": "Mac&sep&&sepcheese",
},
]);
let server = Server::new().await;
let index = server.index("test");
index.add_documents(documents, None).await;
index.wait_task(0).await;
let (_response, _code) = index
.update_settings(json!({
"nonSeparatorTokens": ["#", "&"],
"separatorTokens": ["<br/>", "&sep"],
"dictionary": ["#", "A#", "B#", "C#", "D#", "E#", "F#", "G#"],
}))
.await;
index.wait_task(1).await;
index
.search(json!({"q": "&", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 1,
"content": "Mac & cheese",
"_formatted": {
"id": "1",
"content": "Mac <em>&</em> cheese"
}
},
{
"id": 3,
"content": "Mac&sep&&sepcheese",
"_formatted": {
"id": "3",
"content": "Mac&sep<em>&</em>&sepcheese"
}
}
]
"###);
})
.await;
index
.search(
json!({"q": "Mac & cheese", "attributesToHighlight": ["content"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 1,
"content": "Mac & cheese",
"_formatted": {
"id": "1",
"content": "<em>Mac</em> <em>&</em> <em>cheese</em>"
}
},
{
"id": 3,
"content": "Mac&sep&&sepcheese",
"_formatted": {
"id": "3",
"content": "<em>Mac</em>&sep<em>&</em>&sep<em>cheese</em>"
}
}
]
"###);
},
)
.await;
index
.search(
json!({"q": "Mac&sep&&sepcheese", "attributesToHighlight": ["content"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 1,
"content": "Mac & cheese",
"_formatted": {
"id": "1",
"content": "<em>Mac</em> <em>&</em> <em>cheese</em>"
}
},
{
"id": 3,
"content": "Mac&sep&&sepcheese",
"_formatted": {
"id": "3",
"content": "<em>Mac</em>&sep<em>&</em>&sep<em>cheese</em>"
}
}
]
"###);
},
)
.await;
index
.search(json!({"q": "C#D#G", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
"content": "G#D#G#D#G#C#D#G#C#",
"_formatted": {
"id": "2",
"content": "<em>G</em>#<em>D#</em><em>G</em>#<em>D#</em><em>G</em>#<em>C#</em><em>D#</em><em>G</em>#<em>C#</em>"
}
}
]
"###);
})
.await;
index
.search(json!({"q": "#", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @"[]");
})
.await;
}
#[actix_rt::test]
async fn advanced_synergies() {
let documents = json!([
{
"id": 1,
"content": "J.R.R. Tolkien",
},
{
"id": 2,
"content": "J. R. R. Tolkien",
},
{
"id": 3,
"content": "jrr Tolkien",
},
{
"id": 4,
"content": "J.K. Rowlings",
},
{
"id": 5,
"content": "J. K. Rowlings",
},
{
"id": 6,
"content": "jk Rowlings",
},
]);
let server = Server::new().await;
let index = server.index("test");
index.add_documents(documents, None).await;
index.wait_task(0).await;
let (_response, _code) = index
.update_settings(json!({
"dictionary": ["J.R.R.", "J. R. R."],
"synonyms": {
"J.R.R.": ["jrr", "J. R. R."],
"J. R. R.": ["jrr", "J.R.R."],
"jrr": ["J.R.R.", "J. R. R."],
"J.K.": ["jk", "J. K."],
"J. K.": ["jk", "J.K."],
"jk": ["J.K.", "J. K."],
}
}))
.await;
index.wait_task(1).await;
index
.search(json!({"q": "J.R.R.", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 1,
"content": "J.R.R. Tolkien",
"_formatted": {
"id": "1",
"content": "<em>J.R.R.</em> Tolkien"
}
},
{
"id": 2,
"content": "J. R. R. Tolkien",
"_formatted": {
"id": "2",
"content": "<em>J. R. R.</em> Tolkien"
}
},
{
"id": 3,
"content": "jrr Tolkien",
"_formatted": {
"id": "3",
"content": "<em>jrr</em> Tolkien"
}
}
]
"###);
})
.await;
index
.search(json!({"q": "jrr", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 3,
"content": "jrr Tolkien",
"_formatted": {
"id": "3",
"content": "<em>jrr</em> Tolkien"
}
},
{
"id": 1,
"content": "J.R.R. Tolkien",
"_formatted": {
"id": "1",
"content": "<em>J.R.R.</em> Tolkien"
}
},
{
"id": 2,
"content": "J. R. R. Tolkien",
"_formatted": {
"id": "2",
"content": "<em>J. R. R.</em> Tolkien"
}
}
]
"###);
})
.await;
index
.search(json!({"q": "J. R. R.", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
"content": "J. R. R. Tolkien",
"_formatted": {
"id": "2",
"content": "<em>J. R. R.</em> Tolkien"
}
},
{
"id": 1,
"content": "J.R.R. Tolkien",
"_formatted": {
"id": "1",
"content": "<em>J.R.R.</em> Tolkien"
}
},
{
"id": 3,
"content": "jrr Tolkien",
"_formatted": {
"id": "3",
"content": "<em>jrr</em> Tolkien"
}
}
]
"###);
})
.await;
// Only update dictionary, the synonyms should be recomputed.
let (_response, _code) = index
.update_settings(json!({
"dictionary": ["J.R.R.", "J. R. R.", "J.K.", "J. K."],
}))
.await;
index.wait_task(2).await;
index
.search(json!({"q": "jk", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 6,
"content": "jk Rowlings",
"_formatted": {
"id": "6",
"content": "<em>jk</em> Rowlings"
}
},
{
"id": 4,
"content": "J.K. Rowlings",
"_formatted": {
"id": "4",
"content": "<em>J.K.</em> Rowlings"
}
},
{
"id": 5,
"content": "J. K. Rowlings",
"_formatted": {
"id": "5",
"content": "<em>J. K.</em> Rowlings"
}
}
]
"###);
})
.await;
index
.search(json!({"q": "J.K.", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 4,
"content": "J.K. Rowlings",
"_formatted": {
"id": "4",
"content": "<em>J.K.</em> Rowlings"
}
},
{
"id": 5,
"content": "J. K. Rowlings",
"_formatted": {
"id": "5",
"content": "<em>J. K.</em> Rowlings"
}
},
{
"id": 6,
"content": "jk Rowlings",
"_formatted": {
"id": "6",
"content": "<em>jk</em> Rowlings"
}
}
]
"###);
})
.await;
index
.search(json!({"q": "J. K.", "attributesToHighlight": ["content"]}), |response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 5,
"content": "J. K. Rowlings",
"_formatted": {
"id": "5",
"content": "<em>J. K.</em> Rowlings"
}
},
{
"id": 4,
"content": "J.K. Rowlings",
"_formatted": {
"id": "4",
"content": "<em>J.K.</em> Rowlings"
}
},
{
"id": 6,
"content": "jk Rowlings",
"_formatted": {
"id": "6",
"content": "<em>jk</em> Rowlings"
}
},
{
"id": 2,
"content": "J. R. R. Tolkien",
"_formatted": {
"id": "2",
"content": "<em>J. R.</em> R. Tolkien"
}
}
]
"###);
})
.await;
}

View File

@@ -1,13 +1,11 @@
use std::time::Duration;
use actix_rt::time::sleep;
use meili_snap::{json_string, snapshot};
use meilisearch::option::ScheduleSnapshot;
use meilisearch::Opt;
use crate::common::server::default_settings;
use crate::common::{GetAllDocumentsOptions, Server};
use crate::json;
macro_rules! verify_snapshot {
(
@@ -46,7 +44,7 @@ async fn perform_snapshot() {
let index = server.index("test");
index
.update_settings(json! ({
.update_settings(serde_json::json! ({
"searchableAttributes": [],
}))
.await;
@@ -92,95 +90,3 @@ async fn perform_snapshot() {
server.index("test1").settings(),
);
}
#[actix_rt::test]
async fn perform_on_demand_snapshot() {
let temp = tempfile::tempdir().unwrap();
let snapshot_dir = tempfile::tempdir().unwrap();
let options =
Opt { snapshot_dir: snapshot_dir.path().to_owned(), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let index = server.index("catto");
index
.update_settings(json! ({
"searchableAttributes": [],
}))
.await;
index.load_test_set().await;
server.index("doggo").create(Some("bone")).await;
index.wait_task(2).await;
server.index("doggo").create(Some("bone")).await;
index.wait_task(2).await;
let (task, code) = server.create_snapshot().await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(task, { ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 4,
"indexUid": null,
"status": "enqueued",
"type": "snapshotCreation",
"enqueuedAt": "[date]"
}
"###);
let task = index.wait_task(task.uid()).await;
snapshot!(json_string!(task, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 4,
"indexUid": null,
"status": "succeeded",
"type": "snapshotCreation",
"canceledBy": null,
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
let temp = tempfile::tempdir().unwrap();
let snapshots: Vec<String> = std::fs::read_dir(&snapshot_dir)
.unwrap()
.map(|entry| entry.unwrap().path().file_name().unwrap().to_str().unwrap().to_string())
.collect();
meili_snap::snapshot!(format!("{snapshots:?}"), @r###"["db.snapshot"]"###);
let snapshot_path = snapshot_dir.path().to_owned().join("db.snapshot");
#[cfg_attr(windows, allow(unused))]
let snapshot_meta = std::fs::metadata(&snapshot_path).unwrap();
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mode = snapshot_meta.permissions().mode();
// bit positions rwxrwxrwx: 100100100, i.e. r--r--r--
meili_snap::snapshot!(format!("{:b}", mode), @"1000000100100100");
}
let options = Opt { import_snapshot: Some(snapshot_path), ..default_settings(temp.path()) };
let snapshot_server = Server::new_with_options(options).await.unwrap();
verify_snapshot!(server, snapshot_server, |server| =>
server.list_indexes(None, None),
// for some reason the db sizes differ. this may be due to the compaction options we have
// set when performing the snapshot
//server.stats(),
// The original instance contains the snapshotCreation task, while the snapshotted instance does not. For this reason we need to compare the task queue **after** task 4
server.tasks_filter("?from=2"),
server.index("catto").get_all_documents(GetAllDocumentsOptions::default()),
server.index("catto").settings(),
server.index("doggo").get_all_documents(GetAllDocumentsOptions::default()),
server.index("doggo").settings(),
);
}

View File

@@ -1,8 +1,8 @@
use serde_json::json;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn get_settings_unexisting_index() {

View File

@@ -1,7 +1,7 @@
use meili_snap::*;
use serde_json::json;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn swap_indexes_bad_format() {

View File

@@ -1,9 +1,9 @@
mod errors;
use meili_snap::{json_string, snapshot};
use serde_json::json;
use crate::common::{GetAllDocumentsOptions, Server};
use crate::json;
#[actix_rt::test]
async fn swap_indexes() {

View File

@@ -1,11 +1,11 @@
mod errors;
use meili_snap::insta::assert_json_snapshot;
use serde_json::json;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn error_get_unexisting_task_status() {
@@ -33,7 +33,7 @@ async fn get_task_status() {
index.create(None).await;
index
.add_documents(
json!([{
serde_json::json!([{
"id": 1,
"content": "foobar",
}]),

View File

@@ -17,10 +17,10 @@ bincode = "1.3.3"
bstr = "1.4.0"
bytemuck = { version = "1.13.1", features = ["extern_crate_alloc"] }
byteorder = "1.4.3"
charabia = { version = "0.8.3", default-features = false }
charabia = { version = "0.8.2", default-features = false }
concat-arrays = "0.1.2"
crossbeam-channel = "0.5.8"
deserr = { version = "0.6.0", features = ["actix-web"]}
deserr = "0.5.0"
either = { version = "1.8.1", features = ["serde"] }
flatten-serde-json = { path = "../flatten-serde-json" }
fst = "0.4.7"
@@ -32,18 +32,18 @@ grenad = { version = "0.4.4", default-features = false, features = [
heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.7", default-features = false, features = [
"lmdb", "read-txn-no-tls"
] }
indexmap = { version = "2.0.0", features = ["serde"] }
indexmap = { version = "1.9.3", features = ["serde"] }
instant-distance = { version = "0.6.1", features = ["with-serde"] }
json-depth-checker = { path = "../json-depth-checker" }
levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }
memmap2 = "0.7.1"
memmap2 = "0.5.10"
obkv = "0.2.0"
once_cell = "1.17.1"
ordered-float = "3.6.0"
rand_pcg = { version = "0.3.1", features = ["serde1"] }
rayon = "1.7.0"
roaring = "0.10.1"
rstar = { version = "0.11.0", features = ["serde"] }
rstar = { version = "0.10.0", features = ["serde"] }
serde = { version = "1.0.160", features = ["derive"] }
serde_json = { version = "1.0.95", features = ["preserve_order"] }
slice-group-by = "0.3.0"
@@ -63,7 +63,7 @@ uuid = { version = "1.3.1", features = ["v4"] }
filter-parser = { path = "../filter-parser" }
# documents words self-join
itertools = "0.11.0"
itertools = "0.10.5"
# profiling
puffin = "0.16.0"

View File

@@ -1,5 +1,4 @@
use std::fs::File;
use std::io::BufReader;
use std::{io, str};
use obkv::KvReader;
@@ -20,14 +19,14 @@ use crate::FieldId;
pub struct EnrichedDocumentsBatchReader<R> {
documents: DocumentsBatchReader<R>,
primary_key: String,
external_ids: grenad::ReaderCursor<BufReader<File>>,
external_ids: grenad::ReaderCursor<File>,
}
impl<R: io::Read + io::Seek> EnrichedDocumentsBatchReader<R> {
pub fn new(
documents: DocumentsBatchReader<R>,
primary_key: String,
external_ids: grenad::Reader<BufReader<File>>,
external_ids: grenad::Reader<File>,
) -> Result<Self, Error> {
if documents.documents_count() as u64 == external_ids.len() {
Ok(EnrichedDocumentsBatchReader {
@@ -76,7 +75,7 @@ pub struct EnrichedDocument<'a> {
pub struct EnrichedDocumentsBatchCursor<R> {
documents: DocumentsBatchCursor<R>,
primary_key: String,
external_ids: grenad::ReaderCursor<BufReader<File>>,
external_ids: grenad::ReaderCursor<File>,
}
impl<R> EnrichedDocumentsBatchCursor<R> {

View File

@@ -47,8 +47,6 @@ pub enum InternalError {
IndexingMergingKeys { process: &'static str },
#[error("{}", HeedError::InvalidDatabaseTyping)]
InvalidDatabaseTyping,
#[error("Could not access the inner of a buf-reader/writer: {0}")]
BufIntoInnerError(String),
#[error(transparent)]
RayonThreadPool(#[from] ThreadPoolBuildError),
#[error(transparent)]
@@ -124,28 +122,22 @@ only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and undersco
.field,
match .valid_fields.is_empty() {
true => "This index does not have configured sortable attributes.".to_string(),
false => format!("Available sortable attributes are: `{}{}`.",
valid_fields.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(", "),
.hidden_fields.then_some(", <..hidden-attributes>").unwrap_or(""),
false => format!("Available sortable attributes are: `{}`.",
valid_fields.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(", ")
),
}
)]
InvalidSortableAttribute { field: String, valid_fields: BTreeSet<String>, hidden_fields: bool },
InvalidSortableAttribute { field: String, valid_fields: BTreeSet<String> },
#[error("Attribute `{}` is not facet-searchable. {}",
.field,
match .valid_fields.is_empty() {
true => "This index does not have configured facet-searchable attributes. To make it facet-searchable add it to the `filterableAttributes` index settings.".to_string(),
false => format!("Available facet-searchable attributes are: `{}{}`. To make it facet-searchable add it to the `filterableAttributes` index settings.",
valid_fields.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(", "),
.hidden_fields.then_some(", <..hidden-attributes>").unwrap_or(""),
false => format!("Available facet-searchable attributes are: `{}`. To make it facet-searchable add it to the `filterableAttributes` index settings.",
valid_fields.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(", ")
),
}
)]
InvalidFacetSearchFacetName {
field: String,
valid_fields: BTreeSet<String>,
hidden_fields: bool,
},
InvalidFacetSearchFacetName { field: String, valid_fields: BTreeSet<String> },
#[error("Attribute `{}` is not searchable. Available searchable attributes are: `{}{}`.",
.field,
.valid_fields.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(", "),
@@ -348,11 +340,8 @@ fn conditionally_lookup_for_error_message() {
];
for (list, suffix) in messages {
let err = UserError::InvalidSortableAttribute {
field: "name".to_string(),
valid_fields: list,
hidden_fields: false,
};
let err =
UserError::InvalidSortableAttribute { field: "name".to_string(), valid_fields: list };
assert_eq!(err.to_string(), format!("{} {}", prefix, suffix));
}

View File

@@ -1,5 +1,5 @@
use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fs::File;
use std::mem::size_of;
use std::path::Path;
@@ -61,12 +61,8 @@ pub mod main_key {
pub const USER_DEFINED_SEARCHABLE_FIELDS_KEY: &str = "user-defined-searchable-fields";
pub const SOFT_EXTERNAL_DOCUMENTS_IDS_KEY: &str = "soft-external-documents-ids";
pub const STOP_WORDS_KEY: &str = "stop-words";
pub const NON_SEPARATOR_TOKENS_KEY: &str = "non-separator-tokens";
pub const SEPARATOR_TOKENS_KEY: &str = "separator-tokens";
pub const DICTIONARY_KEY: &str = "dictionary";
pub const STRING_FACETED_DOCUMENTS_IDS_PREFIX: &str = "string-faceted-documents-ids";
pub const SYNONYMS_KEY: &str = "synonyms";
pub const USER_DEFINED_SYNONYMS_KEY: &str = "user-defined-synonyms";
pub const WORDS_FST_KEY: &str = "words-fst";
pub const WORDS_PREFIXES_FST_KEY: &str = "words-prefixes-fst";
pub const CREATED_AT_KEY: &str = "created-at";
@@ -655,26 +651,6 @@ impl Index {
}
}
/* remove hidden fields */
pub fn remove_hidden_fields(
&self,
rtxn: &RoTxn,
fields: impl IntoIterator<Item = impl AsRef<str>>,
) -> Result<(BTreeSet<String>, bool)> {
let mut valid_fields =
fields.into_iter().map(|f| f.as_ref().to_string()).collect::<BTreeSet<String>>();
let fields_len = valid_fields.len();
if let Some(dn) = self.displayed_fields(rtxn)? {
let displayable_names = dn.iter().map(|s| s.to_string()).collect();
valid_fields = &valid_fields & &displayable_names;
}
let hidden_fields = fields_len > valid_fields.len();
Ok((valid_fields, hidden_fields))
}
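The helper above is plain set algebra: intersect the candidate fields with the configured displayed fields and report whether the intersection dropped anything. A self-contained hedged sketch of the same logic, with made-up field names (the function and its names are illustrative, not milli's API):

use std::collections::BTreeSet;

// Hypothetical standalone version of the hidden-field filtering above.
fn remove_hidden(fields: &[&str], displayed: Option<&[&str]>) -> (BTreeSet<String>, bool) {
    let mut valid: BTreeSet<String> = fields.iter().map(|f| f.to_string()).collect();
    let before = valid.len();
    if let Some(displayed) = displayed {
        let displayed: BTreeSet<String> = displayed.iter().map(|s| s.to_string()).collect();
        valid = &valid & &displayed;
    }
    // true when at least one candidate field is not displayable, i.e. hidden
    (valid, before > valid.len())
}

fn main() {
    let (valid, hidden) = remove_hidden(&["price", "secret"], Some(&["name", "price"]));
    assert_eq!(valid.iter().map(String::as_str).collect::<Vec<_>>(), ["price"]);
    assert!(hidden);
}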
/* searchable fields */
/// Write the user defined searchable fields and generate the real searchable fields from the specified fields ids map.
@@ -1079,116 +1055,18 @@ impl Index {
}
}
/* non separator tokens */
pub(crate) fn put_non_separator_tokens(
&self,
wtxn: &mut RwTxn,
set: &BTreeSet<String>,
) -> heed::Result<()> {
self.main.put::<_, Str, SerdeBincode<_>>(wtxn, main_key::NON_SEPARATOR_TOKENS_KEY, set)
}
pub(crate) fn delete_non_separator_tokens(&self, wtxn: &mut RwTxn) -> heed::Result<bool> {
self.main.delete::<_, Str>(wtxn, main_key::NON_SEPARATOR_TOKENS_KEY)
}
pub fn non_separator_tokens(&self, rtxn: &RoTxn) -> Result<Option<BTreeSet<String>>> {
Ok(self.main.get::<_, Str, SerdeBincode<BTreeSet<String>>>(
rtxn,
main_key::NON_SEPARATOR_TOKENS_KEY,
)?)
}
/* separator tokens */
pub(crate) fn put_separator_tokens(
&self,
wtxn: &mut RwTxn,
set: &BTreeSet<String>,
) -> heed::Result<()> {
self.main.put::<_, Str, SerdeBincode<_>>(wtxn, main_key::SEPARATOR_TOKENS_KEY, set)
}
pub(crate) fn delete_separator_tokens(&self, wtxn: &mut RwTxn) -> heed::Result<bool> {
self.main.delete::<_, Str>(wtxn, main_key::SEPARATOR_TOKENS_KEY)
}
pub fn separator_tokens(&self, rtxn: &RoTxn) -> Result<Option<BTreeSet<String>>> {
Ok(self
.main
.get::<_, Str, SerdeBincode<BTreeSet<String>>>(rtxn, main_key::SEPARATOR_TOKENS_KEY)?)
}
/* separators easing method */
pub fn allowed_separators(&self, rtxn: &RoTxn) -> Result<Option<BTreeSet<String>>> {
let default_separators =
charabia::separators::DEFAULT_SEPARATORS.iter().map(|s| s.to_string());
let mut separators: Option<BTreeSet<_>> = None;
if let Some(mut separator_tokens) = self.separator_tokens(rtxn)? {
separator_tokens.extend(default_separators.clone());
separators = Some(separator_tokens);
}
if let Some(non_separator_tokens) = self.non_separator_tokens(rtxn)? {
separators = separators
.or_else(|| Some(default_separators.collect()))
.map(|separators| &separators - &non_separator_tokens);
}
Ok(separators)
}
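allowed_separators composes the effective separator set: start from charabia's defaults, add the user's separator tokens, then subtract the user's non-separator tokens; None means the tokenizer keeps its built-in defaults untouched. A hedged standalone sketch of that composition, over illustrative token sets:

use std::collections::BTreeSet;

// Hypothetical mirror of the composition above, over &str inputs.
fn allowed(defaults: &[&str], extra: Option<&[&str]>, non: Option<&[&str]>) -> Option<BTreeSet<String>> {
    let to_set = |xs: &[&str]| xs.iter().map(|s| s.to_string()).collect::<BTreeSet<_>>();
    let mut separators = extra.map(|extra| {
        let mut set = to_set(extra);
        set.extend(to_set(defaults)); // user additions on top of the defaults
        set
    });
    if let Some(non) = non {
        // removals apply to whatever we have so far, or to the bare defaults
        separators = separators.or_else(|| Some(to_set(defaults))).map(|s| &s - &to_set(non));
    }
    separators
}

fn main() {
    let out = allowed(&[" ", "-"], Some(&["<br/>"]), Some(&["-"])).unwrap();
    assert_eq!(out.iter().map(String::as_str).collect::<Vec<_>>(), [" ", "<br/>"]);
}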
/* dictionary */
pub(crate) fn put_dictionary(
&self,
wtxn: &mut RwTxn,
set: &BTreeSet<String>,
) -> heed::Result<()> {
self.main.put::<_, Str, SerdeBincode<_>>(wtxn, main_key::DICTIONARY_KEY, set)
}
pub(crate) fn delete_dictionary(&self, wtxn: &mut RwTxn) -> heed::Result<bool> {
self.main.delete::<_, Str>(wtxn, main_key::DICTIONARY_KEY)
}
pub fn dictionary(&self, rtxn: &RoTxn) -> Result<Option<BTreeSet<String>>> {
Ok(self
.main
.get::<_, Str, SerdeBincode<BTreeSet<String>>>(rtxn, main_key::DICTIONARY_KEY)?)
}
/* synonyms */
pub(crate) fn put_synonyms(
&self,
wtxn: &mut RwTxn,
synonyms: &HashMap<Vec<String>, Vec<Vec<String>>>,
user_defined_synonyms: &BTreeMap<String, Vec<String>>,
) -> heed::Result<()> {
self.main.put::<_, Str, SerdeBincode<_>>(wtxn, main_key::SYNONYMS_KEY, synonyms)?;
self.main.put::<_, Str, SerdeBincode<_>>(
wtxn,
main_key::USER_DEFINED_SYNONYMS_KEY,
user_defined_synonyms,
)
self.main.put::<_, Str, SerdeBincode<_>>(wtxn, main_key::SYNONYMS_KEY, synonyms)
}
pub(crate) fn delete_synonyms(&self, wtxn: &mut RwTxn) -> heed::Result<bool> {
self.main.delete::<_, Str>(wtxn, main_key::SYNONYMS_KEY)?;
self.main.delete::<_, Str>(wtxn, main_key::USER_DEFINED_SYNONYMS_KEY)
}
pub fn user_defined_synonyms(
&self,
rtxn: &RoTxn,
) -> heed::Result<BTreeMap<String, Vec<String>>> {
Ok(self
.main
.get::<_, Str, SerdeBincode<_>>(rtxn, main_key::USER_DEFINED_SYNONYMS_KEY)?
.unwrap_or_default())
self.main.delete::<_, Str>(wtxn, main_key::SYNONYMS_KEY)
}
pub fn synonyms(&self, rtxn: &RoTxn) -> heed::Result<HashMap<Vec<String>, Vec<Vec<String>>>> {
@@ -1840,11 +1718,11 @@ pub(crate) mod tests {
.unwrap();
index
.add_documents(documents!([
{ "id": 0, "_geo": { "lat": "0", "lng": "0" } },
{ "id": 1, "_geo": { "lat": 0, "lng": "-175" } },
{ "id": 2, "_geo": { "lat": "0", "lng": 175 } },
{ "id": 0, "_geo": { "lat": 0, "lng": 0 } },
{ "id": 1, "_geo": { "lat": 0, "lng": -175 } },
{ "id": 2, "_geo": { "lat": 0, "lng": 175 } },
{ "id": 3, "_geo": { "lat": 85, "lng": 0 } },
{ "id": 4, "_geo": { "lat": "-85", "lng": "0" } },
{ "id": 4, "_geo": { "lat": -85, "lng": 0 } },
]))
.unwrap();

View File

@@ -97,7 +97,7 @@ const MAX_LMDB_KEY_LENGTH: usize = 500;
///
/// This number is determined by the keys of the different facet databases
/// plus a margin of safety.
pub const MAX_FACET_VALUE_LENGTH: usize = MAX_LMDB_KEY_LENGTH - 32;
pub const MAX_FACET_VALUE_LENGTH: usize = MAX_LMDB_KEY_LENGTH - 20;
/// The maximum length a word can be
pub const MAX_WORD_LENGTH: usize = MAX_LMDB_KEY_LENGTH / 2;
@@ -293,15 +293,15 @@ pub fn normalize_facet(original: &str) -> String {
#[derive(serde::Serialize, serde::Deserialize, Debug)]
#[serde(transparent)]
pub struct VectorOrArrayOfVectors {
#[serde(with = "either::serde_untagged_optional")]
inner: Option<either::Either<Vec<f32>, Vec<Vec<f32>>>>,
#[serde(with = "either::serde_untagged")]
inner: either::Either<Vec<f32>, Vec<Vec<f32>>>,
}
impl VectorOrArrayOfVectors {
pub fn into_array_of_vectors(self) -> Option<Vec<Vec<f32>>> {
match self.inner? {
either::Either::Left(vector) => Some(vec![vector]),
either::Either::Right(vectors) => Some(vectors),
pub fn into_array_of_vectors(self) -> Vec<Vec<f32>> {
match self.inner {
either::Either::Left(vector) => vec![vector],
either::Either::Right(vectors) => vectors,
}
}
}
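The #[serde(transparent)] + untagged Either pair makes one field accept either a single embedding or a list of embeddings. A self-contained hedged sketch of the same pattern (the type and field names are illustrative, not milli's):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(transparent)]
struct OneOrMany {
    #[serde(with = "either::serde_untagged")]
    inner: either::Either<Vec<f32>, Vec<Vec<f32>>>,
}

fn main() {
    // A bare vector parses as the Left variant...
    let one: OneOrMany = serde_json::from_str("[0.1, 0.2]").unwrap();
    // ...and a nested array as the Right variant.
    let many: OneOrMany = serde_json::from_str("[[0.1, 0.2], [0.3, 0.4]]").unwrap();
    let normalize = |v: OneOrMany| match v.inner {
        either::Either::Left(vector) => vec![vector],
        either::Either::Right(vectors) => vectors,
    };
    assert_eq!(normalize(one).len(), 1);
    assert_eq!(normalize(many).len(), 2);
}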

View File

@@ -280,13 +280,9 @@ impl<'a> SearchForFacetValues<'a> {
let filterable_fields = index.filterable_fields(rtxn)?;
if !filterable_fields.contains(&self.facet) {
let (valid_fields, hidden_fields) =
index.remove_hidden_fields(rtxn, filterable_fields)?;
return Err(UserError::InvalidFacetSearchFacetName {
field: self.facet.clone(),
valid_fields,
hidden_fields,
valid_fields: filterable_fields.into_iter().collect(),
}
.into());
}

View File

@@ -91,12 +91,11 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
/// Update the universes accordingly and inform the logger.
macro_rules! back {
() => {
// FIXME: temporarily disabled assert: see <https://github.com/meilisearch/meilisearch/pull/4013>
// assert!(
// ranking_rule_universes[cur_ranking_rule_index].is_empty(),
// "The ranking rule {} did not sort its bucket exhaustively",
// ranking_rules[cur_ranking_rule_index].id()
// );
assert!(
ranking_rule_universes[cur_ranking_rule_index].is_empty(),
"The ranking rule {} did not sort its bucket exhaustively",
ranking_rules[cur_ranking_rule_index].id()
);
logger.end_iteration_ranking_rule(
cur_ranking_rule_index,
ranking_rules[cur_ranking_rule_index].as_ref(),

View File

@@ -418,11 +418,19 @@ impl<'t> Matcher<'t, '_> {
} else {
match &self.matches {
Some((tokens, matches)) => {
// If the text has to be cropped,
// compute the best interval to crop around.
let matches = match format_options.crop {
Some(crop_size) if crop_size > 0 => {
self.find_best_match_interval(matches, crop_size)
}
_ => matches,
};
// If the text has to be cropped,
// crop around the best interval.
let (byte_start, byte_end) = match format_options.crop {
Some(crop_size) if crop_size > 0 => {
let matches = self.find_best_match_interval(matches, crop_size);
self.crop_bounds(tokens, matches, crop_size)
}
_ => (0, self.text.len()),
@@ -442,11 +450,6 @@ impl<'t> Matcher<'t, '_> {
for m in matches {
let token = &tokens[m.token_position];
// skip matches out of the crop window.
if token.byte_start < byte_start || token.byte_end > byte_end {
continue;
}
if byte_index < token.byte_start {
formatted.push(&self.text[byte_index..token.byte_start]);
}
@@ -797,37 +800,6 @@ mod tests {
);
}
#[test]
fn format_highlight_crop_phrase_query() {
//! testing: https://github.com/meilisearch/meilisearch/issues/3975
let temp_index = TempIndex::new();
temp_index
.add_documents(documents!([
{ "id": 1, "text": "The groundbreaking invention had the power to split the world between those who embraced progress and those who resisted change!" }
]))
.unwrap();
let rtxn = temp_index.read_txn().unwrap();
let format_options = FormatOptions { highlight: true, crop: Some(10) };
let text = "The groundbreaking invention had the power to split the world between those who embraced progress and those who resisted change!";
let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"the world\"");
let mut matcher = builder.build(text);
// should return 10 words with a marker at the start as well as at the end, and the highlighted matches.
insta::assert_snapshot!(
matcher.format(format_options),
@"…had the power to split <em>the</em> <em>world</em> between those who…"
);
let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "those \"and those\"");
let mut matcher = builder.build(text);
// should highlight "those" and the phrase "and those".
insta::assert_snapshot!(
matcher.format(format_options),
@"…world between <em>those</em> who embraced progress <em>and</em> <em>those</em> who resisted…"
);
}
#[test]
fn smaller_crop_size() {
//! testing: https://github.com/meilisearch/specifications/pull/120#discussion_r836536295

View File

@@ -20,7 +20,7 @@ mod sort;
#[cfg(test)]
mod tests;
use std::collections::HashSet;
use std::collections::{BTreeSet, HashSet};
use bucket_sort::{bucket_sort, BucketSortOutput};
use charabia::TokenizerBuilder;
@@ -108,11 +108,24 @@ impl<'ctx> SearchContext<'ctx> {
(None, None) => continue,
// The field is not searchable => User error
(_fid, Some(false)) => {
let (valid_fields, hidden_fields) = match searchable_names {
Some(sn) => self.index.remove_hidden_fields(self.txn, sn)?,
None => self.index.remove_hidden_fields(self.txn, fids_map.names())?,
};
let mut valid_fields: BTreeSet<_> =
fids_map.names().map(String::from).collect();
// Filter by the searchable names
if let Some(sn) = searchable_names {
let searchable_names = sn.iter().map(|s| s.to_string()).collect();
valid_fields = &valid_fields & &searchable_names;
}
let searchable_count = valid_fields.len();
// Remove hidden fields
if let Some(dn) = self.index.displayed_fields(self.txn)? {
let displayable_names = dn.iter().map(|s| s.to_string()).collect();
valid_fields = &valid_fields & &displayable_names;
}
let hidden_fields = searchable_count > valid_fields.len();
let field = field_name.to_string();
return Err(UserError::InvalidSearchableAttribute {
field,
@@ -475,20 +488,6 @@ pub fn execute_search(
tokbuilder.stop_words(stop_words);
}
let separators = ctx.index.allowed_separators(ctx.txn)?;
let separators: Option<Vec<_>> =
separators.as_ref().map(|x| x.iter().map(String::as_str).collect());
if let Some(ref separators) = separators {
tokbuilder.separators(separators);
}
let dictionary = ctx.index.dictionary(ctx.txn)?;
let dictionary: Option<Vec<_>> =
dictionary.as_ref().map(|x| x.iter().map(String::as_str).collect());
if let Some(ref dictionary) = dictionary {
tokbuilder.words_dict(dictionary);
}
let script_lang_map = ctx.index.script_language(ctx.txn)?;
if !script_lang_map.is_empty() {
tokbuilder.allow_list(&script_lang_map);
@@ -591,24 +590,16 @@ fn check_sort_criteria(ctx: &SearchContext, sort_criteria: Option<&Vec<AscDesc>>
for asc_desc in sort_criteria {
match asc_desc.member() {
Member::Field(ref field) if !crate::is_faceted(field, &sortable_fields) => {
let (valid_fields, hidden_fields) =
ctx.index.remove_hidden_fields(ctx.txn, sortable_fields)?;
return Err(UserError::InvalidSortableAttribute {
field: field.to_string(),
valid_fields,
hidden_fields,
})?;
valid_fields: sortable_fields.into_iter().collect(),
})?
}
Member::Geo(_) if !sortable_fields.contains("_geo") => {
let (valid_fields, hidden_fields) =
ctx.index.remove_hidden_fields(ctx.txn, sortable_fields)?;
return Err(UserError::InvalidSortableAttribute {
field: "_geo".to_string(),
valid_fields,
hidden_fields,
})?;
valid_fields: sortable_fields.into_iter().collect(),
})?
}
_ => (),
}

View File

@@ -2,7 +2,7 @@ use std::io::Cursor;
use big_s::S;
use heed::EnvOpenOptions;
use maplit::{btreemap, hashset};
use maplit::{hashmap, hashset};
use crate::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
use crate::update::{IndexDocuments, IndexDocumentsConfig, IndexerConfig, Settings};
@@ -33,7 +33,7 @@ pub fn setup_search_index_with_criteria(criteria: &[Criterion]) -> Index {
S("tag"),
S("asc_desc_rank"),
});
builder.set_synonyms(btreemap! {
builder.set_synonyms(hashmap! {
S("hello") => vec![S("good morning")],
S("world") => vec![S("earth")],
S("america") => vec![S("the united states")],

View File

@@ -15,7 +15,7 @@ they store fewer proximities than the regular word proximity DB.
*/
use std::collections::BTreeMap;
use std::collections::HashMap;
use crate::index::tests::TempIndex;
use crate::search::new::tests::collect_field_values;
@@ -336,7 +336,7 @@ fn test_proximity_split_word() {
index
.update_settings(|s| {
let mut syns = BTreeMap::new();
let mut syns = HashMap::new();
syns.insert("xyz".to_owned(), vec!["sun flower".to_owned()]);
s.set_synonyms(syns);
})

View File

@@ -18,7 +18,7 @@ if `words` doesn't exist before it.
14. Synonyms cost nothing according to the typo ranking rule
*/
use std::collections::BTreeMap;
use std::collections::HashMap;
use crate::index::tests::TempIndex;
use crate::search::new::tests::collect_field_values;
@@ -591,7 +591,7 @@ fn test_typo_synonyms() {
.update_settings(|s| {
s.set_criteria(vec![Criterion::Typo]);
let mut synonyms = BTreeMap::new();
let mut synonyms = HashMap::new();
synonyms.insert("lackadaisical".to_owned(), vec!["lazy".to_owned()]);
synonyms.insert("fast brownish".to_owned(), vec!["quick brown".to_owned()]);

View File

@@ -1,6 +1,5 @@
use std::borrow::Cow;
use std::fs::File;
use std::io::BufReader;
use grenad::CompressionType;
use heed::types::ByteSlice;
@@ -31,7 +30,7 @@ pub struct FacetsUpdateBulk<'i> {
facet_type: FacetType,
field_ids: Vec<FieldId>,
// None if level 0 does not need to be updated
new_data: Option<grenad::Reader<BufReader<File>>>,
new_data: Option<grenad::Reader<File>>,
}
impl<'i> FacetsUpdateBulk<'i> {
@@ -39,7 +38,7 @@ impl<'i> FacetsUpdateBulk<'i> {
index: &'i Index,
field_ids: Vec<FieldId>,
facet_type: FacetType,
new_data: grenad::Reader<BufReader<File>>,
new_data: grenad::Reader<File>,
group_size: u8,
min_level_size: u8,
) -> FacetsUpdateBulk<'i> {
@@ -188,7 +187,7 @@ impl<R: std::io::Read + std::io::Seek> FacetsUpdateBulkInner<R> {
&self,
field_id: FieldId,
txn: &RoTxn,
) -> Result<(Vec<grenad::Reader<BufReader<File>>>, RoaringBitmap)> {
) -> Result<(Vec<grenad::Reader<File>>, RoaringBitmap)> {
let mut all_docids = RoaringBitmap::new();
let subwriters = self.compute_higher_levels(txn, field_id, 32, &mut |bitmaps, _| {
for bitmap in bitmaps {
@@ -260,7 +259,7 @@ impl<R: std::io::Read + std::io::Seek> FacetsUpdateBulkInner<R> {
field_id: u16,
level: u8,
handle_group: &mut dyn FnMut(&[RoaringBitmap], &'t [u8]) -> Result<()>,
) -> Result<Vec<grenad::Reader<BufReader<File>>>> {
) -> Result<Vec<grenad::Reader<File>>> {
if level == 0 {
self.read_level_0(rtxn, field_id, handle_group)?;
// Level 0 is already in the database

View File

@@ -1,6 +1,5 @@
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use heed::types::{ByteSlice, DecodeIgnore};
use heed::{BytesDecode, Error, RoTxn, RwTxn};
@@ -35,14 +34,14 @@ pub struct FacetsUpdateIncremental<'i> {
index: &'i Index,
inner: FacetsUpdateIncrementalInner,
facet_type: FacetType,
new_data: grenad::Reader<BufReader<File>>,
new_data: grenad::Reader<File>,
}
impl<'i> FacetsUpdateIncremental<'i> {
pub fn new(
index: &'i Index,
facet_type: FacetType,
new_data: grenad::Reader<BufReader<File>>,
new_data: grenad::Reader<File>,
group_size: u8,
min_level_size: u8,
max_group_size: u8,

View File

@@ -78,7 +78,6 @@ pub const FACET_MIN_LEVEL_SIZE: u8 = 5;
use std::collections::BTreeSet;
use std::fs::File;
use std::io::BufReader;
use std::iter::FromIterator;
use charabia::normalizer::{Normalize, NormalizerOption};
@@ -95,7 +94,7 @@ use crate::heed_codec::facet::{FacetGroupKey, FacetGroupKeyCodec, FacetGroupValu
use crate::heed_codec::ByteSliceRefCodec;
use crate::update::index_documents::create_sorter;
use crate::update::merge_btreeset_string;
use crate::{BEU16StrCodec, Index, Result, BEU16, MAX_FACET_VALUE_LENGTH};
use crate::{BEU16StrCodec, Index, Result, BEU16};
pub mod bulk;
pub mod delete;
@@ -109,17 +108,13 @@ pub struct FacetsUpdate<'i> {
index: &'i Index,
database: heed::Database<FacetGroupKeyCodec<ByteSliceRefCodec>, FacetGroupValueCodec>,
facet_type: FacetType,
new_data: grenad::Reader<BufReader<File>>,
new_data: grenad::Reader<File>,
group_size: u8,
max_group_size: u8,
min_level_size: u8,
}
impl<'i> FacetsUpdate<'i> {
pub fn new(
index: &'i Index,
facet_type: FacetType,
new_data: grenad::Reader<BufReader<File>>,
) -> Self {
pub fn new(index: &'i Index, facet_type: FacetType, new_data: grenad::Reader<File>) -> Self {
let database = match facet_type {
FacetType::String => index
.facet_id_string_docids
@@ -196,16 +191,7 @@ impl<'i> FacetsUpdate<'i> {
for result in database.iter(wtxn)? {
let (facet_group_key, ()) = result?;
if let FacetGroupKey { field_id, level: 0, left_bound } = facet_group_key {
let mut normalized_facet = left_bound.normalize(&options);
let normalized_truncated_facet: String;
if normalized_facet.len() > MAX_FACET_VALUE_LENGTH {
normalized_truncated_facet = normalized_facet
.char_indices()
.take_while(|(idx, _)| *idx < MAX_FACET_VALUE_LENGTH)
.map(|(_, c)| c)
.collect();
normalized_facet = normalized_truncated_facet.into();
}
let normalized_facet = left_bound.normalize(&options);
let set = BTreeSet::from_iter(std::iter::once(left_bound));
let key = (field_id, normalized_facet.as_ref());
let key = BEU16StrCodec::bytes_encode(&key).ok_or(heed::Error::Encoding)?;
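The block removed above truncated the normalized facet on a character boundary before encoding the key. In isolation, that logic looks like the sketch below (the `truncate_facet` name and `max` parameter are ours). Note the `*idx < max` predicate keeps any character that starts before the limit, so the result can still overshoot `max` by up to three bytes:

// Keep only the characters whose starting byte index is below `max`;
// walking char_indices avoids slicing through a multi-byte character.
fn truncate_facet(s: &str, max: usize) -> String {
    s.char_indices()
        .take_while(|(idx, _)| *idx < max)
        .map(|(_, c)| c)
        .collect()
}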

View File

@@ -1,4 +1,4 @@
use std::io::{BufWriter, Read, Seek};
use std::io::{Read, Seek};
use std::result::Result as StdResult;
use std::{fmt, iter};
@@ -35,7 +35,7 @@ pub fn enrich_documents_batch<R: Read + Seek>(
let (mut cursor, mut documents_batch_index) = reader.into_cursor_and_fields_index();
let mut external_ids = tempfile::tempfile().map(BufWriter::new).map(grenad::Writer::new)?;
let mut external_ids = tempfile::tempfile().map(grenad::Writer::new)?;
let mut uuid_buffer = [0; uuid::fmt::Hyphenated::LENGTH];
// The primary key *field id* that has already been set for this index or the one

View File

@@ -1,7 +1,6 @@
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fs::File;
use std::io::BufReader;
use std::{io, mem, str};
use charabia::{Language, Script, SeparatorKind, Token, TokenKind, Tokenizer, TokenizerBuilder};
@@ -29,10 +28,8 @@ pub fn extract_docid_word_positions<R: io::Read + io::Seek>(
indexer: GrenadParameters,
searchable_fields: &Option<HashSet<FieldId>>,
stop_words: Option<&fst::Set<&[u8]>>,
allowed_separators: Option<&[&str]>,
dictionary: Option<&[&str]>,
max_positions_per_attributes: Option<u32>,
) -> Result<(RoaringBitmap, grenad::Reader<BufReader<File>>, ScriptLanguageDocidsMap)> {
) -> Result<(RoaringBitmap, grenad::Reader<File>, ScriptLanguageDocidsMap)> {
puffin::profile_function!();
let max_positions_per_attributes = max_positions_per_attributes
@@ -55,12 +52,6 @@ pub fn extract_docid_word_positions<R: io::Read + io::Seek>(
if let Some(stop_words) = stop_words {
tokenizer_builder.stop_words(stop_words);
}
if let Some(dictionary) = dictionary {
tokenizer_builder.words_dict(dictionary);
}
if let Some(separators) = allowed_separators {
tokenizer_builder.separators(separators);
}
let tokenizer = tokenizer_builder.build();
let mut cursor = obkv_documents.into_cursor()?;
@@ -227,9 +218,9 @@ fn process_tokens<'a>(
) -> impl Iterator<Item = (usize, Token<'a>)> {
tokens
.skip_while(|token| token.is_separator())
.scan((0, None), |(offset, prev_kind), mut token| {
.scan((0, None), |(offset, prev_kind), token| {
match token.kind {
TokenKind::Word | TokenKind::StopWord if !token.lemma().is_empty() => {
TokenKind::Word | TokenKind::StopWord | TokenKind::Unknown => {
*offset += match *prev_kind {
Some(TokenKind::Separator(SeparatorKind::Hard)) => 8,
Some(_) => 1,
@@ -245,7 +236,7 @@ fn process_tokens<'a>(
{
*prev_kind = Some(token.kind);
}
_ => token.kind = TokenKind::Unknown,
_ => (),
}
Some((*offset, token))
})
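The `scan` above is the word-position bookkeeping for proximity: a hard separator between two tokens costs 8 positions, any other boundary costs 1, so words across sentence punctuation rank as far apart. A simplified standalone sketch of that accumulation, with charabia's token types elided (all names here are ours):

#[derive(Clone, Copy)]
enum Boundary { Hard, Soft }

// Assign a position to each word given the separator kind that preceded it.
fn word_positions(words: &[(Option<Boundary>, &str)]) -> Vec<(usize, String)> {
    let mut offset = 0;
    words
        .iter()
        .map(|(prev, word)| {
            offset += match prev {
                Some(Boundary::Hard) => 8, // sentence-level gap
                Some(Boundary::Soft) => 1, // ordinary word boundary
                None => 0,                 // first word of the field
            };
            (offset, word.to_string())
        })
        .collect()
}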

View File

@@ -1,5 +1,5 @@
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use heed::{BytesDecode, BytesEncode};
@@ -19,7 +19,7 @@ use crate::Result;
pub fn extract_facet_number_docids<R: io::Read + io::Seek>(
docid_fid_facet_number: grenad::Reader<R>,
indexer: GrenadParameters,
) -> Result<grenad::Reader<BufReader<File>>> {
) -> Result<grenad::Reader<File>> {
puffin::profile_function!();
let max_memory = indexer.max_memory_by_thread();

View File

@@ -1,5 +1,5 @@
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use heed::BytesEncode;
@@ -17,7 +17,7 @@ use crate::{FieldId, Result, MAX_FACET_VALUE_LENGTH};
pub fn extract_facet_string_docids<R: io::Read + io::Seek>(
docid_fid_facet_string: grenad::Reader<R>,
indexer: GrenadParameters,
) -> Result<grenad::Reader<BufReader<File>>> {
) -> Result<grenad::Reader<File>> {
puffin::profile_function!();
let max_memory = indexer.max_memory_by_thread();
@@ -46,7 +46,7 @@ pub fn extract_facet_string_docids<R: io::Read + io::Seek>(
if normalised_value.len() > MAX_FACET_VALUE_LENGTH {
normalised_truncated_value = normalised_value
.char_indices()
.take_while(|(idx, _)| *idx < MAX_FACET_VALUE_LENGTH)
.take_while(|(idx, _)| idx + 4 < MAX_FACET_VALUE_LENGTH)
.map(|(_, c)| c)
.collect();
normalised_value = normalised_truncated_value.as_str();
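The two predicates above differ in their safety margin. A UTF-8 character occupies at most 4 bytes, so `idx + 4 < MAX_FACET_VALUE_LENGTH` guarantees the truncated string ends strictly below the limit even when the last kept character is 4 bytes wide, while the `*idx < MAX_FACET_VALUE_LENGTH` form can overshoot by up to three bytes. A sketch of the strict variant, with a `max` parameter standing in for the constant:

// Strict variant: since a UTF-8 char is at most 4 bytes, requiring
// idx + 4 < max means even a 4-byte final character ends below `max`.
fn truncate_strict(s: &str, max: usize) -> String {
    s.char_indices()
        .take_while(|(idx, _)| idx + 4 < max)
        .map(|(_, c)| c)
        .collect()
}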

View File

@@ -1,7 +1,7 @@
use std::collections::{BTreeMap, HashSet};
use std::convert::TryInto;
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use std::mem::size_of;
use heed::zerocopy::AsBytes;
@@ -17,24 +17,22 @@ use crate::{CboRoaringBitmapCodec, DocumentId, FieldId, Result, BEU32, MAX_FACET
/// The extracted facet values stored in grenad files by type.
pub struct ExtractedFacetValues {
pub docid_fid_facet_numbers_chunk: grenad::Reader<BufReader<File>>,
pub docid_fid_facet_strings_chunk: grenad::Reader<BufReader<File>>,
pub fid_facet_is_null_docids_chunk: grenad::Reader<BufReader<File>>,
pub fid_facet_is_empty_docids_chunk: grenad::Reader<BufReader<File>>,
pub fid_facet_exists_docids_chunk: grenad::Reader<BufReader<File>>,
pub docid_fid_facet_numbers_chunk: grenad::Reader<File>,
pub docid_fid_facet_strings_chunk: grenad::Reader<File>,
pub fid_facet_is_null_docids_chunk: grenad::Reader<File>,
pub fid_facet_is_empty_docids_chunk: grenad::Reader<File>,
pub fid_facet_exists_docids_chunk: grenad::Reader<File>,
}
/// Extracts the facet values of each faceted field of each document.
///
/// Returns the generated grenad reader containing the docid, the fid and the original value as key
/// and the normalized value as value extracted from the given chunk of documents.
/// We need the fid of the geofields to correctly parse them as numbers if they were sent as strings initially.
#[logging_timer::time]
pub fn extract_fid_docid_facet_values<R: io::Read + io::Seek>(
obkv_documents: grenad::Reader<R>,
indexer: GrenadParameters,
faceted_fields: &HashSet<FieldId>,
geo_fields_ids: Option<(FieldId, FieldId)>,
) -> Result<ExtractedFacetValues> {
puffin::profile_function!();
@@ -86,10 +84,7 @@ pub fn extract_fid_docid_facet_values<R: io::Read + io::Seek>(
let value = from_slice(field_bytes).map_err(InternalError::SerdeJson)?;
match extract_facet_values(
&value,
geo_fields_ids.map_or(false, |(lat, lng)| field_id == lat || field_id == lng),
) {
match extract_facet_values(&value) {
FilterableValues::Null => {
facet_is_null_docids.entry(field_id).or_default().insert(document);
}
@@ -182,13 +177,12 @@ enum FilterableValues {
Values { numbers: Vec<f64>, strings: Vec<(String, String)> },
}
fn extract_facet_values(value: &Value, geo_field: bool) -> FilterableValues {
fn extract_facet_values(value: &Value) -> FilterableValues {
fn inner_extract_facet_values(
value: &Value,
can_recurse: bool,
output_numbers: &mut Vec<f64>,
output_strings: &mut Vec<(String, String)>,
geo_field: bool,
) {
match value {
Value::Null => (),
@@ -199,30 +193,13 @@ fn extract_facet_values(value: &Value, geo_field: bool) -> FilterableValues {
}
}
Value::String(original) => {
// if we're working on a geofield it MUST be something we can parse or else there was an internal error
// in the enrich pipeline. But since the enrich pipeline worked, we want to avoid crashing at all costs.
if geo_field {
if let Ok(float) = original.parse() {
output_numbers.push(float);
} else {
log::warn!(
"Internal error, could not parse a geofield that has been validated. Please open an issue."
)
}
}
let normalized = crate::normalize_facet(original);
output_strings.push((normalized, original.clone()));
}
Value::Array(values) => {
if can_recurse {
for value in values {
inner_extract_facet_values(
value,
false,
output_numbers,
output_strings,
geo_field,
);
inner_extract_facet_values(value, false, output_numbers, output_strings);
}
}
}
@@ -238,7 +215,7 @@ fn extract_facet_values(value: &Value, geo_field: bool) -> FilterableValues {
otherwise => {
let mut numbers = Vec::new();
let mut strings = Vec::new();
inner_extract_facet_values(otherwise, true, &mut numbers, &mut strings, geo_field);
inner_extract_facet_values(otherwise, true, &mut numbers, &mut strings);
FilterableValues::Values { numbers, strings }
}
}
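As the calls above show, `inner_extract_facet_values` flattens exactly one level of arrays: the top-level call passes `can_recurse = true` and each per-element call passes `false`, so arrays nested inside arrays are skipped rather than flattened. A reduced sketch of that shape, numbers only (the `collect_numbers` name is ours):

use serde_json::Value;

// Collect numeric facet values, recursing into an array at most once.
fn collect_numbers(value: &Value, can_recurse: bool, out: &mut Vec<f64>) {
    match value {
        Value::Number(n) => {
            if let Some(f) = n.as_f64() {
                out.push(f);
            }
        }
        Value::Array(values) if can_recurse => {
            for v in values {
                collect_numbers(v, false, out); // inner arrays are ignored
            }
        }
        _ => (),
    }
}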

View File

@@ -1,6 +1,6 @@
use std::collections::HashMap;
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use grenad::Sorter;
@@ -21,7 +21,7 @@ use crate::{relative_from_absolute_position, DocumentId, FieldId, Result};
pub fn extract_fid_word_count_docids<R: io::Read + io::Seek>(
docid_word_positions: grenad::Reader<R>,
indexer: GrenadParameters,
) -> Result<grenad::Reader<BufReader<File>>> {
) -> Result<grenad::Reader<File>> {
puffin::profile_function!();
let max_memory = indexer.max_memory_by_thread();

View File

@@ -1,5 +1,5 @@
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use concat_arrays::concat_arrays;
use serde_json::Value;
@@ -18,7 +18,7 @@ pub fn extract_geo_points<R: io::Read + io::Seek>(
indexer: GrenadParameters,
primary_key_id: FieldId,
(lat_fid, lng_fid): (FieldId, FieldId),
) -> Result<grenad::Reader<BufReader<File>>> {
) -> Result<grenad::Reader<File>> {
puffin::profile_function!();
let mut writer = create_writer(

View File

@@ -1,6 +1,6 @@
use std::convert::TryFrom;
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use bytemuck::cast_slice;
use serde_json::{from_slice, Value};
@@ -18,7 +18,7 @@ pub fn extract_vector_points<R: io::Read + io::Seek>(
indexer: GrenadParameters,
primary_key_id: FieldId,
vectors_fid: FieldId,
) -> Result<grenad::Reader<BufReader<File>>> {
) -> Result<grenad::Reader<File>> {
puffin::profile_function!();
let mut writer = create_writer(
@@ -35,7 +35,7 @@ pub fn extract_vector_points<R: io::Read + io::Seek>(
// lazily get it when needed
let document_id = || -> Value {
let document_id = obkv.get(primary_key_id).unwrap();
from_slice(document_id).unwrap()
serde_json::from_slice(document_id).unwrap()
};
// first we retrieve the _vectors field
@@ -52,14 +52,12 @@ pub fn extract_vector_points<R: io::Read + io::Seek>(
}
};
if let Some(vectors) = vectors {
for (i, vector) in vectors.into_iter().enumerate().take(u16::MAX as usize) {
let index = u16::try_from(i).unwrap();
let mut key = docid_bytes.to_vec();
key.extend_from_slice(&index.to_be_bytes());
let bytes = cast_slice(&vector);
writer.insert(key, bytes)?;
}
for (i, vector) in vectors.into_iter().enumerate().take(u16::MAX as usize) {
let index = u16::try_from(i).unwrap();
let mut key = docid_bytes.to_vec();
key.extend_from_slice(&index.to_be_bytes());
let bytes = cast_slice(&vector);
writer.insert(key, bytes)?;
}
}
// else => the `_vectors` object was `null`, there is nothing to do
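The two loop bodies above look identical because the extraction dropped leading whitespace; the change appears to be indentation-only. The key they build is worth spelling out: the document id bytes followed by a big-endian `u16` vector index, which caps a document at `u16::MAX` vectors and makes keys sort by (docid, index). A sketch (the `vector_key` name is ours):

use std::convert::TryFrom;

// docid bytes ++ big-endian u16 index: keys group by document,
// then order by vector index within the document.
fn vector_key(docid_bytes: &[u8], i: usize) -> Vec<u8> {
    let index = u16::try_from(i).expect("more than u16::MAX vectors");
    let mut key = docid_bytes.to_vec();
    key.extend_from_slice(&index.to_be_bytes());
    key
}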

View File

@@ -1,6 +1,6 @@
use std::collections::HashSet;
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use std::iter::FromIterator;
use roaring::RoaringBitmap;
@@ -26,7 +26,7 @@ pub fn extract_word_docids<R: io::Read + io::Seek>(
docid_word_positions: grenad::Reader<R>,
indexer: GrenadParameters,
exact_attributes: &HashSet<FieldId>,
) -> Result<(grenad::Reader<BufReader<File>>, grenad::Reader<BufReader<File>>)> {
) -> Result<(grenad::Reader<File>, grenad::Reader<File>)> {
puffin::profile_function!();
let max_memory = indexer.max_memory_by_thread();

View File

@@ -1,5 +1,5 @@
use std::fs::File;
use std::io::{self, BufReader};
use std::io;
use super::helpers::{
create_sorter, merge_cbo_roaring_bitmaps, read_u32_ne_bytes, sorter_into_reader,
@@ -14,7 +14,7 @@ use crate::{relative_from_absolute_position, DocumentId, Result};
pub fn extract_word_fid_docids<R: io::Read + io::Seek>(
docid_word_positions: grenad::Reader<R>,
indexer: GrenadParameters,
) -> Result<grenad::Reader<BufReader<File>>> {
) -> Result<grenad::Reader<File>> {
puffin::profile_function!();
let max_memory = indexer.max_memory_by_thread();
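Both this extractor and `extract_fid_word_count_docids` above rely on `relative_from_absolute_position` to split an absolute token position into a field id and an in-field position. As a hedged sketch only, assuming the field id lives in the upper 16 bits of the absolute position (check the crate before relying on this):

// Assumed layout: absolute = (field_id << 16) | relative_position.
fn relative_from_absolute_position(absolute: u32) -> (u16, u16) {
    ((absolute >> 16) as u16, (absolute & 0xFFFF) as u16)
}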

Some files were not shown because too many files have changed in this diff.