Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-03 19:25:36 +00:00)

Compare commits
3 Commits: v1.2.0-rc. ... reduce-mx-
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 17f6d65840 |  |
|  | daa4138df9 |  |
|  | e4035ff3ec |  |
Cargo.lock (generated, 32 changes)
@@ -463,7 +463,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
 
 [[package]]
 name = "benchmarks"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "anyhow",
  "bytes",
@@ -1209,7 +1209,7 @@ dependencies = [
 
 [[package]]
 name = "dump"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -1428,7 +1428,7 @@ dependencies = [
 
 [[package]]
 name = "file-store"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "faux",
  "tempfile",
@@ -1450,7 +1450,7 @@ dependencies = [
 
 [[package]]
 name = "filter-parser"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "insta",
  "nom",
@@ -1476,7 +1476,7 @@ dependencies = [
 
 [[package]]
 name = "flatten-serde-json"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -1794,7 +1794,7 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
 [[package]]
 name = "heed"
 version = "0.12.5"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
 dependencies = [
  "byteorder",
  "heed-traits",
@@ -1811,12 +1811,12 @@ dependencies = [
 [[package]]
 name = "heed-traits"
 version = "0.7.0"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
 
 [[package]]
 name = "heed-types"
 version = "0.7.2"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
 dependencies = [
  "bincode",
  "heed-traits",
@@ -1959,7 +1959,7 @@ dependencies = [
 
 [[package]]
 name = "index-scheduler"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -2113,7 +2113,7 @@ dependencies = [
 
 [[package]]
 name = "json-depth-checker"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2539,7 +2539,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
 
 [[package]]
 name = "meili-snap"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "insta",
  "md5",
@@ -2548,7 +2548,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -2636,7 +2636,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-auth"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "base64 0.21.0",
  "enum-iterator",
@@ -2655,7 +2655,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-types"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -2709,7 +2709,7 @@ dependencies = [
 
 [[package]]
 name = "milli"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "big_s",
  "bimap",
@@ -3064,7 +3064,7 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
 [[package]]
 name = "permissive-json-pointer"
-version = "1.2.0"
+version = "1.1.1"
 dependencies = [
  "big_s",
  "serde_json",
@@ -17,7 +17,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.2.0"
+version = "1.1.1"
 authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
 description = "Meilisearch HTTP server"
 homepage = "https://meilisearch.com"
@@ -126,6 +126,3 @@ ssl_tickets = false
 experimental_enable_metrics = false
 # Experimental metrics feature. For more information, see: <https://github.com/meilisearch/meilisearch/discussions/3518>
 # Enables the Prometheus metrics on the `GET /metrics` endpoint.
-
-experimental_reduce_indexing_memory_usage = false
-# Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
@@ -24,7 +24,6 @@ use std::io::BufWriter;
 
 use dump::IndexMetadata;
 use log::{debug, error, info};
-use meilisearch_types::error::Code;
 use meilisearch_types::heed::{RoTxn, RwTxn};
 use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader};
 use meilisearch_types::milli::heed::CompactionOption;
@@ -1492,12 +1491,7 @@ fn delete_document_by_filter(filter: &serde_json::Value, index: Index) -> Result
     Ok(if let Some(filter) = filter {
         let mut wtxn = index.write_txn()?;
 
-        let candidates = filter.evaluate(&wtxn, &index).map_err(|err| match err {
-            milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
-                Error::from(err).with_custom_error_code(Code::InvalidDocumentFilter)
-            }
-            e => e.into(),
-        })?;
+        let candidates = filter.evaluate(&wtxn, &index)?;
         let mut delete_operation = DeleteDocuments::new(&mut wtxn, &index)?;
         delete_operation.delete_documents(&candidates);
         let deleted_documents =
@@ -46,8 +46,6 @@ impl From<DateField> for Code {
 #[allow(clippy::large_enum_variant)]
 #[derive(Error, Debug)]
 pub enum Error {
-    #[error("{1}")]
-    WithCustomErrorCode(Code, Box<Self>),
     #[error("Index `{0}` not found.")]
     IndexNotFound(String),
     #[error("Index `{0}` already exists.")]
@@ -146,7 +144,6 @@ impl Error {
     pub fn is_recoverable(&self) -> bool {
         match self {
             Error::IndexNotFound(_)
-            | Error::WithCustomErrorCode(_, _)
            | Error::IndexAlreadyExists(_)
             | Error::SwapDuplicateIndexFound(_)
             | Error::SwapDuplicateIndexesFound(_)
@@ -179,16 +176,11 @@ impl Error {
             Error::PlannedFailure => false,
         }
     }
-
-    pub fn with_custom_error_code(self, code: Code) -> Self {
-        Self::WithCustomErrorCode(code, Box::new(self))
-    }
 }
 
 impl ErrorCode for Error {
     fn error_code(&self) -> Code {
         match self {
-            Error::WithCustomErrorCode(code, _) => *code,
             Error::IndexNotFound(_) => Code::IndexNotFound,
             Error::IndexAlreadyExists(_) => Code::IndexAlreadyExists,
             Error::SwapDuplicateIndexesFound(_) => Code::InvalidSwapDuplicateIndexFound,
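Note: the removed `WithCustomErrorCode` variant implements a small, reusable pattern: box the original error inside a wrapper that overrides only its error code, while `#[error("{1}")]` forwards the inner error's message unchanged. A standalone sketch of the idea, assuming the `thiserror` crate and using a stand-in `Code` enum:

```rust
// Sketch of the WithCustomErrorCode pattern removed above; `Code` here is a
// simplified stand-in for the real error-code enum, not the actual type.
use thiserror::Error;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Code {
    IndexNotFound,
    InvalidDocumentFilter,
}

#[derive(Error, Debug)]
enum Error {
    // "{1}" forwards the boxed inner error's Display output, so overriding
    // the code never changes the user-facing message.
    #[error("{1}")]
    WithCustomErrorCode(Code, Box<Self>),
    #[error("Index `{0}` not found.")]
    IndexNotFound(String),
}

impl Error {
    fn with_custom_error_code(self, code: Code) -> Self {
        Self::WithCustomErrorCode(code, Box::new(self))
    }

    fn error_code(&self) -> Code {
        match self {
            Error::WithCustomErrorCode(code, _) => *code,
            Error::IndexNotFound(_) => Code::IndexNotFound,
        }
    }
}

fn main() {
    let err = Error::IndexNotFound("test".into())
        .with_custom_error_code(Code::InvalidDocumentFilter);
    // The code is overridden, the message is untouched.
    assert_eq!(err.error_code(), Code::InvalidDocumentFilter);
    assert_eq!(err.to_string(), "Index `test` not found.");
}
```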
@@ -5,7 +5,6 @@ use std::collections::BTreeMap;
 use std::path::Path;
 use std::time::Duration;
 
-use meilisearch_types::heed::flags::Flags;
 use meilisearch_types::heed::{EnvClosingEvent, EnvOpenOptions};
 use meilisearch_types::milli::Index;
 use time::OffsetDateTime;
@@ -54,7 +53,6 @@ pub struct IndexMap {
 pub struct ClosingIndex {
     uuid: Uuid,
     closing_event: EnvClosingEvent,
-    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }
@@ -70,7 +68,6 @@ impl ClosingIndex {
     pub fn wait_timeout(self, timeout: Duration) -> Option<ReopenableIndex> {
         self.closing_event.wait_timeout(timeout).then_some(ReopenableIndex {
             uuid: self.uuid,
-            enable_mdb_writemap: self.enable_mdb_writemap,
             map_size: self.map_size,
             generation: self.generation,
         })
@@ -79,7 +76,6 @@ impl ClosingIndex {
 
 pub struct ReopenableIndex {
     uuid: Uuid,
-    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }
@@ -107,7 +103,7 @@ impl ReopenableIndex {
                 return Ok(());
             }
             map.unavailable.remove(&self.uuid);
-            map.create(&self.uuid, path, None, self.enable_mdb_writemap, self.map_size)?;
+            map.create(&self.uuid, path, None, self.map_size)?;
         }
         Ok(())
     }
@@ -174,17 +170,16 @@ impl IndexMap {
         uuid: &Uuid,
         path: &Path,
         date: Option<(OffsetDateTime, OffsetDateTime)>,
-        enable_mdb_writemap: bool,
         map_size: usize,
     ) -> Result<Index> {
         if !matches!(self.get_unavailable(uuid), Missing) {
             panic!("Attempt to open an index that was unavailable");
         }
-        let index = create_or_open_index(path, date, enable_mdb_writemap, map_size)?;
+        let index = create_or_open_index(path, date, map_size)?;
         match self.available.insert(*uuid, index.clone()) {
             InsertionOutcome::InsertedNew => (),
             InsertionOutcome::Evicted(evicted_uuid, evicted_index) => {
-                self.close(evicted_uuid, evicted_index, enable_mdb_writemap, 0);
+                self.close(evicted_uuid, evicted_index, 0);
             }
             InsertionOutcome::Replaced(_) => {
                 panic!("Attempt to open an index that was already opened")
@@ -217,30 +212,17 @@ impl IndexMap {
     /// | Closing   | Closing |
     /// | Available | Closing |
     ///
-    pub fn close_for_resize(
-        &mut self,
-        uuid: &Uuid,
-        enable_mdb_writemap: bool,
-        map_size_growth: usize,
-    ) {
+    pub fn close_for_resize(&mut self, uuid: &Uuid, map_size_growth: usize) {
         let Some(index) = self.available.remove(uuid) else { return; };
-        self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
+        self.close(*uuid, index, map_size_growth);
     }
 
-    fn close(
-        &mut self,
-        uuid: Uuid,
-        index: Index,
-        enable_mdb_writemap: bool,
-        map_size_growth: usize,
-    ) {
+    fn close(&mut self, uuid: Uuid, index: Index, map_size_growth: usize) {
         let map_size = index.map_size().unwrap_or(DEFAULT_MAP_SIZE) + map_size_growth;
         let closing_event = index.prepare_for_closing();
         let generation = self.next_generation();
-        self.unavailable.insert(
-            uuid,
-            Some(ClosingIndex { uuid, closing_event, enable_mdb_writemap, map_size, generation }),
-        );
+        self.unavailable
+            .insert(uuid, Some(ClosingIndex { uuid, closing_event, map_size, generation }));
     }
 
     /// Attempts to delete and index.
@@ -300,15 +282,11 @@ impl IndexMap {
 fn create_or_open_index(
     path: &Path,
     date: Option<(OffsetDateTime, OffsetDateTime)>,
-    enable_mdb_writemap: bool,
     map_size: usize,
 ) -> Result<Index> {
     let mut options = EnvOpenOptions::new();
     options.map_size(clamp_to_page_size(map_size));
     options.max_readers(1024);
-    if enable_mdb_writemap {
-        unsafe { options.flag(Flags::MdbWriteMap) };
-    }
 
     if let Some((created, updated)) = date {
         Ok(Index::new_with_creation_dates(options, path, created, updated)?)
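Note: the heart of the feature being removed here is the `Flags::MdbWriteMap` branch in `create_or_open_index`. A minimal sketch of that environment-opening path, reassembled from the hunk above and assuming the heed 0.12 fork pinned by this repository (where `EnvOpenOptions::flag` is an unsafe method):

```rust
// Minimal sketch of the removed write-map path, against heed 0.12 as pinned
// in this repository; not the exact meilisearch function.
use heed::flags::Flags;
use heed::{Env, EnvOpenOptions, Result};
use std::path::Path;

fn open_env(path: &Path, map_size: usize, enable_mdb_writemap: bool) -> Result<Env> {
    let mut options = EnvOpenOptions::new();
    options.map_size(map_size);
    options.max_readers(1024);
    if enable_mdb_writemap {
        // MDB_WRITEMAP maps the data file read-write, so LMDB dirties pages in
        // place instead of keeping writable copies, which lowers RAM usage
        // during indexing. It is unsafe because a stray write through the
        // mapping can corrupt the database, hence the unsafe block.
        unsafe { options.flag(Flags::MdbWriteMap) };
    }
    options.open(path)
}
```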
@@ -66,8 +66,6 @@ pub struct IndexMapper {
     index_base_map_size: usize,
     /// The quantity by which the map size of an index is incremented upon reopening, in bytes.
     index_growth_amount: usize,
-    /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.
-    enable_mdb_writemap: bool,
     pub indexer_config: Arc<IndexerConfig>,
 }
 
@@ -125,22 +123,15 @@ impl IndexMapper {
         index_base_map_size: usize,
         index_growth_amount: usize,
         index_count: usize,
-        enable_mdb_writemap: bool,
         indexer_config: IndexerConfig,
     ) -> Result<Self> {
-        let mut wtxn = env.write_txn()?;
-        let index_mapping = env.create_database(&mut wtxn, Some(INDEX_MAPPING))?;
-        let index_stats = env.create_database(&mut wtxn, Some(INDEX_STATS))?;
-        wtxn.commit()?;
-
         Ok(Self {
             index_map: Arc::new(RwLock::new(IndexMap::new(index_count))),
-            index_mapping,
-            index_stats,
+            index_mapping: env.create_database(Some(INDEX_MAPPING))?,
+            index_stats: env.create_database(Some(INDEX_STATS))?,
             base_path,
             index_base_map_size,
             index_growth_amount,
-            enable_mdb_writemap,
             indexer_config: Arc::new(indexer_config),
         })
     }
@@ -171,7 +162,6 @@ impl IndexMapper {
                     &uuid,
                     &index_path,
                     date,
-                    self.enable_mdb_writemap,
                     self.index_base_map_size,
                 )?;
 
@@ -283,11 +273,7 @@ impl IndexMapper {
             .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
 
         // We remove the index from the in-memory index map.
-        self.index_map.write().unwrap().close_for_resize(
-            &uuid,
-            self.enable_mdb_writemap,
-            self.index_growth_amount,
-        );
+        self.index_map.write().unwrap().close_for_resize(&uuid, self.index_growth_amount);
 
         Ok(())
     }
@@ -352,7 +338,6 @@ impl IndexMapper {
                         &uuid,
                         &index_path,
                         None,
-                        self.enable_mdb_writemap,
                         self.index_base_map_size,
                     )?;
                 }
@@ -233,8 +233,6 @@ pub struct IndexSchedulerOptions {
     pub task_db_size: usize,
     /// The size, in bytes, with which a meilisearch index is opened the first time of each meilisearch index.
     pub index_base_map_size: usize,
-    /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.
-    pub enable_mdb_writemap: bool,
     /// The size, in bytes, by which the map size of an index is increased when it resized due to being full.
     pub index_growth_amount: usize,
     /// The number of indexes that can be concurrently opened in memory.
@@ -376,11 +374,6 @@ impl IndexScheduler {
         std::fs::create_dir_all(&options.indexes_path)?;
         std::fs::create_dir_all(&options.dumps_path)?;
 
-        if cfg!(windows) && options.enable_mdb_writemap {
-            // programmer error if this happens: in normal use passing the option on Windows is an error in main
-            panic!("Windows doesn't support the MDB_WRITEMAP LMDB option");
-        }
-
         let task_db_size = clamp_to_page_size(options.task_db_size);
         let budget = if options.indexer_config.skip_index_budget {
             IndexBudget {
@@ -403,37 +396,25 @@ impl IndexScheduler {
             .open(options.tasks_path)?;
         let file_store = FileStore::new(&options.update_file_path)?;
 
-        let mut wtxn = env.write_txn()?;
-        let all_tasks = env.create_database(&mut wtxn, Some(db_name::ALL_TASKS))?;
-        let status = env.create_database(&mut wtxn, Some(db_name::STATUS))?;
-        let kind = env.create_database(&mut wtxn, Some(db_name::KIND))?;
-        let index_tasks = env.create_database(&mut wtxn, Some(db_name::INDEX_TASKS))?;
-        let canceled_by = env.create_database(&mut wtxn, Some(db_name::CANCELED_BY))?;
-        let enqueued_at = env.create_database(&mut wtxn, Some(db_name::ENQUEUED_AT))?;
-        let started_at = env.create_database(&mut wtxn, Some(db_name::STARTED_AT))?;
-        let finished_at = env.create_database(&mut wtxn, Some(db_name::FINISHED_AT))?;
-        wtxn.commit()?;
-
         // allow unreachable_code to get rids of the warning in the case of a test build.
         let this = Self {
             must_stop_processing: MustStopProcessing::default(),
             processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
             file_store,
-            all_tasks,
-            status,
-            kind,
-            index_tasks,
-            canceled_by,
-            enqueued_at,
-            started_at,
-            finished_at,
+            all_tasks: env.create_database(Some(db_name::ALL_TASKS))?,
+            status: env.create_database(Some(db_name::STATUS))?,
+            kind: env.create_database(Some(db_name::KIND))?,
+            index_tasks: env.create_database(Some(db_name::INDEX_TASKS))?,
+            canceled_by: env.create_database(Some(db_name::CANCELED_BY))?,
+            enqueued_at: env.create_database(Some(db_name::ENQUEUED_AT))?,
+            started_at: env.create_database(Some(db_name::STARTED_AT))?,
+            finished_at: env.create_database(Some(db_name::FINISHED_AT))?,
             index_mapper: IndexMapper::new(
                 &env,
                 options.indexes_path,
                 budget.map_size,
                 options.index_growth_amount,
                 budget.index_count,
-                options.enable_mdb_writemap,
                 options.indexer_config,
             )?,
             env,
@@ -1490,7 +1471,6 @@ mod tests {
                 dumps_path: tempdir.path().join("dumps"),
                 task_db_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
                 index_base_map_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
-                enable_mdb_writemap: false,
                 index_growth_amount: 1000 * 1000, // 1 MB
                 index_count: 5,
                 indexer_config,
@@ -55,11 +55,9 @@ impl HeedAuthStore {
         let path = path.as_ref().join(AUTH_DB_PATH);
         create_dir_all(&path)?;
         let env = Arc::new(open_auth_store_env(path.as_ref())?);
-        let mut wtxn = env.write_txn()?;
-        let keys = env.create_database(&mut wtxn, Some(KEY_DB_NAME))?;
+        let keys = env.create_database(Some(KEY_DB_NAME))?;
         let action_keyid_index_expiration =
-            env.create_database(&mut wtxn, Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?;
-        wtxn.commit()?;
+            env.create_database(Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?;
        Ok(Self { env, keys, action_keyid_index_expiration, should_close_on_drop: true })
     }
 
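Note: the `create_database` churn in the hunks above and below is not cosmetic; it tracks the API difference between the two pinned heed tags. On the removed side (fork tag v0.12.6), `create_database` takes an explicit write transaction that the caller commits once; on the kept side (tag v0.12.5), it opens and commits an internal transaction per call. A hedged sketch of the kept call shape, with the removed shape as a comment; the database name `"keys"` is a placeholder, not the real `KEY_DB_NAME` value:

```rust
// Sketch against the heed fork tagged v0.12.5 (the kept side):
// create_database manages its own write transaction internally.
use heed::types::Str;
use heed::{Database, Env, Result};

fn open_keys_db(env: &Env) -> Result<Database<Str, Str>> {
    env.create_database(Some("keys"))
}

// Against the fork tagged v0.12.6 (the removed side), the same call takes an
// explicit transaction and the caller commits once after all databases exist:
//
//     let mut wtxn = env.write_txn()?;
//     let keys = env.create_database(&mut wtxn, Some("keys"))?;
//     wtxn.commit()?;
```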
@@ -150,7 +150,6 @@ make_missing_field_convenience_builder!(MissingApiKeyActions, missing_api_key_actions);
 make_missing_field_convenience_builder!(MissingApiKeyExpiresAt, missing_api_key_expires_at);
 make_missing_field_convenience_builder!(MissingApiKeyIndexes, missing_api_key_indexes);
 make_missing_field_convenience_builder!(MissingSwapIndexes, missing_swap_indexes);
-make_missing_field_convenience_builder!(MissingDocumentFilter, missing_document_filter);
 
 // Integrate a sub-error into a [`DeserrError`] by taking its error message but using
 // the default error code (C) from `Self`
@@ -214,12 +214,12 @@ InvalidApiKeyUid , InvalidRequest , BAD_REQUEST ;
 InvalidContentType          , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
 InvalidDocumentCsvDelimiter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFields       , InvalidRequest , BAD_REQUEST ;
-MissingDocumentFilter       , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFilter       , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentGeoField     , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentId           , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentLimit        , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentOffset       , InvalidRequest , BAD_REQUEST ;
+InvalidDocumentDeleteFilter , InvalidRequest , BAD_REQUEST ;
 InvalidIndexLimit           , InvalidRequest , BAD_REQUEST ;
 InvalidIndexOffset          , InvalidRequest , BAD_REQUEST ;
 InvalidIndexPrimaryKey      , InvalidRequest , BAD_REQUEST ;
@@ -5,7 +5,7 @@ use actix_web::HttpRequest;
 use meilisearch_types::InstanceUid;
 use serde_json::Value;
 
-use super::{find_user_id, Analytics, DocumentDeletionKind, DocumentFetchKind};
+use super::{find_user_id, Analytics, DocumentDeletionKind};
 use crate::routes::indexes::documents::UpdateDocumentsQuery;
 use crate::routes::tasks::TasksFilterQuery;
 use crate::Opt;
@@ -71,8 +71,6 @@ impl Analytics for MockAnalytics {
         _request: &HttpRequest,
     ) {
     }
-    fn get_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
-    fn post_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
     fn get_tasks(&self, _query: &TasksFilterQuery, _request: &HttpRequest) {}
     fn health_seen(&self, _request: &HttpRequest) {}
 }
@@ -67,12 +67,6 @@ pub enum DocumentDeletionKind {
     PerFilter,
 }
 
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum DocumentFetchKind {
-    PerDocumentId,
-    Normal { with_filter: bool, limit: usize, offset: usize },
-}
-
 pub trait Analytics: Sync + Send {
     fn instance_uid(&self) -> Option<&InstanceUid>;
 
@@ -96,12 +90,6 @@ pub trait Analytics: Sync + Send {
         request: &HttpRequest,
     );
 
-    // this method should be called to aggregate a fetch documents request
-    fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest);
-
-    // this method should be called to aggregate a fetch documents request
-    fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest);
-
     // this method should be called to aggregate a add documents request
     fn delete_documents(&self, kind: DocumentDeletionKind, request: &HttpRequest);
 
@@ -23,9 +23,7 @@ use tokio::select;
 use tokio::sync::mpsc::{self, Receiver, Sender};
 use uuid::Uuid;
 
-use super::{
-    config_user_id_path, DocumentDeletionKind, DocumentFetchKind, MEILISEARCH_CONFIG_PATH,
-};
+use super::{config_user_id_path, DocumentDeletionKind, MEILISEARCH_CONFIG_PATH};
 use crate::analytics::Analytics;
 use crate::option::{default_http_addr, IndexerOpts, MaxMemory, MaxThreads, ScheduleSnapshot};
 use crate::routes::indexes::documents::UpdateDocumentsQuery;
@@ -74,8 +72,6 @@ pub enum AnalyticsMsg {
     AggregateAddDocuments(DocumentsAggregator),
     AggregateDeleteDocuments(DocumentsDeletionAggregator),
     AggregateUpdateDocuments(DocumentsAggregator),
-    AggregateGetFetchDocuments(DocumentsFetchAggregator),
-    AggregatePostFetchDocuments(DocumentsFetchAggregator),
     AggregateTasks(TasksAggregator),
     AggregateHealth(HealthAggregator),
 }
@@ -143,8 +139,6 @@ impl SegmentAnalytics {
             add_documents_aggregator: DocumentsAggregator::default(),
             delete_documents_aggregator: DocumentsDeletionAggregator::default(),
             update_documents_aggregator: DocumentsAggregator::default(),
-            get_fetch_documents_aggregator: DocumentsFetchAggregator::default(),
-            post_fetch_documents_aggregator: DocumentsFetchAggregator::default(),
             get_tasks_aggregator: TasksAggregator::default(),
             health_aggregator: HealthAggregator::default(),
         });
@@ -211,16 +205,6 @@ impl super::Analytics for SegmentAnalytics {
         let _ = self.sender.try_send(AnalyticsMsg::AggregateUpdateDocuments(aggregate));
     }
 
-    fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) {
-        let aggregate = DocumentsFetchAggregator::from_query(documents_query, request);
-        let _ = self.sender.try_send(AnalyticsMsg::AggregateGetFetchDocuments(aggregate));
-    }
-
-    fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) {
-        let aggregate = DocumentsFetchAggregator::from_query(documents_query, request);
-        let _ = self.sender.try_send(AnalyticsMsg::AggregatePostFetchDocuments(aggregate));
-    }
-
     fn get_tasks(&self, query: &TasksFilterQuery, request: &HttpRequest) {
         let aggregate = TasksAggregator::from_query(query, request);
         let _ = self.sender.try_send(AnalyticsMsg::AggregateTasks(aggregate));
@@ -241,7 +225,6 @@ impl super::Analytics for SegmentAnalytics {
 struct Infos {
     env: String,
     experimental_enable_metrics: bool,
-    experimental_reduce_indexing_memory_usage: bool,
     db_path: bool,
     import_dump: bool,
     dump_dir: bool,
@@ -275,7 +258,6 @@ impl From<Opt> for Infos {
         let Opt {
             db_path,
             experimental_enable_metrics,
-            experimental_reduce_indexing_memory_usage,
             http_addr,
             master_key: _,
             env,
@@ -318,7 +300,6 @@ impl From<Opt> for Infos {
         Self {
             env,
             experimental_enable_metrics,
-            experimental_reduce_indexing_memory_usage,
             db_path: db_path != PathBuf::from("./data.ms"),
             import_dump: import_dump.is_some(),
             dump_dir: dump_dir != PathBuf::from("dumps/"),
@@ -357,8 +338,6 @@ pub struct Segment {
     add_documents_aggregator: DocumentsAggregator,
     delete_documents_aggregator: DocumentsDeletionAggregator,
     update_documents_aggregator: DocumentsAggregator,
-    get_fetch_documents_aggregator: DocumentsFetchAggregator,
-    post_fetch_documents_aggregator: DocumentsFetchAggregator,
     get_tasks_aggregator: TasksAggregator,
     health_aggregator: HealthAggregator,
 }
@@ -421,8 +400,6 @@ impl Segment {
                         Some(AnalyticsMsg::AggregateAddDocuments(agreg)) => self.add_documents_aggregator.aggregate(agreg),
                         Some(AnalyticsMsg::AggregateDeleteDocuments(agreg)) => self.delete_documents_aggregator.aggregate(agreg),
                         Some(AnalyticsMsg::AggregateUpdateDocuments(agreg)) => self.update_documents_aggregator.aggregate(agreg),
-                        Some(AnalyticsMsg::AggregateGetFetchDocuments(agreg)) => self.get_fetch_documents_aggregator.aggregate(agreg),
-                        Some(AnalyticsMsg::AggregatePostFetchDocuments(agreg)) => self.post_fetch_documents_aggregator.aggregate(agreg),
                         Some(AnalyticsMsg::AggregateTasks(agreg)) => self.get_tasks_aggregator.aggregate(agreg),
                         Some(AnalyticsMsg::AggregateHealth(agreg)) => self.health_aggregator.aggregate(agreg),
                         None => (),
@@ -473,10 +450,6 @@ impl Segment {
             .into_event(&self.user, "Documents Deleted");
         let update_documents = std::mem::take(&mut self.update_documents_aggregator)
             .into_event(&self.user, "Documents Updated");
-        let get_fetch_documents = std::mem::take(&mut self.get_fetch_documents_aggregator)
-            .into_event(&self.user, "Documents Fetched GET");
-        let post_fetch_documents = std::mem::take(&mut self.post_fetch_documents_aggregator)
-            .into_event(&self.user, "Documents Fetched POST");
         let get_tasks =
             std::mem::take(&mut self.get_tasks_aggregator).into_event(&self.user, "Tasks Seen");
         let health =
@@ -500,12 +473,6 @@ impl Segment {
         if let Some(update_documents) = update_documents {
             let _ = self.batcher.push(update_documents).await;
         }
-        if let Some(get_fetch_documents) = get_fetch_documents {
-            let _ = self.batcher.push(get_fetch_documents).await;
-        }
-        if let Some(post_fetch_documents) = post_fetch_documents {
-            let _ = self.batcher.push(post_fetch_documents).await;
-        }
         if let Some(get_tasks) = get_tasks {
             let _ = self.batcher.push(get_tasks).await;
         }
@@ -1168,76 +1135,3 @@ impl HealthAggregator {
         })
     }
 }
-
-#[derive(Default, Serialize)]
-pub struct DocumentsFetchAggregator {
-    #[serde(skip)]
-    timestamp: Option<OffsetDateTime>,
-
-    // context
-    #[serde(rename = "user-agent")]
-    user_agents: HashSet<String>,
-
-    #[serde(rename = "requests.max_limit")]
-    total_received: usize,
-
-    // a call on ../documents/:doc_id
-    per_document_id: bool,
-    // if a filter was used
-    per_filter: bool,
-
-    // pagination
-    #[serde(rename = "pagination.max_limit")]
-    max_limit: usize,
-    #[serde(rename = "pagination.max_offset")]
-    max_offset: usize,
-}
-
-impl DocumentsFetchAggregator {
-    pub fn from_query(query: &DocumentFetchKind, request: &HttpRequest) -> Self {
-        let (limit, offset) = match query {
-            DocumentFetchKind::PerDocumentId => (1, 0),
-            DocumentFetchKind::Normal { limit, offset, .. } => (*limit, *offset),
-        };
-        Self {
-            timestamp: Some(OffsetDateTime::now_utc()),
-            user_agents: extract_user_agents(request).into_iter().collect(),
-            total_received: 1,
-            per_document_id: matches!(query, DocumentFetchKind::PerDocumentId),
-            per_filter: matches!(query, DocumentFetchKind::Normal { with_filter, .. } if *with_filter),
-            max_limit: limit,
-            max_offset: offset,
-        }
-    }
-
-    /// Aggregate one [DocumentsFetchAggregator] into another.
-    pub fn aggregate(&mut self, other: Self) {
-        if self.timestamp.is_none() {
-            self.timestamp = other.timestamp;
-        }
-        for user_agent in other.user_agents {
-            self.user_agents.insert(user_agent);
-        }
-
-        self.total_received = self.total_received.saturating_add(other.total_received);
-        self.per_document_id |= other.per_document_id;
-        self.per_filter |= other.per_filter;
-
-        self.max_limit = self.max_limit.max(other.max_limit);
-        self.max_offset = self.max_offset.max(other.max_offset);
-    }
-
-    pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
-        // if we had no timestamp it means we never encountered any events and
-        // thus we don't need to send this event.
-        let timestamp = self.timestamp?;
-
-        Some(Track {
-            timestamp: Some(timestamp),
-            user: user.clone(),
-            event: event_name.to_string(),
-            properties: serde_json::to_value(self).ok()?,
-            ..Default::default()
-        })
-    }
-}
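Note: the removed `DocumentsFetchAggregator` follows the same merge discipline as the other aggregators in this file: counters saturate rather than wrap, booleans accumulate with OR, and only the maximum limit/offset seen is reported. A minimal standalone sketch of that merge logic, with simplified stand-in field names rather than the real analytics types:

```rust
// Standalone sketch of the aggregate() merge pattern used above.
#[derive(Default)]
struct FetchStats {
    total_received: usize,
    per_filter: bool,
    max_limit: usize,
    max_offset: usize,
}

impl FetchStats {
    fn aggregate(&mut self, other: Self) {
        // Counters saturate instead of wrapping on overflow.
        self.total_received = self.total_received.saturating_add(other.total_received);
        // Booleans accumulate with OR: "was a filter ever used?".
        self.per_filter |= other.per_filter;
        // For limits and offsets, only the maximum seen is kept.
        self.max_limit = self.max_limit.max(other.max_limit);
        self.max_offset = self.max_offset.max(other.max_offset);
    }
}

fn main() {
    let mut acc = FetchStats::default();
    acc.aggregate(FetchStats { total_received: 1, per_filter: true, max_limit: 20, max_offset: 0 });
    acc.aggregate(FetchStats { total_received: 1, per_filter: false, max_limit: 5, max_offset: 100 });
    assert_eq!(
        (acc.total_received, acc.per_filter, acc.max_limit, acc.max_offset),
        (2, true, 20, 100)
    );
}
```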
@@ -1,6 +1,5 @@
 use actix_web as aweb;
 use aweb::error::{JsonPayloadError, QueryPayloadError};
-use byte_unit::Byte;
 use meilisearch_types::document_formats::{DocumentFormatError, PayloadType};
 use meilisearch_types::error::{Code, ErrorCode, ResponseError};
 use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
@@ -27,8 +26,8 @@ pub enum MeilisearchHttpError {
     InvalidExpression(&'static [&'static str], Value),
     #[error("A {0} payload is missing.")]
     MissingPayload(PayloadType),
-    #[error("The provided payload reached the size limit. The maximum accepted payload size is {}.", Byte::from_bytes(*.0 as u64).get_appropriate_unit(true))]
-    PayloadTooLarge(usize),
+    #[error("The provided payload reached the size limit.")]
+    PayloadTooLarge,
     #[error("Two indexes must be given for each swap. The list `[{}]` contains {} indexes.",
         .0.iter().map(|uid| format!("\"{uid}\"")).collect::<Vec<_>>().join(", "), .0.len()
     )]
@@ -61,9 +60,9 @@ impl ErrorCode for MeilisearchHttpError {
             MeilisearchHttpError::MissingPayload(_) => Code::MissingPayload,
             MeilisearchHttpError::InvalidContentType(_, _) => Code::InvalidContentType,
             MeilisearchHttpError::DocumentNotFound(_) => Code::DocumentNotFound,
-            MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentFilter,
+            MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentDeleteFilter,
             MeilisearchHttpError::InvalidExpression(_, _) => Code::InvalidSearchFilter,
-            MeilisearchHttpError::PayloadTooLarge(_) => Code::PayloadTooLarge,
+            MeilisearchHttpError::PayloadTooLarge => Code::PayloadTooLarge,
             MeilisearchHttpError::SwapIndexPayloadWrongLength(_) => Code::InvalidSwapIndexes,
             MeilisearchHttpError::IndexUid(e) => e.error_code(),
             MeilisearchHttpError::SerdeJson(_) => Code::Internal,
@@ -11,7 +11,6 @@ use crate::error::MeilisearchHttpError;
 pub struct Payload {
     payload: Decompress<dev::Payload>,
     limit: usize,
-    remaining: usize,
 }
 
 pub struct PayloadConfig {
@@ -44,7 +43,6 @@ impl FromRequest for Payload {
         ready(Ok(Payload {
             payload: Decompress::from_headers(payload.take(), req.headers()),
             limit,
-            remaining: limit,
         }))
     }
 }
@@ -56,14 +54,12 @@ impl Stream for Payload {
     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         match Pin::new(&mut self.payload).poll_next(cx) {
             Poll::Ready(Some(result)) => match result {
-                Ok(bytes) => match self.remaining.checked_sub(bytes.len()) {
+                Ok(bytes) => match self.limit.checked_sub(bytes.len()) {
                     Some(new_limit) => {
-                        self.remaining = new_limit;
+                        self.limit = new_limit;
                         Poll::Ready(Some(Ok(bytes)))
                     }
-                    None => {
-                        Poll::Ready(Some(Err(MeilisearchHttpError::PayloadTooLarge(self.limit))))
-                    }
+                    None => Poll::Ready(Some(Err(MeilisearchHttpError::PayloadTooLarge))),
                 },
                 x => Poll::Ready(Some(x.map_err(MeilisearchHttpError::from))),
             },
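Note: this payload hunk is more than a rename. The removed (newer) side tracks a separate `remaining` budget so that `limit` stays intact and `PayloadTooLarge(self.limit)` can report the configured maximum in its error message; the kept side decrements `limit` itself and, by the time the subtraction fails, the original maximum is gone. A small standalone sketch of the two accounting schemes, detached from actix-web's Stream machinery:

```rust
// Sketch of the two accounting schemes in the hunk above; not actix code.
struct TwoCounters { limit: usize, remaining: usize }

impl TwoCounters {
    // Removed side: `limit` is never mutated, so the error can still report
    // the configured maximum when the budget runs out.
    fn accept(&mut self, chunk: usize) -> Result<(), String> {
        match self.remaining.checked_sub(chunk) {
            Some(left) => { self.remaining = left; Ok(()) }
            None => Err(format!("payload too large, max is {} bytes", self.limit)),
        }
    }
}

struct OneCounter { limit: usize }

impl OneCounter {
    // Kept side: `limit` doubles as the running budget, so the original
    // maximum is no longer available for the error message.
    fn accept(&mut self, chunk: usize) -> Result<(), &'static str> {
        match self.limit.checked_sub(chunk) {
            Some(left) => { self.limit = left; Ok(()) }
            None => Err("payload too large"),
        }
    }
}

fn main() {
    let mut a = TwoCounters { limit: 10, remaining: 10 };
    assert!(a.accept(8).is_ok());
    assert_eq!(a.accept(8).unwrap_err(), "payload too large, max is 10 bytes");

    let mut b = OneCounter { limit: 10 };
    assert!(b.accept(8).is_ok());
    assert!(b.accept(8).is_err()); // the original 10 is gone by now
}
```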
@@ -232,7 +232,6 @@ fn open_or_create_database_unchecked(
         dumps_path: opt.dump_dir.clone(),
         task_db_size: opt.max_task_db_size.get_bytes() as usize,
         index_base_map_size: opt.max_index_size.get_bytes() as usize,
-        enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
         indexer_config: (&opt.indexer_options).try_into()?,
         autobatching_enabled: true,
         max_number_of_tasks: 1_000_000,
@@ -29,11 +29,6 @@ fn setup(opt: &Opt) -> anyhow::Result<()> {
 async fn main() -> anyhow::Result<()> {
     let (opt, config_read_from) = Opt::try_build()?;
 
-    anyhow::ensure!(
-        !(cfg!(windows) && opt.experimental_reduce_indexing_memory_usage),
-        "The `experimental-reduce-indexing-memory-usage` flag is not supported on Windows"
-    );
-
     setup(&opt)?;
 
     match (opt.env.as_ref(), &opt.master_key) {
@@ -48,8 +48,6 @@ const MEILI_IGNORE_DUMP_IF_DB_EXISTS: &str = "MEILI_IGNORE_DUMP_IF_DB_EXISTS";
 const MEILI_DUMP_DIR: &str = "MEILI_DUMP_DIR";
 const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL";
 const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS";
-const MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE: &str =
-    "MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE";
 
 const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
 const DEFAULT_DB_PATH: &str = "./data.ms";
@@ -295,11 +293,6 @@ pub struct Opt {
     #[serde(default)]
     pub experimental_enable_metrics: bool,
 
-    /// Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
-    #[clap(long, env = MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE)]
-    #[serde(default)]
-    pub experimental_reduce_indexing_memory_usage: bool,
-
     #[serde(flatten)]
     #[clap(flatten)]
     pub indexer_options: IndexerOpts,
@@ -392,7 +385,6 @@ impl Opt {
             #[cfg(all(not(debug_assertions), feature = "analytics"))]
             no_analytics,
             experimental_enable_metrics: enable_metrics_route,
-            experimental_reduce_indexing_memory_usage: reduce_indexing_memory_usage,
         } = self;
         export_to_env_if_not_present(MEILI_DB_PATH, db_path);
         export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
@@ -434,10 +426,6 @@ impl Opt {
             MEILI_EXPERIMENTAL_ENABLE_METRICS,
             enable_metrics_route.to_string(),
         );
-        export_to_env_if_not_present(
-            MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE,
-            reduce_indexing_memory_usage.to_string(),
-        );
         indexer_options.export_to_env();
     }
 
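Note: the four option.rs hunks above show the full plumbing an experimental flag gets in this codebase: an env-var constant, a `clap`/`serde` field on `Opt`, a destructuring in `export_to_env`, and an `export_to_env_if_not_present` call. A hedged sketch of that pattern with a hypothetical flag name, assuming clap 4 with the `env` feature and serde, as this crate uses:

```rust
// Sketch of the option-plumbing pattern removed above; the flag name is a
// made-up placeholder, not a real Meilisearch option.
use clap::Parser;
use serde::Deserialize;

const MEILI_EXPERIMENTAL_EXAMPLE_FLAG: &str = "MEILI_EXPERIMENTAL_EXAMPLE_FLAG";

#[derive(Debug, Parser, Deserialize)]
struct Opt {
    /// Hypothetical experimental toggle, defaulting to false.
    #[clap(long, env = MEILI_EXPERIMENTAL_EXAMPLE_FLAG)]
    #[serde(default)]
    experimental_example_flag: bool,
}

impl Opt {
    // Mirror of export_to_env(): re-export the parsed value so child
    // processes and later re-parses observe the same configuration.
    fn export_to_env(&self) {
        if std::env::var(MEILI_EXPERIMENTAL_EXAMPLE_FLAG).is_err() {
            std::env::set_var(
                MEILI_EXPERIMENTAL_EXAMPLE_FLAG,
                self.experimental_example_flag.to_string(),
            );
        }
    }
}

fn main() {
    let opt = Opt::parse();
    opt.export_to_env();
    println!("{opt:?}");
}
```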
@@ -29,7 +29,7 @@ use tempfile::tempfile;
 use tokio::fs::File;
 use tokio::io::{AsyncSeekExt, AsyncWriteExt, BufWriter};
 
-use crate::analytics::{Analytics, DocumentDeletionKind, DocumentFetchKind};
+use crate::analytics::{Analytics, DocumentDeletionKind};
 use crate::error::MeilisearchHttpError;
 use crate::error::PayloadError::ReceivePayload;
 use crate::extractors::authentication::policies::*;
@@ -97,14 +97,10 @@ pub async fn get_document(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
     document_param: web::Path<DocumentParam>,
     params: AwebQueryParameter<GetDocument, DeserrQueryParamError>,
-    req: HttpRequest,
-    analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     let DocumentParam { index_uid, document_id } = document_param.into_inner();
     let index_uid = IndexUid::try_from(index_uid)?;
 
-    analytics.get_fetch_documents(&DocumentFetchKind::PerDocumentId, &req);
-
     let GetDocument { fields } = params.into_inner();
     let attributes_to_retrieve = fields.merge_star_and_none();
 
@@ -165,31 +161,16 @@ pub async fn documents_by_query_post(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
     body: AwebJson<BrowseQuery, DeserrJsonError>,
-    req: HttpRequest,
-    analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!("called with body: {:?}", body);
 
-    let body = body.into_inner();
-
-    analytics.post_fetch_documents(
-        &DocumentFetchKind::Normal {
-            with_filter: body.filter.is_some(),
-            limit: body.limit,
-            offset: body.offset,
-        },
-        &req,
-    );
-
-    documents_by_query(&index_scheduler, index_uid, body)
+    documents_by_query(&index_scheduler, index_uid, body.into_inner())
 }
 
 pub async fn get_documents(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
     params: AwebQueryParameter<BrowseQueryGet, DeserrQueryParamError>,
-    req: HttpRequest,
-    analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!("called with params: {:?}", params);
 
@@ -210,15 +191,6 @@ pub async fn get_documents(
         filter,
     };
 
-    analytics.get_fetch_documents(
-        &DocumentFetchKind::Normal {
-            with_filter: query.filter.is_some(),
-            limit: query.limit,
-            offset: query.offset,
-        },
-        &req,
-    );
-
     documents_by_query(&index_scheduler, index_uid, query)
 }
 
@@ -486,7 +458,7 @@ pub async fn delete_documents_batch(
 #[derive(Debug, Deserr)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
 pub struct DocumentDeletionByFilter {
-    #[deserr(error = DeserrJsonError<InvalidDocumentFilter>, missing_field_error = DeserrJsonError::missing_document_filter)]
+    #[deserr(error = DeserrJsonError<InvalidDocumentDeleteFilter>)]
     filter: Value,
 }
 
@@ -508,8 +480,8 @@ pub async fn delete_documents_by_filter(
         || -> Result<_, ResponseError> {
             Ok(crate::search::parse_filter(&filter)?.ok_or(MeilisearchHttpError::EmptyFilter)?)
         }()
-        // and whatever was the error, the error code should always be an InvalidDocumentFilter
-        .map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentFilter))?;
+        // and whatever was the error, the error code should always be an InvalidDocumentDeleteFilter
+        .map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentDeleteFilter))?;
     let task = KindWithContent::DocumentDeletionByFilter { index_uid, filter_expr: filter };
 
     let task: SummarizedTaskView =
@@ -568,12 +540,7 @@ fn retrieve_documents<S: AsRef<str>>(
     };
 
     let candidates = if let Some(filter) = filter {
-        filter.evaluate(&rtxn, index).map_err(|err| match err {
-            milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
-                ResponseError::from_msg(err.to_string(), Code::InvalidDocumentFilter)
-            }
-            e => e.into(),
-        })?
+        filter.evaluate(&rtxn, index)?
     } else {
        index.documents_ids(&rtxn)?
    };
@@ -99,7 +99,7 @@ pub struct DetailsView {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub deleted_tasks: Option<Option<u64>>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub original_filter: Option<Option<String>>,
+    pub original_filter: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub dump_uid: Option<Option<String>>,
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -131,13 +131,12 @@ impl From<Details> for DetailsView {
             } => DetailsView {
                 provided_ids: Some(received_document_ids),
                 deleted_documents: Some(deleted_documents),
-                original_filter: Some(None),
                 ..DetailsView::default()
             },
             Details::DocumentDeletionByFilter { original_filter, deleted_documents } => {
                 DetailsView {
                     provided_ids: Some(0),
-                    original_filter: Some(Some(original_filter)),
+                    original_filter: Some(original_filter),
                     deleted_documents: Some(deleted_documents),
                     ..DetailsView::default()
                 }
@@ -149,7 +148,7 @@ impl From<Details> for DetailsView {
                 DetailsView {
                     matched_tasks: Some(matched_tasks),
                     canceled_tasks: Some(canceled_tasks),
-                    original_filter: Some(Some(original_filter)),
+                    original_filter: Some(original_filter),
                     ..DetailsView::default()
                 }
             }
@@ -157,7 +156,7 @@ impl From<Details> for DetailsView {
                 DetailsView {
                     matched_tasks: Some(matched_tasks),
                     deleted_tasks: Some(deleted_tasks),
-                    original_filter: Some(Some(original_filter)),
+                    original_filter: Some(original_filter),
                     ..DetailsView::default()
                 }
             }
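Note: the `DetailsView` hunk above narrows `original_filter` from `Option<Option<String>>` to `Option<String>`. With `skip_serializing_if = "Option::is_none"`, the double `Option` distinguishes "omit the key entirely" (outer `None`) from "serialize an explicit null" (`Some(None)`), which is why the removed batch-deletion arm could emit `"originalFilter": null` in the task snapshots further down. A minimal sketch of that distinction, assuming only serde and serde_json:

```rust
// Minimal sketch of the Option<Option<_>> trick used by DetailsView above.
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct View {
    #[serde(skip_serializing_if = "Option::is_none")]
    original_filter: Option<Option<String>>,
}

fn main() {
    // Outer None: the key is omitted entirely.
    let absent = View { original_filter: None };
    assert_eq!(serde_json::to_string(&absent).unwrap(), "{}");

    // Some(None): the key is present with an explicit null.
    let null = View { original_filter: Some(None) };
    assert_eq!(serde_json::to_string(&null).unwrap(), r#"{"originalFilter":null}"#);

    // Some(Some(_)): the key carries a value.
    let set = View { original_filter: Some(Some("doggo = bernese".into())) };
    assert_eq!(
        serde_json::to_string(&set).unwrap(),
        r#"{"originalFilter":"doggo = bernese"}"#
    );
}
```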
@@ -16,11 +16,8 @@ pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'static str>>> =
     ("GET", "/indexes/products/search") => hashset!{"search", "*"},
     ("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
     ("GET", "/indexes/products/documents") => hashset!{"documents.get", "documents.*", "*"},
-    ("POST", "/indexes/products/documents/fetch") => hashset!{"documents.get", "documents.*", "*"},
     ("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"},
     ("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"},
     ("POST", "/indexes/products/documents/delete-batch") => hashset!{"documents.delete", "documents.*", "*"},
-    ("POST", "/indexes/products/documents/delete") => hashset!{"documents.delete", "documents.*", "*"},
     ("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"},
     ("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"},
     ("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"},
@@ -1781,7 +1781,7 @@ async fn error_add_documents_payload_size() {
     snapshot!(json_string!(response, { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
         @r###"
     {
-      "message": "The provided payload reached the size limit. The maximum accepted payload size is 10.00 MiB.",
+      "message": "The provided payload reached the size limit.",
       "code": "payload_too_large",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#payload_too_large"
@@ -180,9 +180,9 @@ async fn get_all_documents_bad_filter() {
     snapshot!(json_string!(response), @r###"
     {
       "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo=bernese",
-      "code": "invalid_document_filter",
+      "code": "invalid_search_filter",
       "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+      "link": "https://docs.meilisearch.com/errors#invalid_search_filter"
     }
     "###);
 }
@@ -547,9 +547,9 @@ async fn delete_document_by_filter() {
     snapshot!(json_string!(response), @r###"
     {
       "message": "Invalid syntax for the filter parameter: `expected String, Array, found: true`.",
-      "code": "invalid_document_filter",
+      "code": "invalid_document_delete_filter",
       "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+      "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
     }
     "###);
 
@@ -559,9 +559,9 @@ async fn delete_document_by_filter() {
     snapshot!(json_string!(response), @r###"
     {
       "message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `hello`.\n1:6 hello",
-      "code": "invalid_document_filter",
+      "code": "invalid_document_delete_filter",
       "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+      "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
     }
     "###);
 
@@ -571,21 +571,9 @@ async fn delete_document_by_filter() {
     snapshot!(json_string!(response), @r###"
     {
       "message": "Sending an empty filter is forbidden.",
-      "code": "invalid_document_filter",
+      "code": "invalid_document_delete_filter",
       "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
-    }
-    "###);
-
-    // do not send any filter
-    let (response, code) = index.delete_document_by_filter(json!({})).await;
-    snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
-    {
-      "message": "Missing field `filter`",
-      "code": "missing_document_filter",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#missing_document_filter"
+      "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
     }
     "###);
 
@@ -642,9 +630,9 @@ async fn delete_document_by_filter() {
       },
       "error": {
         "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
-        "code": "invalid_document_filter",
+        "code": "invalid_search_filter",
         "type": "invalid_request",
-        "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+        "link": "https://docs.meilisearch.com/errors#invalid_search_filter"
       },
       "duration": "[duration]",
       "enqueuedAt": "[date]",
@@ -676,9 +664,9 @@ async fn delete_document_by_filter() {
       },
       "error": {
         "message": "Attribute `catto` is not filterable. Available filterable attributes are: `doggo`.\n1:6 catto = jorts",
-        "code": "invalid_document_filter",
+        "code": "invalid_search_filter",
         "type": "invalid_request",
-        "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+        "link": "https://docs.meilisearch.com/errors#invalid_search_filter"
       },
       "duration": "[duration]",
       "enqueuedAt": "[date]",
@@ -760,27 +748,4 @@ async fn fetch_document_by_filter() {
       "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
     }
     "###);
-
-    let (response, code) = index.get_document_by_filter(json!({ "filter": "cool doggo" })).await;
-    snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
-    {
-      "message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `cool doggo`.\n1:11 cool doggo",
-      "code": "invalid_document_filter",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
-    }
-    "###);
-
-    let (response, code) =
-        index.get_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
-    {
-      "message": "Attribute `doggo` is not filterable. Available filterable attributes are: `color`.\n1:6 doggo = bernese",
-      "code": "invalid_document_filter",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
-    }
-    "###);
 }
@@ -946,7 +946,7 @@ async fn sort_unset_ranking_rule() {
     index.wait_task(1).await;
 
     let expected_response = json!({
-        "message": "You must specify where `sort` is listed in the rankingRules setting to use the sort parameter at search time.",
+        "message": "The sort ranking rule must be specified in the ranking rules settings to use the sort parameter at search time.",
         "code": "invalid_search_sort",
         "type": "invalid_request",
         "link": "https://docs.meilisearch.com/errors#invalid_search_sort"
@@ -413,7 +413,7 @@ async fn test_summarized_document_addition_or_update() {
 }
 
 #[actix_web::test]
-async fn test_summarized_delete_documents_by_batch() {
+async fn test_summarized_delete_batch() {
     let server = Server::new().await;
     let index = server.index("test");
     index.delete_batch(vec![1, 2, 3]).await;
@@ -430,8 +430,7 @@ async fn test_summarized_delete_documents_by_batch() {
       "canceledBy": null,
       "details": {
         "providedIds": 3,
-        "deletedDocuments": 0,
-        "originalFilter": null
+        "deletedDocuments": 0
       },
       "error": {
         "message": "Index `test` not found.",
@@ -461,8 +460,7 @@ async fn test_summarized_delete_documents_by_batch() {
       "canceledBy": null,
       "details": {
         "providedIds": 1,
-        "deletedDocuments": 0,
-        "originalFilter": null
+        "deletedDocuments": 0
       },
       "error": null,
       "duration": "[duration]",
@@ -474,100 +472,7 @@ async fn test_summarized_delete_documents_by_batch() {
 }
 
 #[actix_web::test]
-async fn test_summarized_delete_documents_by_filter() {
-    let server = Server::new().await;
-    let index = server.index("test");
-
-    index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    index.wait_task(0).await;
-    let (task, _) = index.get_task(0).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
-        @r###"
-    {
-      "uid": 0,
-      "indexUid": "test",
-      "status": "failed",
-      "type": "documentDeletion",
-      "canceledBy": null,
-      "details": {
-        "providedIds": 0,
-        "deletedDocuments": 0,
-        "originalFilter": "\"doggo = bernese\""
-      },
-      "error": {
-        "message": "Index `test` not found.",
-        "code": "index_not_found",
-        "type": "invalid_request",
-        "link": "https://docs.meilisearch.com/errors#index_not_found"
-      },
-      "duration": "[duration]",
-      "enqueuedAt": "[date]",
-      "startedAt": "[date]",
-      "finishedAt": "[date]"
-    }
-    "###);
-
-    index.create(None).await;
-    index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    index.wait_task(2).await;
-    let (task, _) = index.get_task(2).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
-        @r###"
-    {
-      "uid": 2,
-      "indexUid": "test",
-      "status": "failed",
-      "type": "documentDeletion",
-      "canceledBy": null,
-      "details": {
-        "providedIds": 0,
-        "deletedDocuments": 0,
-        "originalFilter": "\"doggo = bernese\""
-      },
-      "error": {
-        "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
-        "code": "invalid_document_filter",
-        "type": "invalid_request",
-        "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
-      },
-      "duration": "[duration]",
-      "enqueuedAt": "[date]",
-      "startedAt": "[date]",
-      "finishedAt": "[date]"
-    }
-    "###);
-
-    index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
-    index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    index.wait_task(4).await;
-    let (task, _) = index.get_task(4).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
-        @r###"
-    {
-      "uid": 4,
-      "indexUid": "test",
-      "status": "succeeded",
-      "type": "documentDeletion",
-      "canceledBy": null,
-      "details": {
-        "providedIds": 0,
-        "deletedDocuments": 0,
-        "originalFilter": "\"doggo = bernese\""
-      },
-      "error": null,
-      "duration": "[duration]",
-      "enqueuedAt": "[date]",
-      "startedAt": "[date]",
-      "finishedAt": "[date]"
-    }
-    "###);
-}
-
 #[actix_web::test]
-async fn test_summarized_delete_document_by_id() {
+async fn test_summarized_delete_document() {
     let server = Server::new().await;
     let index = server.index("test");
     index.delete_document(1).await;
@@ -584,8 +489,7 @@ async fn test_summarized_delete_document_by_id() {
       "canceledBy": null,
      "details": {
         "providedIds": 1,
-        "deletedDocuments": 0,
-        "originalFilter": null
+        "deletedDocuments": 0
       },
       "error": {
         "message": "Index `test` not found.",
@@ -615,8 +519,7 @@ async fn test_summarized_delete_document_by_id() {
       "canceledBy": null,
       "details": {
         "providedIds": 1,
-        "deletedDocuments": 0,
-        "originalFilter": null
+        "deletedDocuments": 0
       },
       "error": null,
       "duration": "[duration]",
@@ -25,13 +25,8 @@ flatten-serde-json = { path = "../flatten-serde-json" }
 fst = "0.4.7"
 fxhash = "0.2.1"
 geoutils = "0.5.1"
-grenad = { version = "0.4.4", default-features = false, features = [
-    "tempfile",
-] }
-heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.6", default-features = false, features = [
-    "lmdb",
-    "sync-read-txn",
-] }
+grenad = { version = "0.4.4", default-features = false, features = ["tempfile"] }
+heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.5", default-features = false, features = ["lmdb", "sync-read-txn"] }
 json-depth-checker = { path = "../json-depth-checker" }
 levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }
 memmap2 = "0.5.10"
@@ -44,17 +39,12 @@ rstar = { version = "0.10.0", features = ["serde"] }
 serde = { version = "1.0.160", features = ["derive"] }
 serde_json = { version = "1.0.95", features = ["preserve_order"] }
 slice-group-by = "0.3.0"
-smallstr = { version = "0.3.0", features = ["serde"] }
+smallstr = { version = "0.3.0", features = ["serde"] }
 smallvec = "1.10.0"
 smartstring = "1.0.1"
 tempfile = "3.5.0"
 thiserror = "1.0.40"
-time = { version = "0.3.20", features = [
-    "serde-well-known",
-    "formatting",
-    "parsing",
-    "macros",
-] }
+time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] }
 uuid = { version = "1.3.1", features = ["v4"] }
 
 filter-parser = { path = "../filter-parser" }
@@ -73,13 +63,13 @@ big_s = "1.0.2"
 insta = "1.29.0"
 maplit = "1.0.2"
 md5 = "0.7.0"
-rand = { version = "0.8.5", features = ["small_rng"] }
+rand = {version = "0.8.5", features = ["small_rng"] }
 
 [target.'cfg(fuzzing)'.dev-dependencies]
 fuzzcheck = "0.12.1"
 
 [features]
-all-tokenizations = ["charabia/default"]
+all-tokenizations = [ "charabia/default" ]
 
 # Use POSIX semaphores instead of SysV semaphores in LMDB
 # For more information on this feature, see heed's Cargo.toml
@@ -126,7 +126,7 @@ only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and undersco
    InvalidSortableAttribute { field: String, valid_fields: BTreeSet<String> },
    #[error("{}", HeedError::BadOpenOptions)]
    InvalidLmdbOpenOptions,
    #[error("You must specify where `sort` is listed in the rankingRules setting to use the sort parameter at search time.")]
    #[error("The sort ranking rule must be specified in the ranking rules settings to use the sort parameter at search time.")]
    SortRankingRuleMissing,
    #[error("The database file is in an invalid state.")]
    InvalidStoreFile,

@@ -170,46 +170,33 @@ impl Index {
        unsafe { options.flag(Flags::MdbAlwaysFreePages) };

        let env = options.open(path)?;
        let mut wtxn = env.write_txn()?;
        let main = env.create_poly_database(&mut wtxn, Some(MAIN))?;
        let word_docids = env.create_database(&mut wtxn, Some(WORD_DOCIDS))?;
        let exact_word_docids = env.create_database(&mut wtxn, Some(EXACT_WORD_DOCIDS))?;
        let word_prefix_docids = env.create_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?;
        let exact_word_prefix_docids =
            env.create_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?;
        let docid_word_positions = env.create_database(&mut wtxn, Some(DOCID_WORD_POSITIONS))?;
        let word_pair_proximity_docids =
            env.create_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
        let script_language_docids =
            env.create_database(&mut wtxn, Some(SCRIPT_LANGUAGE_DOCIDS))?;
        let main = env.create_poly_database(Some(MAIN))?;
        let word_docids = env.create_database(Some(WORD_DOCIDS))?;
        let exact_word_docids = env.create_database(Some(EXACT_WORD_DOCIDS))?;
        let word_prefix_docids = env.create_database(Some(WORD_PREFIX_DOCIDS))?;
        let exact_word_prefix_docids = env.create_database(Some(EXACT_WORD_PREFIX_DOCIDS))?;
        let docid_word_positions = env.create_database(Some(DOCID_WORD_POSITIONS))?;
        let word_pair_proximity_docids = env.create_database(Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
        let script_language_docids = env.create_database(Some(SCRIPT_LANGUAGE_DOCIDS))?;
        let word_prefix_pair_proximity_docids =
            env.create_database(&mut wtxn, Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?;
            env.create_database(Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?;
        let prefix_word_pair_proximity_docids =
            env.create_database(&mut wtxn, Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?;
        let word_position_docids = env.create_database(&mut wtxn, Some(WORD_POSITION_DOCIDS))?;
        let word_fid_docids = env.create_database(&mut wtxn, Some(WORD_FIELD_ID_DOCIDS))?;
        let field_id_word_count_docids =
            env.create_database(&mut wtxn, Some(FIELD_ID_WORD_COUNT_DOCIDS))?;
        let word_prefix_position_docids =
            env.create_database(&mut wtxn, Some(WORD_PREFIX_POSITION_DOCIDS))?;
        let word_prefix_fid_docids =
            env.create_database(&mut wtxn, Some(WORD_PREFIX_FIELD_ID_DOCIDS))?;
        let facet_id_f64_docids = env.create_database(&mut wtxn, Some(FACET_ID_F64_DOCIDS))?;
        let facet_id_string_docids =
            env.create_database(&mut wtxn, Some(FACET_ID_STRING_DOCIDS))?;
        let facet_id_exists_docids =
            env.create_database(&mut wtxn, Some(FACET_ID_EXISTS_DOCIDS))?;
        let facet_id_is_null_docids =
            env.create_database(&mut wtxn, Some(FACET_ID_IS_NULL_DOCIDS))?;
        let facet_id_is_empty_docids =
            env.create_database(&mut wtxn, Some(FACET_ID_IS_EMPTY_DOCIDS))?;
            env.create_database(Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?;
        let word_position_docids = env.create_database(Some(WORD_POSITION_DOCIDS))?;
        let word_fid_docids = env.create_database(Some(WORD_FIELD_ID_DOCIDS))?;
        let field_id_word_count_docids = env.create_database(Some(FIELD_ID_WORD_COUNT_DOCIDS))?;
        let word_prefix_position_docids = env.create_database(Some(WORD_PREFIX_POSITION_DOCIDS))?;
        let word_prefix_fid_docids = env.create_database(Some(WORD_PREFIX_FIELD_ID_DOCIDS))?;
        let facet_id_f64_docids = env.create_database(Some(FACET_ID_F64_DOCIDS))?;
        let facet_id_string_docids = env.create_database(Some(FACET_ID_STRING_DOCIDS))?;
        let facet_id_exists_docids = env.create_database(Some(FACET_ID_EXISTS_DOCIDS))?;
        let facet_id_is_null_docids = env.create_database(Some(FACET_ID_IS_NULL_DOCIDS))?;
        let facet_id_is_empty_docids = env.create_database(Some(FACET_ID_IS_EMPTY_DOCIDS))?;

        let field_id_docid_facet_f64s =
            env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_F64S))?;
        let field_id_docid_facet_f64s = env.create_database(Some(FIELD_ID_DOCID_FACET_F64S))?;
        let field_id_docid_facet_strings =
            env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_STRINGS))?;
        let documents = env.create_database(&mut wtxn, Some(DOCUMENTS))?;
        wtxn.commit()?;
            env.create_database(Some(FIELD_ID_DOCID_FACET_STRINGS))?;
        let documents = env.create_database(Some(DOCUMENTS))?;

        Index::set_creation_dates(&env, main, created_at, updated_at)?;

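For context on the hunk above: the two pinned heed tags differ in how named databases are created. On the meilisearch fork's v0.12.6 tag, `create_database` takes an explicit write transaction that the caller commits afterwards; on v0.12.5 the method opens and commits an internal transaction itself. A minimal sketch of the two call shapes, assuming a toy `Str`-keyed database (the name "example" and the path handling are illustrative, not from this diff):

use heed::types::Str;
use heed::{Database, EnvOpenOptions};

fn open_example_db(path: &std::path::Path) -> heed::Result<Database<Str, Str>> {
    let env = EnvOpenOptions::new().max_dbs(1).open(path)?;

    // v0.12.6 shape: the caller owns the write transaction and commits it.
    // let mut wtxn = env.write_txn()?;
    // let db = env.create_database(&mut wtxn, Some("example"))?;
    // wtxn.commit()?;

    // v0.12.5 shape (the tag this branch pins): the method manages its own
    // internal write transaction.
    let db = env.create_database(Some("example"))?;
    Ok(db)
}
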
@@ -2,7 +2,7 @@ use std::cmp;

use crate::{relative_from_absolute_position, Position};

pub const MAX_DISTANCE: u32 = 8;
pub const MAX_DISTANCE: u32 = 4;

pub fn index_proximity(lhs: u32, rhs: u32) -> u32 {
    if lhs <= rhs {

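`MAX_DISTANCE` caps the proximity cost recorded between two word positions, so halving it from 8 to 4 shrinks every proximity bucket range derived from it, including the edge costs and position windows further down in this diff. A sketch of the clamping, assuming the function body continues as in the released 1.x sources:

use std::cmp;

pub const MAX_DISTANCE: u32 = 4;

// Positional distance clamped to MAX_DISTANCE; a right-to-left pairing
// (lhs after rhs) costs one extra step.
pub fn index_proximity(lhs: u32, rhs: u32) -> u32 {
    if lhs <= rhs {
        cmp::min(rhs - lhs, MAX_DISTANCE)
    } else {
        cmp::min(lhs - rhs + 1, MAX_DISTANCE)
    }
}
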
@@ -4,6 +4,7 @@ use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::time::Instant;

// use rand::random;
use roaring::RoaringBitmap;

use crate::search::new::interner::Interned;
@@ -462,7 +463,7 @@ fill: \"#B6E2D3\"
shape: class
max_nbr_typo: {}",
    term_subset.description(ctx),
    term_subset.max_typo_cost(ctx)
    term_subset.max_nbr_typos(ctx)
)?;

for w in term_subset.all_single_words_except_prefix_db(ctx)? {
@@ -489,6 +490,13 @@ fill: \"#B6E2D3\"
    }
    Ok(())
}
// fn write_words_graph(&mut self, qg: QueryGraph) -> Result<()> {
//     self.make_new_file_for_internal_state_if_needed()?;

//     self.write_query_graph(&qg)?;

//     Ok(())
// }
fn write_rr_graph<R: RankingRuleGraphTrait>(
    &mut self,
    graph: &RankingRuleGraph<R>,

@@ -28,14 +28,16 @@ pub enum ZeroOrOneTypo {
impl Interned<QueryTerm> {
    pub fn compute_fully_if_needed(self, ctx: &mut SearchContext) -> Result<()> {
        let s = ctx.term_interner.get_mut(self);
        if s.max_levenshtein_distance <= 1 && s.one_typo.is_uninit() {
        if s.max_nbr_typos == 0 {
            s.one_typo = Lazy::Init(OneTypoTerm::default());
            s.two_typo = Lazy::Init(TwoTypoTerm::default());
        } else if s.max_nbr_typos == 1 && s.one_typo.is_uninit() {
            assert!(s.two_typo.is_uninit());
            // Initialize the one_typo subterm even if max_nbr_typos is 0, because of split words
            self.initialize_one_typo_subterm(ctx)?;
            let s = ctx.term_interner.get_mut(self);
            assert!(s.one_typo.is_init());
            s.two_typo = Lazy::Init(TwoTypoTerm::default());
        } else if s.max_levenshtein_distance > 1 && s.two_typo.is_uninit() {
        } else if s.max_nbr_typos > 1 && s.two_typo.is_uninit() {
            assert!(s.two_typo.is_uninit());
            self.initialize_one_and_two_typo_subterm(ctx)?;
            let s = ctx.term_interner.get_mut(self);
@@ -185,7 +187,7 @@ pub fn partially_initialized_term_from_word(
        original: ctx.word_interner.insert(word.to_owned()),
        ngram_words: None,
        is_prefix: false,
        max_levenshtein_distance: 0,
        max_nbr_typos: 0,
        zero_typo: <_>::default(),
        one_typo: Lazy::Init(<_>::default()),
        two_typo: Lazy::Init(<_>::default()),
@@ -256,7 +258,7 @@ pub fn partially_initialized_term_from_word(
    Ok(QueryTerm {
        original: word_interned,
        ngram_words: None,
        max_levenshtein_distance: max_typo,
        max_nbr_typos: max_typo,
        is_prefix,
        zero_typo,
        one_typo: Lazy::Uninit,
@@ -275,16 +277,7 @@ fn find_split_words(ctx: &mut SearchContext, word: &str) -> Result<Option<Intern
impl Interned<QueryTerm> {
    fn initialize_one_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
        let self_mut = ctx.term_interner.get_mut(self);

        let allows_split_words = self_mut.allows_split_words();
        let QueryTerm {
            original,
            is_prefix,
            one_typo,
            max_levenshtein_distance: max_nbr_typos,
            ..
        } = self_mut;

        let QueryTerm { original, is_prefix, one_typo, .. } = self_mut;
        let original = *original;
        let is_prefix = *is_prefix;
        // let original_str = ctx.word_interner.get(*original).to_owned();
@@ -293,33 +286,26 @@ impl Interned<QueryTerm> {
        }
        let mut one_typo_words = BTreeSet::new();

        if *max_nbr_typos > 0 {
            find_zero_one_typo_derivations(ctx, original, is_prefix, |derived_word, nbr_typos| {
                match nbr_typos {
                    ZeroOrOneTypo::Zero => {}
                    ZeroOrOneTypo::One => {
                        if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT {
                            one_typo_words.insert(derived_word);
                        } else {
                            return Ok(ControlFlow::Break(()));
                        }
        find_zero_one_typo_derivations(ctx, original, is_prefix, |derived_word, nbr_typos| {
            match nbr_typos {
                ZeroOrOneTypo::Zero => {}
                ZeroOrOneTypo::One => {
                    if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT {
                        one_typo_words.insert(derived_word);
                    } else {
                        return Ok(ControlFlow::Break(()));
                    }
                }
                Ok(ControlFlow::Continue(()))
            })?;
        }

        let split_words = if allows_split_words {
            let original_str = ctx.word_interner.get(original).to_owned();
            find_split_words(ctx, original_str.as_str())?
        } else {
            None
        };
            }
            Ok(ControlFlow::Continue(()))
        })?;
        let original_str = ctx.word_interner.get(original).to_owned();
        let split_words = find_split_words(ctx, original_str.as_str())?;

        let self_mut = ctx.term_interner.get_mut(self);

        // Only add the split words to the derivations if:
        // 1. the term is neither an ngram nor a phrase; OR
        // 1. the term is not an ngram; OR
        // 2. the term is an ngram, but the split words are different from the ngram's component words
        let split_words = if let Some((ngram_words, split_words)) =
            self_mut.ngram_words.as_ref().zip(split_words.as_ref())

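The two variants above differ mainly in when work is done: the gated version computes typo derivations only when the term still has a typo budget, and looks up split words only when the term allows them, while the other always does both. A simplified, self-contained sketch of the gated flow (the helper stand-ins are illustrative, not milli's real API):

use std::collections::BTreeSet;

// Stand-in for find_zero_one_typo_derivations: the real code walks the
// words FST with a Levenshtein automaton. This placeholder derives nothing.
fn derive_one_typo(_word: &str) -> Vec<String> {
    Vec::new()
}

// Stand-in for find_split_words: the real code looks for an indexed
// two-word split, e.g. "sunflower" -> ("sun", "flower").
fn find_split(_word: &str) -> Option<(String, String)> {
    None
}

// Gated initialization: no derivation walk without a typo budget, and no
// split-word lookup for phrases.
fn init_one_typo_subterm(
    word: &str,
    max_nbr_typos: u8,
    is_phrase: bool,
) -> (BTreeSet<String>, Option<(String, String)>) {
    let mut one_typo_words = BTreeSet::new();
    if max_nbr_typos > 0 {
        one_typo_words.extend(derive_one_typo(word));
    }
    let split_words = if !is_phrase { find_split(word) } else { None };
    (one_typo_words, split_words)
}
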
@@ -341,13 +327,7 @@ impl Interned<QueryTerm> {
    }
    fn initialize_one_and_two_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
        let self_mut = ctx.term_interner.get_mut(self);
        let QueryTerm {
            original,
            is_prefix,
            two_typo,
            max_levenshtein_distance: max_nbr_typos,
            ..
        } = self_mut;
        let QueryTerm { original, is_prefix, two_typo, .. } = self_mut;
        let original_str = ctx.word_interner.get(*original).to_owned();
        if two_typo.is_init() {
            return Ok(());
@@ -355,37 +335,34 @@ impl Interned<QueryTerm> {
        let mut one_typo_words = BTreeSet::new();
        let mut two_typo_words = BTreeSet::new();

        if *max_nbr_typos > 0 {
            find_zero_one_two_typo_derivations(
                *original,
                *is_prefix,
                ctx.index.words_fst(ctx.txn)?,
                &mut ctx.word_interner,
                |derived_word, nbr_typos| {
                    if one_typo_words.len() >= limits::MAX_ONE_TYPO_COUNT
                        && two_typo_words.len() >= limits::MAX_TWO_TYPOS_COUNT
                    {
                        // No chance we will add either one- or two-typo derivations anymore, stop iterating.
                        return Ok(ControlFlow::Break(()));
                    }
                    match nbr_typos {
                        NumberOfTypos::Zero => {}
                        NumberOfTypos::One => {
                            if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT {
                                one_typo_words.insert(derived_word);
                            }
                        }
                        NumberOfTypos::Two => {
                            if two_typo_words.len() < limits::MAX_TWO_TYPOS_COUNT {
                                two_typo_words.insert(derived_word);
                            }
        find_zero_one_two_typo_derivations(
            *original,
            *is_prefix,
            ctx.index.words_fst(ctx.txn)?,
            &mut ctx.word_interner,
            |derived_word, nbr_typos| {
                if one_typo_words.len() >= limits::MAX_ONE_TYPO_COUNT
                    && two_typo_words.len() >= limits::MAX_TWO_TYPOS_COUNT
                {
                    // No chance we will add either one- or two-typo derivations anymore, stop iterating.
                    return Ok(ControlFlow::Break(()));
                }
                match nbr_typos {
                    NumberOfTypos::Zero => {}
                    NumberOfTypos::One => {
                        if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT {
                            one_typo_words.insert(derived_word);
                        }
                    }
                    Ok(ControlFlow::Continue(()))
                },
            )?;
        }

                    NumberOfTypos::Two => {
                        if two_typo_words.len() < limits::MAX_TWO_TYPOS_COUNT {
                            two_typo_words.insert(derived_word);
                        }
                    }
                }
                Ok(ControlFlow::Continue(()))
            },
        )?;
        let split_words = find_split_words(ctx, original_str.as_str())?;
        let self_mut = ctx.term_interner.get_mut(self);

@@ -43,7 +43,7 @@ pub struct QueryTermSubset {
pub struct QueryTerm {
    original: Interned<String>,
    ngram_words: Option<Vec<Interned<String>>>,
    max_levenshtein_distance: u8,
    max_nbr_typos: u8,
    is_prefix: bool,
    zero_typo: ZeroTypoTerm,
    // May not be computed yet
@@ -342,16 +342,10 @@ impl QueryTermSubset {
        }
        None
    }
    pub fn max_typo_cost(&self, ctx: &SearchContext) -> u8 {
    pub fn max_nbr_typos(&self, ctx: &SearchContext) -> u8 {
        let t = ctx.term_interner.get(self.original);
        match t.max_levenshtein_distance {
            0 => {
                if t.allows_split_words() {
                    1
                } else {
                    0
                }
            }
        match t.max_nbr_typos {
            0 => 0,
            1 => {
                if self.one_typo_subset.is_empty() {
                    0
@@ -444,9 +438,6 @@ impl QueryTerm {

        self.zero_typo.is_empty() && one_typo.is_empty() && two_typo.is_empty()
    }
    fn allows_split_words(&self) -> bool {
        self.zero_typo.phrase.is_none()
    }
}

impl Interned<QueryTerm> {

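Besides the rename, the zero-budget case changes: a term that allows no typos still pays a typo cost of 1 when it can match through split words (for example "sunflower" matching "sun flower"), which keeps the `typo` ranking rule able to rank such matches. Reduced to its branch structure, the new zero case is roughly:

// Rough sketch of the changed zero branch only; the budgets of 1 and 2
// additionally depend on whether the one/two-typo subsets are empty.
fn max_typo_cost(max_levenshtein_distance: u8, allows_split_words: bool) -> u8 {
    match max_levenshtein_distance {
        0 if allows_split_words => 1,
        d => d,
    }
}
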
@@ -217,7 +217,7 @@ pub fn make_ngram(
    original: ngram_str_interned,
    ngram_words: Some(words_interned),
    is_prefix,
    max_levenshtein_distance: max_nbr_typos,
    max_nbr_typos,
    zero_typo: term.zero_typo,
    one_typo: Lazy::Uninit,
    two_typo: Lazy::Uninit,
@@ -271,7 +271,7 @@ impl PhraseBuilder {
    QueryTerm {
        original: ctx.word_interner.insert(phrase_desc),
        ngram_words: None,
        max_levenshtein_distance: 0,
        max_nbr_typos: 0,
        is_prefix: false,
        zero_typo: ZeroTypoTerm {
            phrase: Some(phrase),

@@ -209,7 +209,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
        self.traverse_breadth_first_backward(self.query_graph.end_node, |cur_node| {
            if cur_node == self.query_graph.end_node {
                *costs_to_end.get_mut(self.query_graph.end_node) = vec![0];
                return;
                return true;
            }
            let mut self_costs = Vec::<u64>::new();

@@ -226,6 +226,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
            self_costs.dedup();

            *costs_to_end.get_mut(cur_node) = self_costs;
            true
        });
        costs_to_end
    }
@@ -235,9 +236,6 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
        node_with_removed_outgoing_conditions: Interned<QueryNode>,
        costs: &mut MappedInterner<QueryNode, Vec<u64>>,
    ) {
        // Traverse the graph backward from the target node, recomputing the cost for each of its predecessors.
        // We first check that no other node is contributing the same total cost to a predecessor before removing
        // the cost from the predecessor.
        self.traverse_breadth_first_backward(node_with_removed_outgoing_conditions, |cur_node| {
            let mut costs_to_remove = FxHashSet::default();
            costs_to_remove.extend(costs.get(cur_node).iter().copied());
@@ -248,18 +246,19 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
                for cost in costs.get(edge.dest_node).iter() {
                    costs_to_remove.remove(&(*cost + edge.cost as u64));
                    if costs_to_remove.is_empty() {
                        return;
                        return false;
                    }
                }
            }
            if costs_to_remove.is_empty() {
                return;
                return false;
            }
            let mut new_costs = BTreeSet::from_iter(costs.get(cur_node).iter().copied());
            for c in costs_to_remove {
                new_costs.remove(&c);
            }
            *costs.get_mut(cur_node) = new_costs.into_iter().collect();
            true
        });
    }

@@ -270,7 +269,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
    pub fn traverse_breadth_first_backward(
        &self,
        from: Interned<QueryNode>,
        mut visit: impl FnMut(Interned<QueryNode>),
        mut visit: impl FnMut(Interned<QueryNode>) -> bool,
    ) {
        let mut reachable = SmallBitmap::for_interned_values_in(&self.query_graph.nodes);
        {
@@ -313,11 +312,13 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
                continue;
            }
            unreachable_or_visited.insert(cur_node);
            visit(cur_node);
            for prev_node in self.query_graph.nodes.get(cur_node).predecessors.iter() {
                if !enqueued.contains(prev_node) && !unreachable_or_visited.contains(prev_node) {
                    stack.push_back(prev_node);
                    enqueued.insert(prev_node);
            if visit(cur_node) {
                for prev_node in self.query_graph.nodes.get(cur_node).predecessors.iter() {
                    if !enqueued.contains(prev_node) && !unreachable_or_visited.contains(prev_node)
                    {
                        stack.push_back(prev_node);
                        enqueued.insert(prev_node);
                    }
                }
            }
        }

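The signature change turns the visitor into a pruning predicate: returning `false` stops the traversal from enqueueing that node's predecessors, which is what lets the cost-update pass above bail out as soon as nothing is left to remove. A generic, self-contained sketch of the pattern, using plain `usize` node ids instead of the interner:

use std::collections::{HashSet, VecDeque};

// Backward BFS where the visitor decides whether to keep expanding:
// `visit` returning false prunes the walk at that node.
fn traverse_backward(
    predecessors: &[Vec<usize>],
    from: usize,
    mut visit: impl FnMut(usize) -> bool,
) {
    let mut enqueued = HashSet::new();
    let mut queue = VecDeque::new();
    queue.push_back(from);
    enqueued.insert(from);
    while let Some(cur) = queue.pop_front() {
        if visit(cur) {
            for &prev in &predecessors[cur] {
                // insert() returns false if prev was already enqueued.
                if enqueued.insert(prev) {
                    queue.push_back(prev);
                }
            }
        }
    }
}
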
@@ -6,6 +6,8 @@ use crate::search::new::query_term::LocatedQueryTermSubset;
use crate::search::new::SearchContext;
use crate::Result;

const MAX_PROX: usize = crate::proximity::MAX_DISTANCE as usize;

pub fn build_edges(
    _ctx: &mut SearchContext,
    conditions_interner: &mut DedupInterner<ProximityCondition>,
@@ -35,7 +37,7 @@ pub fn build_edges(
    }

    let mut conditions = vec![];
    for cost in right_ngram_length..(7 + right_ngram_length) {
    for cost in right_ngram_length..(MAX_PROX + right_ngram_length) {
        conditions.push((
            cost as u32,
            conditions_interner.insert(ProximityCondition::Uninit {
@@ -47,7 +49,7 @@ pub fn build_edges(
    }

    conditions.push((
        (7 + right_ngram_length) as u32,
        (MAX_PROX + right_ngram_length) as u32,
        conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
    ));

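With the constant in place, an edge between two terms fans out into `MAX_PROX` cost-specific proximity conditions plus one catch-all `Term` condition, instead of the eight produced by the old hard-coded `7 + right_ngram_length` bound. A small sketch of the resulting cost ladder (the free function is illustrative):

const MAX_PROX: usize = 4; // mirrors crate::proximity::MAX_DISTANCE

// One condition per cost in `right_ngram_length..MAX_PROX + right_ngram_length`,
// then the final catch-all Term condition one step above.
fn edge_costs(right_ngram_length: usize) -> Vec<u32> {
    let mut costs: Vec<u32> = (right_ngram_length..MAX_PROX + right_ngram_length)
        .map(|c| c as u32)
        .collect();
    costs.push((MAX_PROX + right_ngram_length) as u32);
    costs // e.g. edge_costs(1) == [1, 2, 3, 4, 5]
}
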
@@ -50,7 +50,7 @@ impl RankingRuleGraphTrait for TypoGraph {
        // 3-gram -> equivalent to 2 typos
        let base_cost = if term.term_ids.len() == 1 { 0 } else { term.term_ids.len() as u32 };

        for nbr_typos in 0..=term.term_subset.max_typo_cost(ctx) {
        for nbr_typos in 0..=term.term_subset.max_nbr_typos(ctx) {
            let mut term = term.clone();
            match nbr_typos {
                0 => {

@@ -138,7 +138,7 @@ fn test_attribute_position_simple() {
    s.terms_matching_strategy(TermsMatchingStrategy::All);
    s.query("quick brown");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]");
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
}
#[test]
fn test_attribute_position_repeated() {
@@ -163,7 +163,7 @@ fn test_attribute_position_different_fields() {
    s.terms_matching_strategy(TermsMatchingStrategy::All);
    s.query("quick brown");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]");
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
}

#[test]
@@ -176,5 +176,5 @@ fn test_attribute_position_ngrams() {
    s.terms_matching_strategy(TermsMatchingStrategy::All);
    s.query("quick brown");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]");
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
}

@@ -11,11 +11,10 @@ It doesn't test properly:
- distinct attributes with arrays (because we know it's incorrect as well)
*/

use std::collections::HashSet;

use big_s::S;
use heed::RoTxn;
use maplit::hashset;
use std::collections::HashSet;

use super::collect_field_values;
use crate::index::tests::TempIndex;

@@ -3,9 +3,9 @@ This module tests the following properties:

1. Two consecutive words from a query can be combined into a "2gram"
2. Three consecutive words from a query can be combined into a "3gram"
3. A word from the query can be split into two consecutive words (split words), no matter how short it is
3. A word from the query can be split into two consecutive words (split words)
4. A 2gram can be split into two words
5. A 3gram can be split into two words
5. A 3gram cannot be split into two words
6. 2grams can contain up to 1 typo
7. 3grams cannot have typos
8. 2grams and 3grams can be prefix tolerant
@@ -14,7 +14,6 @@ This module tests the following properties:
11. Disabling typo tolerance does not disable ngram tolerance
12. Prefix tolerance is disabled for the last word if a space follows it
13. Ngrams cannot be formed by combining a phrase and a word or two phrases
14. Split words are not disabled by the `disableOnAttribute` or `disableOnWords` typo settings
*/

use crate::index::tests::TempIndex;
@@ -57,10 +56,6 @@ fn create_index() -> TempIndex {
    {
        "id": 5,
        "text": "sunflowering is not a verb"
    },
    {
        "id": 6,
        "text": "xy z"
    }
]))
.unwrap();
@@ -268,11 +263,10 @@ fn test_disable_split_words() {
    s.query("sunflower ");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();
    // no document containing `sun flower`
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 3]");
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[3]");
    let texts = collect_field_values(&index, &txn, "text", &documents_ids);
    insta::assert_debug_snapshot!(texts, @r###"
    [
        "\"the sun flower is tall\"",
        "\"the sunflower is tall\"",
    ]
    "###);
@@ -313,11 +307,10 @@ fn test_3gram_no_split_words() {
    let SearchResult { documents_ids, .. } = s.execute().unwrap();

    // no document with `sun flower`
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 2, 3, 5]");
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 3, 5]");
    let texts = collect_field_values(&index, &txn, "text", &documents_ids);
    insta::assert_debug_snapshot!(texts, @r###"
    [
        "\"the sun flower is tall\"",
        "\"the sunflowers are pretty\"",
        "\"the sunflower is tall\"",
        "\"sunflowering is not a verb\"",
@@ -376,50 +369,3 @@ fn test_no_ngram_phrases() {
    ]
    "###);
}

#[test]
fn test_short_split_words() {
    let index = create_index();
    let txn = index.read_txn().unwrap();

    let mut s = Search::new(&txn, &index);
    s.terms_matching_strategy(TermsMatchingStrategy::All);
    s.query("xyz");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();

    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[6]");
    let texts = collect_field_values(&index, &txn, "text", &documents_ids);
    insta::assert_debug_snapshot!(texts, @r###"
    [
        "\"xy z\"",
    ]
    "###);
}

#[test]
fn test_split_words_never_disabled() {
    let index = create_index();

    index
        .update_settings(|s| {
            s.set_exact_words(["sunflower"].iter().map(ToString::to_string).collect());
            s.set_exact_attributes(["text"].iter().map(ToString::to_string).collect());
        })
        .unwrap();

    let txn = index.read_txn().unwrap();

    let mut s = Search::new(&txn, &index);
    s.terms_matching_strategy(TermsMatchingStrategy::All);
    s.query("the sunflower is tall");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();

    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 3]");
    let texts = collect_field_values(&index, &txn, "text", &documents_ids);
    insta::assert_debug_snapshot!(texts, @r###"
    [
        "\"the sun flower is tall\"",
        "\"the sunflower is tall\"",
    ]
    "###);
}

@@ -9,7 +9,7 @@ This module tests the following properties:
6. A typo on the first letter of a word counts as two typos
7. Phrases are not typo tolerant
8. 2grams can have 1 typo if they are larger than `min_word_len_two_typos`
9. 3grams are not typo tolerant (but they can be split into two words)
9. 3grams are not typo tolerant
10. The `typo` ranking rule assumes the role of the `words` ranking rule implicitly
    if `words` doesn't exist before it.
11. The `typo` ranking rule places documents with the same number of typos in the same bucket
@@ -287,17 +287,16 @@ fn test_typo_exact_word() {
    ]
    "###);

    // exact words do not disable prefix (sunflowering OK, but no sunflowar)
    // exact words do not disable prefix (sunflowering OK, but no sunflowar or sun flower)
    let mut s = Search::new(&txn, &index);
    s.terms_matching_strategy(TermsMatchingStrategy::All);
    s.query("network interconnection sunflower");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[16, 17, 18]");
    insta::assert_snapshot!(format!("{documents_ids:?}"), @"[16, 18]");
    let texts = collect_field_values(&index, &txn, "text", &documents_ids);
    insta::assert_debug_snapshot!(texts, @r###"
    [
        "\"network interconnection sunflower\"",
        "\"network interconnection sun flower\"",
        "\"network interconnection sunflowering\"",
    ]
    "###);

@@ -261,9 +261,7 @@ pub(crate) mod test_helpers {
        let options = options.map_size(4096 * 4 * 1000 * 100);
        let tempdir = tempfile::TempDir::new().unwrap();
        let env = options.open(tempdir.path()).unwrap();
        let mut wtxn = env.write_txn().unwrap();
        let content = env.create_database(&mut wtxn, None).unwrap();
        wtxn.commit().unwrap();
        let content = env.create_database(None).unwrap();

        FacetIndex {
            content,

@@ -91,7 +91,7 @@ fn document_word_positions_into_sorter(
    while !word_positions_heap.is_empty() {
        while let Some(peeked_word_position) = word_positions_heap.pop() {
            ordered_peeked_word_positions.push(peeked_word_position);
            if ordered_peeked_word_positions.len() == 7 {
            if ordered_peeked_word_positions.len() == (MAX_DISTANCE - 1) as usize {
                break;
            }
        }