Compare commits


3 Commits

Author          SHA1        Message                                                                       Date
Loïc Lecrenier  17f6d65840  Reduce maximum indexed proximity for word pairs                               2023-05-09 10:52:26 +02:00
Loïc Lecrenier  daa4138df9  Adjust costs of edges in position ranking rule (to ensure good performance)  2023-05-09 10:52:16 +02:00
Loïc Lecrenier  e4035ff3ec  Implement words as a graph-based ranking rule and fix some bugs              2023-05-09 10:51:35 +02:00
52 changed files with 310 additions and 869 deletions

Cargo.lock generated
View File

@@ -463,7 +463,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
 [[package]]
 name = "benchmarks"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "anyhow",
  "bytes",
@@ -1209,7 +1209,7 @@ dependencies = [
 [[package]]
 name = "dump"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -1428,7 +1428,7 @@ dependencies = [
 [[package]]
 name = "file-store"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "faux",
  "tempfile",
@@ -1450,7 +1450,7 @@ dependencies = [
 [[package]]
 name = "filter-parser"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "insta",
  "nom",
@@ -1476,7 +1476,7 @@ dependencies = [
 [[package]]
 name = "flatten-serde-json"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -1794,7 +1794,7 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
 [[package]]
 name = "heed"
 version = "0.12.5"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
 dependencies = [
  "byteorder",
  "heed-traits",
@@ -1811,12 +1811,12 @@ dependencies = [
 [[package]]
 name = "heed-traits"
 version = "0.7.0"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
 [[package]]
 name = "heed-types"
 version = "0.7.2"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
 dependencies = [
  "bincode",
  "heed-traits",
@@ -1959,7 +1959,7 @@ dependencies = [
 [[package]]
 name = "index-scheduler"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -2113,7 +2113,7 @@ dependencies = [
 [[package]]
 name = "json-depth-checker"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2539,7 +2539,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
 [[package]]
 name = "meili-snap"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "insta",
  "md5",
@@ -2548,7 +2548,7 @@ dependencies = [
 [[package]]
 name = "meilisearch"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -2636,7 +2636,7 @@ dependencies = [
 [[package]]
 name = "meilisearch-auth"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "base64 0.21.0",
  "enum-iterator",
@@ -2655,7 +2655,7 @@ dependencies = [
 [[package]]
 name = "meilisearch-types"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -2709,7 +2709,7 @@ dependencies = [
 [[package]]
 name = "milli"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "big_s",
  "bimap",
@@ -3064,7 +3064,7 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 [[package]]
 name = "permissive-json-pointer"
-version = "1.2.1"
+version = "1.1.1"
 dependencies = [
  "big_s",
  "serde_json",

View File

@@ -17,7 +17,7 @@ members = [
 ]
 [workspace.package]
-version = "1.2.1"
+version = "1.1.1"
 authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
 description = "Meilisearch HTTP server"
 homepage = "https://meilisearch.com"

View File

@@ -126,6 +126,3 @@ ssl_tickets = false
 experimental_enable_metrics = false
 # Experimental metrics feature. For more information, see: <https://github.com/meilisearch/meilisearch/discussions/3518>
 # Enables the Prometheus metrics on the `GET /metrics` endpoint.
-experimental_reduce_indexing_memory_usage = false
-# Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>

View File

@@ -24,7 +24,6 @@ use std::io::BufWriter;
 use dump::IndexMetadata;
 use log::{debug, error, info};
-use meilisearch_types::error::Code;
 use meilisearch_types::heed::{RoTxn, RwTxn};
 use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader};
 use meilisearch_types::milli::heed::CompactionOption;
@@ -67,6 +66,10 @@ pub(crate) enum Batch {
         op: IndexOperation,
         must_create_index: bool,
     },
+    IndexDocumentDeletionByFilter {
+        index_uid: String,
+        task: Task,
+    },
     IndexCreation {
         index_uid: String,
         primary_key: Option<String>,
@@ -110,10 +113,6 @@ pub(crate) enum IndexOperation {
         documents: Vec<Vec<String>>,
         tasks: Vec<Task>,
     },
-    IndexDocumentDeletionByFilter {
-        index_uid: String,
-        task: Task,
-    },
     DocumentClear {
         index_uid: String,
         tasks: Vec<Task>,
@@ -155,6 +154,7 @@ impl Batch {
             | Batch::TaskDeletion(task)
             | Batch::Dump(task)
             | Batch::IndexCreation { task, .. }
+            | Batch::IndexDocumentDeletionByFilter { task, .. }
             | Batch::IndexUpdate { task, .. } => vec![task.uid],
             Batch::SnapshotCreation(tasks) | Batch::IndexDeletion { tasks, .. } => {
                 tasks.iter().map(|task| task.uid).collect()
@@ -166,7 +166,6 @@ impl Batch {
                 | IndexOperation::DocumentClear { tasks, .. } => {
                     tasks.iter().map(|task| task.uid).collect()
                 }
-                IndexOperation::IndexDocumentDeletionByFilter { task, .. } => vec![task.uid],
                 IndexOperation::SettingsAndDocumentOperation {
                     document_import_tasks: tasks,
                     settings_tasks: other,
@@ -194,7 +193,8 @@ impl Batch {
             IndexOperation { op, .. } => Some(op.index_uid()),
             IndexCreation { index_uid, .. }
             | IndexUpdate { index_uid, .. }
-            | IndexDeletion { index_uid, .. } => Some(index_uid),
+            | IndexDeletion { index_uid, .. }
+            | IndexDocumentDeletionByFilter { index_uid, .. } => Some(index_uid),
         }
     }
 }
@@ -204,7 +204,6 @@ impl IndexOperation {
         match self {
             IndexOperation::DocumentOperation { index_uid, .. }
             | IndexOperation::DocumentDeletion { index_uid, .. }
-            | IndexOperation::IndexDocumentDeletionByFilter { index_uid, .. }
             | IndexOperation::DocumentClear { index_uid, .. }
             | IndexOperation::Settings { index_uid, .. }
             | IndexOperation::DocumentClearAndSetting { index_uid, .. }
@@ -239,12 +238,9 @@ impl IndexScheduler {
         let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
         match &task.kind {
             KindWithContent::DocumentDeletionByFilter { index_uid, .. } => {
-                Ok(Some(Batch::IndexOperation {
-                    op: IndexOperation::IndexDocumentDeletionByFilter {
-                        index_uid: index_uid.clone(),
-                        task,
-                    },
-                    must_create_index: false,
+                Ok(Some(Batch::IndexDocumentDeletionByFilter {
+                    index_uid: index_uid.clone(),
+                    task,
                 }))
             }
             _ => unreachable!(),
@@ -890,6 +886,51 @@ impl IndexScheduler {
                 Ok(tasks)
             }
+            Batch::IndexDocumentDeletionByFilter { mut task, index_uid: _ } => {
+                let (index_uid, filter) =
+                    if let KindWithContent::DocumentDeletionByFilter { index_uid, filter_expr } =
+                        &task.kind
+                    {
+                        (index_uid, filter_expr)
+                    } else {
+                        unreachable!()
+                    };
+                let index = {
+                    let rtxn = self.env.read_txn()?;
+                    self.index_mapper.index(&rtxn, index_uid)?
+                };
+                let deleted_documents = delete_document_by_filter(filter, index);
+                let original_filter = if let Some(Details::DocumentDeletionByFilter {
+                    original_filter,
+                    deleted_documents: _,
+                }) = task.details
+                {
+                    original_filter
+                } else {
+                    // In the case of a `documentDeleteByFilter` the details MUST be set
+                    unreachable!();
+                };
+                match deleted_documents {
+                    Ok(deleted_documents) => {
+                        task.status = Status::Succeeded;
+                        task.details = Some(Details::DocumentDeletionByFilter {
+                            original_filter,
+                            deleted_documents: Some(deleted_documents),
+                        });
+                    }
+                    Err(e) => {
+                        task.status = Status::Failed;
+                        task.details = Some(Details::DocumentDeletionByFilter {
+                            original_filter,
+                            deleted_documents: Some(0),
+                        });
+                        task.error = Some(e.into());
+                    }
+                }
+                Ok(vec![task])
+            }
             Batch::IndexCreation { index_uid, primary_key, task } => {
                 let wtxn = self.env.write_txn()?;
                 if self.index_mapper.exists(&wtxn, &index_uid)? {
@@ -1246,47 +1287,6 @@ impl IndexScheduler {
                 Ok(tasks)
             }
-            IndexOperation::IndexDocumentDeletionByFilter { mut task, index_uid: _ } => {
-                let filter =
-                    if let KindWithContent::DocumentDeletionByFilter { filter_expr, .. } =
-                        &task.kind
-                    {
-                        filter_expr
-                    } else {
-                        unreachable!()
-                    };
-                let deleted_documents = delete_document_by_filter(index_wtxn, filter, index);
-                let original_filter = if let Some(Details::DocumentDeletionByFilter {
-                    original_filter,
-                    deleted_documents: _,
-                }) = task.details
-                {
-                    original_filter
-                } else {
-                    // In the case of a `documentDeleteByFilter` the details MUST be set
-                    unreachable!();
-                };
-                match deleted_documents {
-                    Ok(deleted_documents) => {
-                        task.status = Status::Succeeded;
-                        task.details = Some(Details::DocumentDeletionByFilter {
-                            original_filter,
-                            deleted_documents: Some(deleted_documents),
-                        });
-                    }
-                    Err(e) => {
-                        task.status = Status::Failed;
-                        task.details = Some(Details::DocumentDeletionByFilter {
-                            original_filter,
-                            deleted_documents: Some(0),
-                        });
-                        task.error = Some(e.into());
-                    }
-                }
-                Ok(vec![task])
-            }
             IndexOperation::Settings { index_uid: _, settings, mut tasks } => {
                 let indexer_config = self.index_mapper.indexer_config();
                 let mut builder = milli::update::Settings::new(index_wtxn, index, indexer_config);
@@ -1486,22 +1486,18 @@
     }
 }
-fn delete_document_by_filter<'a>(
-    wtxn: &mut RwTxn<'a, '_>,
-    filter: &serde_json::Value,
-    index: &'a Index,
-) -> Result<u64> {
+fn delete_document_by_filter(filter: &serde_json::Value, index: Index) -> Result<u64> {
     let filter = Filter::from_json(filter)?;
     Ok(if let Some(filter) = filter {
-        let candidates = filter.evaluate(wtxn, index).map_err(|err| match err {
-            milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
-                Error::from(err).with_custom_error_code(Code::InvalidDocumentFilter)
-            }
-            e => e.into(),
-        })?;
-        let mut delete_operation = DeleteDocuments::new(wtxn, index)?;
+        let mut wtxn = index.write_txn()?;
+        let candidates = filter.evaluate(&wtxn, &index)?;
+        let mut delete_operation = DeleteDocuments::new(&mut wtxn, &index)?;
         delete_operation.delete_documents(&candidates);
-        delete_operation.execute().map(|result| result.deleted_documents)?
+        let deleted_documents =
+            delete_operation.execute().map(|result| result.deleted_documents)?;
+        wtxn.commit()?;
+        deleted_documents
     } else {
         0
     })

View File

@@ -46,8 +46,6 @@ impl From<DateField> for Code {
 #[allow(clippy::large_enum_variant)]
 #[derive(Error, Debug)]
 pub enum Error {
-    #[error("{1}")]
-    WithCustomErrorCode(Code, Box<Self>),
     #[error("Index `{0}` not found.")]
     IndexNotFound(String),
     #[error("Index `{0}` already exists.")]
@@ -146,7 +144,6 @@ impl Error {
     pub fn is_recoverable(&self) -> bool {
         match self {
             Error::IndexNotFound(_)
-            | Error::WithCustomErrorCode(_, _)
             | Error::IndexAlreadyExists(_)
             | Error::SwapDuplicateIndexFound(_)
             | Error::SwapDuplicateIndexesFound(_)
@@ -179,16 +176,11 @@ impl Error {
             Error::PlannedFailure => false,
         }
     }
-    pub fn with_custom_error_code(self, code: Code) -> Self {
-        Self::WithCustomErrorCode(code, Box::new(self))
-    }
 }
 impl ErrorCode for Error {
     fn error_code(&self) -> Code {
         match self {
-            Error::WithCustomErrorCode(code, _) => *code,
             Error::IndexNotFound(_) => Code::IndexNotFound,
             Error::IndexAlreadyExists(_) => Code::IndexAlreadyExists,
             Error::SwapDuplicateIndexesFound(_) => Code::InvalidSwapDuplicateIndexFound,

View File

@@ -5,7 +5,6 @@ use std::collections::BTreeMap;
 use std::path::Path;
 use std::time::Duration;
-use meilisearch_types::heed::flags::Flags;
 use meilisearch_types::heed::{EnvClosingEvent, EnvOpenOptions};
 use meilisearch_types::milli::Index;
 use time::OffsetDateTime;
@@ -54,7 +53,6 @@ pub struct IndexMap {
 pub struct ClosingIndex {
     uuid: Uuid,
     closing_event: EnvClosingEvent,
-    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }
@@ -70,7 +68,6 @@ impl ClosingIndex {
     pub fn wait_timeout(self, timeout: Duration) -> Option<ReopenableIndex> {
         self.closing_event.wait_timeout(timeout).then_some(ReopenableIndex {
             uuid: self.uuid,
-            enable_mdb_writemap: self.enable_mdb_writemap,
             map_size: self.map_size,
             generation: self.generation,
         })
@@ -79,7 +76,6 @@ impl ClosingIndex {
 pub struct ReopenableIndex {
     uuid: Uuid,
-    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }
@@ -107,7 +103,7 @@ impl ReopenableIndex {
                 return Ok(());
             }
             map.unavailable.remove(&self.uuid);
-            map.create(&self.uuid, path, None, self.enable_mdb_writemap, self.map_size)?;
+            map.create(&self.uuid, path, None, self.map_size)?;
         }
         Ok(())
     }
@@ -174,17 +170,16 @@ impl IndexMap {
         uuid: &Uuid,
         path: &Path,
         date: Option<(OffsetDateTime, OffsetDateTime)>,
-        enable_mdb_writemap: bool,
        map_size: usize,
     ) -> Result<Index> {
         if !matches!(self.get_unavailable(uuid), Missing) {
             panic!("Attempt to open an index that was unavailable");
         }
-        let index = create_or_open_index(path, date, enable_mdb_writemap, map_size)?;
+        let index = create_or_open_index(path, date, map_size)?;
         match self.available.insert(*uuid, index.clone()) {
             InsertionOutcome::InsertedNew => (),
             InsertionOutcome::Evicted(evicted_uuid, evicted_index) => {
-                self.close(evicted_uuid, evicted_index, enable_mdb_writemap, 0);
+                self.close(evicted_uuid, evicted_index, 0);
             }
             InsertionOutcome::Replaced(_) => {
                 panic!("Attempt to open an index that was already opened")
@@ -217,32 +212,17 @@ impl IndexMap {
     /// | Closing | Closing |
     /// | Available | Closing |
     ///
-    pub fn close_for_resize(
-        &mut self,
-        uuid: &Uuid,
-        enable_mdb_writemap: bool,
-        map_size_growth: usize,
-    ) {
-        let Some(index) = self.available.remove(uuid) else {
-            return;
-        };
-        self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
+    pub fn close_for_resize(&mut self, uuid: &Uuid, map_size_growth: usize) {
+        let Some(index) = self.available.remove(uuid) else { return; };
+        self.close(*uuid, index, map_size_growth);
     }
-    fn close(
-        &mut self,
-        uuid: Uuid,
-        index: Index,
-        enable_mdb_writemap: bool,
-        map_size_growth: usize,
-    ) {
+    fn close(&mut self, uuid: Uuid, index: Index, map_size_growth: usize) {
         let map_size = index.map_size().unwrap_or(DEFAULT_MAP_SIZE) + map_size_growth;
         let closing_event = index.prepare_for_closing();
         let generation = self.next_generation();
-        self.unavailable.insert(
-            uuid,
-            Some(ClosingIndex { uuid, closing_event, enable_mdb_writemap, map_size, generation }),
-        );
+        self.unavailable
+            .insert(uuid, Some(ClosingIndex { uuid, closing_event, map_size, generation }));
     }
     /// Attempts to delete and index.
@@ -302,15 +282,11 @@
 fn create_or_open_index(
     path: &Path,
     date: Option<(OffsetDateTime, OffsetDateTime)>,
-    enable_mdb_writemap: bool,
     map_size: usize,
 ) -> Result<Index> {
     let mut options = EnvOpenOptions::new();
     options.map_size(clamp_to_page_size(map_size));
     options.max_readers(1024);
-    if enable_mdb_writemap {
-        unsafe { options.flag(Flags::MdbWriteMap) };
-    }
     if let Some((created, updated)) = date {
         Ok(Index::new_with_creation_dates(options, path, created, updated)?)

View File

@@ -66,8 +66,6 @@ pub struct IndexMapper {
     index_base_map_size: usize,
     /// The quantity by which the map size of an index is incremented upon reopening, in bytes.
     index_growth_amount: usize,
-    /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.
-    enable_mdb_writemap: bool,
     pub indexer_config: Arc<IndexerConfig>,
 }
@@ -125,22 +123,15 @@ impl IndexMapper {
         index_base_map_size: usize,
         index_growth_amount: usize,
         index_count: usize,
-        enable_mdb_writemap: bool,
         indexer_config: IndexerConfig,
     ) -> Result<Self> {
-        let mut wtxn = env.write_txn()?;
-        let index_mapping = env.create_database(&mut wtxn, Some(INDEX_MAPPING))?;
-        let index_stats = env.create_database(&mut wtxn, Some(INDEX_STATS))?;
-        wtxn.commit()?;
         Ok(Self {
             index_map: Arc::new(RwLock::new(IndexMap::new(index_count))),
-            index_mapping,
-            index_stats,
+            index_mapping: env.create_database(Some(INDEX_MAPPING))?,
+            index_stats: env.create_database(Some(INDEX_STATS))?,
             base_path,
             index_base_map_size,
             index_growth_amount,
-            enable_mdb_writemap,
             indexer_config: Arc::new(indexer_config),
         })
     }
@@ -171,7 +162,6 @@ impl IndexMapper {
             &uuid,
             &index_path,
             date,
-            self.enable_mdb_writemap,
             self.index_base_map_size,
         )?;
@@ -283,11 +273,7 @@ impl IndexMapper {
             .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
         // We remove the index from the in-memory index map.
-        self.index_map.write().unwrap().close_for_resize(
-            &uuid,
-            self.enable_mdb_writemap,
-            self.index_growth_amount,
-        );
+        self.index_map.write().unwrap().close_for_resize(&uuid, self.index_growth_amount);
         Ok(())
     }
@@ -352,7 +338,6 @@ impl IndexMapper {
             &uuid,
             &index_path,
             None,
-            self.enable_mdb_writemap,
             self.index_base_map_size,
         )?;
     }

View File

@@ -233,8 +233,6 @@ pub struct IndexSchedulerOptions {
     pub task_db_size: usize,
     /// The size, in bytes, with which a meilisearch index is opened the first time of each meilisearch index.
     pub index_base_map_size: usize,
-    /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.
-    pub enable_mdb_writemap: bool,
     /// The size, in bytes, by which the map size of an index is increased when it resized due to being full.
     pub index_growth_amount: usize,
     /// The number of indexes that can be concurrently opened in memory.
@@ -376,11 +374,6 @@ impl IndexScheduler {
         std::fs::create_dir_all(&options.indexes_path)?;
         std::fs::create_dir_all(&options.dumps_path)?;
-        if cfg!(windows) && options.enable_mdb_writemap {
-            // programmer error if this happens: in normal use passing the option on Windows is an error in main
-            panic!("Windows doesn't support the MDB_WRITEMAP LMDB option");
-        }
         let task_db_size = clamp_to_page_size(options.task_db_size);
         let budget = if options.indexer_config.skip_index_budget {
             IndexBudget {
@@ -403,37 +396,25 @@
             .open(options.tasks_path)?;
         let file_store = FileStore::new(&options.update_file_path)?;
-        let mut wtxn = env.write_txn()?;
-        let all_tasks = env.create_database(&mut wtxn, Some(db_name::ALL_TASKS))?;
-        let status = env.create_database(&mut wtxn, Some(db_name::STATUS))?;
-        let kind = env.create_database(&mut wtxn, Some(db_name::KIND))?;
-        let index_tasks = env.create_database(&mut wtxn, Some(db_name::INDEX_TASKS))?;
-        let canceled_by = env.create_database(&mut wtxn, Some(db_name::CANCELED_BY))?;
-        let enqueued_at = env.create_database(&mut wtxn, Some(db_name::ENQUEUED_AT))?;
-        let started_at = env.create_database(&mut wtxn, Some(db_name::STARTED_AT))?;
-        let finished_at = env.create_database(&mut wtxn, Some(db_name::FINISHED_AT))?;
-        wtxn.commit()?;
         // allow unreachable_code to get rids of the warning in the case of a test build.
         let this = Self {
             must_stop_processing: MustStopProcessing::default(),
             processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
             file_store,
-            all_tasks,
-            status,
-            kind,
-            index_tasks,
-            canceled_by,
-            enqueued_at,
-            started_at,
-            finished_at,
+            all_tasks: env.create_database(Some(db_name::ALL_TASKS))?,
+            status: env.create_database(Some(db_name::STATUS))?,
+            kind: env.create_database(Some(db_name::KIND))?,
+            index_tasks: env.create_database(Some(db_name::INDEX_TASKS))?,
+            canceled_by: env.create_database(Some(db_name::CANCELED_BY))?,
+            enqueued_at: env.create_database(Some(db_name::ENQUEUED_AT))?,
+            started_at: env.create_database(Some(db_name::STARTED_AT))?,
+            finished_at: env.create_database(Some(db_name::FINISHED_AT))?,
             index_mapper: IndexMapper::new(
                 &env,
                 options.indexes_path,
                 budget.map_size,
                 options.index_growth_amount,
                 budget.index_count,
-                options.enable_mdb_writemap,
                 options.indexer_config,
             )?,
             env,
@@ -1490,7 +1471,6 @@ mod tests {
             dumps_path: tempdir.path().join("dumps"),
             task_db_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
             index_base_map_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
-            enable_mdb_writemap: false,
             index_growth_amount: 1000 * 1000, // 1 MB
             index_count: 5,
             indexer_config,

View File

@@ -466,7 +466,7 @@ impl IndexScheduler {
             }
         }
         Details::DocumentDeletionByFilter { deleted_documents, original_filter: _ } => {
-            assert_eq!(kind.as_kind(), Kind::DocumentDeletion);
+            assert_eq!(kind.as_kind(), Kind::DocumentDeletionByFilter);
             let (index_uid, _) = if let KindWithContent::DocumentDeletionByFilter {
                 ref index_uid,
                 ref filter_expr,

View File

@@ -55,11 +55,9 @@ impl HeedAuthStore {
         let path = path.as_ref().join(AUTH_DB_PATH);
         create_dir_all(&path)?;
         let env = Arc::new(open_auth_store_env(path.as_ref())?);
-        let mut wtxn = env.write_txn()?;
-        let keys = env.create_database(&mut wtxn, Some(KEY_DB_NAME))?;
+        let keys = env.create_database(Some(KEY_DB_NAME))?;
         let action_keyid_index_expiration =
-            env.create_database(&mut wtxn, Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?;
-        wtxn.commit()?;
+            env.create_database(Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?;
         Ok(Self { env, keys, action_keyid_index_expiration, should_close_on_drop: true })
     }

View File

@@ -150,7 +150,6 @@ make_missing_field_convenience_builder!(MissingApiKeyActions, missing_api_key_ac
 make_missing_field_convenience_builder!(MissingApiKeyExpiresAt, missing_api_key_expires_at);
 make_missing_field_convenience_builder!(MissingApiKeyIndexes, missing_api_key_indexes);
 make_missing_field_convenience_builder!(MissingSwapIndexes, missing_swap_indexes);
-make_missing_field_convenience_builder!(MissingDocumentFilter, missing_document_filter);
 // Integrate a sub-error into a [`DeserrError`] by taking its error message but using
 // the default error code (C) from `Self`

View File

@@ -214,12 +214,12 @@ InvalidApiKeyUid , InvalidRequest , BAD_REQUEST ;
 InvalidContentType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
 InvalidDocumentCsvDelimiter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFields , InvalidRequest , BAD_REQUEST ;
-MissingDocumentFilter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFilter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentGeoField , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentId , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentLimit , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentOffset , InvalidRequest , BAD_REQUEST ;
+InvalidDocumentDeleteFilter , InvalidRequest , BAD_REQUEST ;
 InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
 InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
 InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;

View File

@@ -147,7 +147,9 @@ impl Key {
 fn parse_expiration_date(
     string: Option<String>,
 ) -> std::result::Result<Option<OffsetDateTime>, ParseOffsetDateTimeError> {
-    let Some(string) = string else { return Ok(None) };
+    let Some(string) = string else {
+        return Ok(None)
+    };
     let datetime = if let Ok(datetime) = OffsetDateTime::parse(&string, &Rfc3339) {
         datetime
     } else if let Ok(primitive_datetime) = PrimitiveDateTime::parse(

View File

@@ -395,6 +395,7 @@ impl std::error::Error for ParseTaskStatusError {}
 pub enum Kind {
     DocumentAdditionOrUpdate,
     DocumentDeletion,
+    DocumentDeletionByFilter,
     SettingsUpdate,
     IndexCreation,
     IndexDeletion,
@@ -411,6 +412,7 @@ impl Kind {
         match self {
             Kind::DocumentAdditionOrUpdate
             | Kind::DocumentDeletion
+            | Kind::DocumentDeletionByFilter
             | Kind::SettingsUpdate
             | Kind::IndexCreation
             | Kind::IndexDeletion
@@ -428,6 +430,7 @@ impl Display for Kind {
         match self {
             Kind::DocumentAdditionOrUpdate => write!(f, "documentAdditionOrUpdate"),
             Kind::DocumentDeletion => write!(f, "documentDeletion"),
+            Kind::DocumentDeletionByFilter => write!(f, "documentDeletionByFilter"),
             Kind::SettingsUpdate => write!(f, "settingsUpdate"),
             Kind::IndexCreation => write!(f, "indexCreation"),
             Kind::IndexDeletion => write!(f, "indexDeletion"),

View File

@@ -5,7 +5,7 @@ use actix_web::HttpRequest;
 use meilisearch_types::InstanceUid;
 use serde_json::Value;
-use super::{find_user_id, Analytics, DocumentDeletionKind, DocumentFetchKind};
+use super::{find_user_id, Analytics, DocumentDeletionKind};
 use crate::routes::indexes::documents::UpdateDocumentsQuery;
 use crate::routes::tasks::TasksFilterQuery;
 use crate::Opt;
@@ -71,8 +71,6 @@ impl Analytics for MockAnalytics {
         _request: &HttpRequest,
     ) {
     }
-    fn get_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
-    fn post_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
     fn get_tasks(&self, _query: &TasksFilterQuery, _request: &HttpRequest) {}
     fn health_seen(&self, _request: &HttpRequest) {}
 }

View File

@@ -67,12 +67,6 @@ pub enum DocumentDeletionKind {
     PerFilter,
 }
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum DocumentFetchKind {
-    PerDocumentId,
-    Normal { with_filter: bool, limit: usize, offset: usize },
-}
 pub trait Analytics: Sync + Send {
     fn instance_uid(&self) -> Option<&InstanceUid>;
@@ -96,12 +90,6 @@ pub trait Analytics: Sync + Send {
         request: &HttpRequest,
     );
-    // this method should be called to aggregate a fetch documents request
-    fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest);
-    // this method should be called to aggregate a fetch documents request
-    fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest);
     // this method should be called to aggregate a add documents request
     fn delete_documents(&self, kind: DocumentDeletionKind, request: &HttpRequest);

View File

@@ -23,9 +23,7 @@ use tokio::select;
 use tokio::sync::mpsc::{self, Receiver, Sender};
 use uuid::Uuid;
-use super::{
-    config_user_id_path, DocumentDeletionKind, DocumentFetchKind, MEILISEARCH_CONFIG_PATH,
-};
+use super::{config_user_id_path, DocumentDeletionKind, MEILISEARCH_CONFIG_PATH};
 use crate::analytics::Analytics;
 use crate::option::{default_http_addr, IndexerOpts, MaxMemory, MaxThreads, ScheduleSnapshot};
 use crate::routes::indexes::documents::UpdateDocumentsQuery;
@@ -74,8 +72,6 @@ pub enum AnalyticsMsg {
     AggregateAddDocuments(DocumentsAggregator),
     AggregateDeleteDocuments(DocumentsDeletionAggregator),
     AggregateUpdateDocuments(DocumentsAggregator),
-    AggregateGetFetchDocuments(DocumentsFetchAggregator),
-    AggregatePostFetchDocuments(DocumentsFetchAggregator),
     AggregateTasks(TasksAggregator),
     AggregateHealth(HealthAggregator),
 }
@@ -143,8 +139,6 @@ impl SegmentAnalytics {
             add_documents_aggregator: DocumentsAggregator::default(),
             delete_documents_aggregator: DocumentsDeletionAggregator::default(),
             update_documents_aggregator: DocumentsAggregator::default(),
-            get_fetch_documents_aggregator: DocumentsFetchAggregator::default(),
-            post_fetch_documents_aggregator: DocumentsFetchAggregator::default(),
             get_tasks_aggregator: TasksAggregator::default(),
             health_aggregator: HealthAggregator::default(),
         });
@@ -211,16 +205,6 @@ impl super::Analytics for SegmentAnalytics {
         let _ = self.sender.try_send(AnalyticsMsg::AggregateUpdateDocuments(aggregate));
     }
-    fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) {
-        let aggregate = DocumentsFetchAggregator::from_query(documents_query, request);
-        let _ = self.sender.try_send(AnalyticsMsg::AggregateGetFetchDocuments(aggregate));
-    }
-    fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) {
-        let aggregate = DocumentsFetchAggregator::from_query(documents_query, request);
-        let _ = self.sender.try_send(AnalyticsMsg::AggregatePostFetchDocuments(aggregate));
-    }
     fn get_tasks(&self, query: &TasksFilterQuery, request: &HttpRequest) {
         let aggregate = TasksAggregator::from_query(query, request);
         let _ = self.sender.try_send(AnalyticsMsg::AggregateTasks(aggregate));
@@ -241,7 +225,6 @@
 struct Infos {
     env: String,
     experimental_enable_metrics: bool,
-    experimental_reduce_indexing_memory_usage: bool,
     db_path: bool,
     import_dump: bool,
     dump_dir: bool,
@@ -275,7 +258,6 @@ impl From<Opt> for Infos {
         let Opt {
             db_path,
             experimental_enable_metrics,
-            experimental_reduce_indexing_memory_usage,
             http_addr,
             master_key: _,
             env,
@@ -318,7 +300,6 @@ impl From<Opt> for Infos {
         Self {
             env,
             experimental_enable_metrics,
-            experimental_reduce_indexing_memory_usage,
             db_path: db_path != PathBuf::from("./data.ms"),
             import_dump: import_dump.is_some(),
             dump_dir: dump_dir != PathBuf::from("dumps/"),
@@ -357,8 +338,6 @@ pub struct Segment {
     add_documents_aggregator: DocumentsAggregator,
     delete_documents_aggregator: DocumentsDeletionAggregator,
     update_documents_aggregator: DocumentsAggregator,
-    get_fetch_documents_aggregator: DocumentsFetchAggregator,
-    post_fetch_documents_aggregator: DocumentsFetchAggregator,
     get_tasks_aggregator: TasksAggregator,
     health_aggregator: HealthAggregator,
 }
@@ -421,8 +400,6 @@ impl Segment {
                 Some(AnalyticsMsg::AggregateAddDocuments(agreg)) => self.add_documents_aggregator.aggregate(agreg),
                 Some(AnalyticsMsg::AggregateDeleteDocuments(agreg)) => self.delete_documents_aggregator.aggregate(agreg),
                 Some(AnalyticsMsg::AggregateUpdateDocuments(agreg)) => self.update_documents_aggregator.aggregate(agreg),
-                Some(AnalyticsMsg::AggregateGetFetchDocuments(agreg)) => self.get_fetch_documents_aggregator.aggregate(agreg),
-                Some(AnalyticsMsg::AggregatePostFetchDocuments(agreg)) => self.post_fetch_documents_aggregator.aggregate(agreg),
                 Some(AnalyticsMsg::AggregateTasks(agreg)) => self.get_tasks_aggregator.aggregate(agreg),
                 Some(AnalyticsMsg::AggregateHealth(agreg)) => self.health_aggregator.aggregate(agreg),
                 None => (),
@@ -473,10 +450,6 @@ impl Segment {
             .into_event(&self.user, "Documents Deleted");
         let update_documents = std::mem::take(&mut self.update_documents_aggregator)
             .into_event(&self.user, "Documents Updated");
-        let get_fetch_documents = std::mem::take(&mut self.get_fetch_documents_aggregator)
-            .into_event(&self.user, "Documents Fetched GET");
-        let post_fetch_documents = std::mem::take(&mut self.post_fetch_documents_aggregator)
-            .into_event(&self.user, "Documents Fetched POST");
         let get_tasks =
             std::mem::take(&mut self.get_tasks_aggregator).into_event(&self.user, "Tasks Seen");
         let health =
@@ -500,12 +473,6 @@ impl Segment {
         if let Some(update_documents) = update_documents {
             let _ = self.batcher.push(update_documents).await;
         }
-        if let Some(get_fetch_documents) = get_fetch_documents {
-            let _ = self.batcher.push(get_fetch_documents).await;
-        }
-        if let Some(post_fetch_documents) = post_fetch_documents {
-            let _ = self.batcher.push(post_fetch_documents).await;
-        }
         if let Some(get_tasks) = get_tasks {
             let _ = self.batcher.push(get_tasks).await;
         }
@@ -1168,76 +1135,3 @@ impl HealthAggregator {
         })
     }
 }
-#[derive(Default, Serialize)]
-pub struct DocumentsFetchAggregator {
-    #[serde(skip)]
-    timestamp: Option<OffsetDateTime>,
-    // context
-    #[serde(rename = "user-agent")]
-    user_agents: HashSet<String>,
-    #[serde(rename = "requests.max_limit")]
-    total_received: usize,
-    // a call on ../documents/:doc_id
-    per_document_id: bool,
-    // if a filter was used
-    per_filter: bool,
-    // pagination
-    #[serde(rename = "pagination.max_limit")]
-    max_limit: usize,
-    #[serde(rename = "pagination.max_offset")]
-    max_offset: usize,
-}
-impl DocumentsFetchAggregator {
-    pub fn from_query(query: &DocumentFetchKind, request: &HttpRequest) -> Self {
-        let (limit, offset) = match query {
-            DocumentFetchKind::PerDocumentId => (1, 0),
-            DocumentFetchKind::Normal { limit, offset, .. } => (*limit, *offset),
-        };
-        Self {
-            timestamp: Some(OffsetDateTime::now_utc()),
-            user_agents: extract_user_agents(request).into_iter().collect(),
-            total_received: 1,
-            per_document_id: matches!(query, DocumentFetchKind::PerDocumentId),
-            per_filter: matches!(query, DocumentFetchKind::Normal { with_filter, .. } if *with_filter),
-            max_limit: limit,
-            max_offset: offset,
-        }
-    }
-    /// Aggregate one [DocumentsFetchAggregator] into another.
-    pub fn aggregate(&mut self, other: Self) {
-        if self.timestamp.is_none() {
-            self.timestamp = other.timestamp;
-        }
-        for user_agent in other.user_agents {
-            self.user_agents.insert(user_agent);
-        }
-        self.total_received = self.total_received.saturating_add(other.total_received);
-        self.per_document_id |= other.per_document_id;
-        self.per_filter |= other.per_filter;
-        self.max_limit = self.max_limit.max(other.max_limit);
-        self.max_offset = self.max_offset.max(other.max_offset);
-    }
-    pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
-        // if we had no timestamp it means we never encountered any events and
-        // thus we don't need to send this event.
-        let timestamp = self.timestamp?;
-        Some(Track {
-            timestamp: Some(timestamp),
-            user: user.clone(),
-            event: event_name.to_string(),
-            properties: serde_json::to_value(self).ok()?,
-            ..Default::default()
-        })
-    }
-}

View File

@@ -1,6 +1,5 @@
 use actix_web as aweb;
 use aweb::error::{JsonPayloadError, QueryPayloadError};
-use byte_unit::Byte;
 use meilisearch_types::document_formats::{DocumentFormatError, PayloadType};
 use meilisearch_types::error::{Code, ErrorCode, ResponseError};
 use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
@@ -27,8 +26,8 @@ pub enum MeilisearchHttpError {
     InvalidExpression(&'static [&'static str], Value),
     #[error("A {0} payload is missing.")]
     MissingPayload(PayloadType),
-    #[error("The provided payload reached the size limit. The maximum accepted payload size is {}.", Byte::from_bytes(*.0 as u64).get_appropriate_unit(true))]
-    PayloadTooLarge(usize),
+    #[error("The provided payload reached the size limit.")]
+    PayloadTooLarge,
     #[error("Two indexes must be given for each swap. The list `[{}]` contains {} indexes.",
         .0.iter().map(|uid| format!("\"{uid}\"")).collect::<Vec<_>>().join(", "), .0.len()
     )]
@@ -61,9 +60,9 @@ impl ErrorCode for MeilisearchHttpError {
             MeilisearchHttpError::MissingPayload(_) => Code::MissingPayload,
             MeilisearchHttpError::InvalidContentType(_, _) => Code::InvalidContentType,
             MeilisearchHttpError::DocumentNotFound(_) => Code::DocumentNotFound,
-            MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentFilter,
+            MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentDeleteFilter,
             MeilisearchHttpError::InvalidExpression(_, _) => Code::InvalidSearchFilter,
-            MeilisearchHttpError::PayloadTooLarge(_) => Code::PayloadTooLarge,
+            MeilisearchHttpError::PayloadTooLarge => Code::PayloadTooLarge,
             MeilisearchHttpError::SwapIndexPayloadWrongLength(_) => Code::InvalidSwapIndexes,
             MeilisearchHttpError::IndexUid(e) => e.error_code(),
             MeilisearchHttpError::SerdeJson(_) => Code::Internal,

View File

@@ -11,7 +11,6 @@ use crate::error::MeilisearchHttpError;
 pub struct Payload {
     payload: Decompress<dev::Payload>,
     limit: usize,
-    remaining: usize,
 }
 pub struct PayloadConfig {
@@ -44,7 +43,6 @@ impl FromRequest for Payload {
         ready(Ok(Payload {
             payload: Decompress::from_headers(payload.take(), req.headers()),
             limit,
-            remaining: limit,
         }))
     }
 }
@@ -56,14 +54,12 @@ impl Stream for Payload {
     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         match Pin::new(&mut self.payload).poll_next(cx) {
             Poll::Ready(Some(result)) => match result {
-                Ok(bytes) => match self.remaining.checked_sub(bytes.len()) {
+                Ok(bytes) => match self.limit.checked_sub(bytes.len()) {
                     Some(new_limit) => {
-                        self.remaining = new_limit;
+                        self.limit = new_limit;
                         Poll::Ready(Some(Ok(bytes)))
                     }
-                    None => {
-                        Poll::Ready(Some(Err(MeilisearchHttpError::PayloadTooLarge(self.limit))))
-                    }
+                    None => Poll::Ready(Some(Err(MeilisearchHttpError::PayloadTooLarge))),
                 },
                 x => Poll::Ready(Some(x.map_err(MeilisearchHttpError::from))),
             },

View File

@@ -232,7 +232,6 @@ fn open_or_create_database_unchecked(
     dumps_path: opt.dump_dir.clone(),
     task_db_size: opt.max_task_db_size.get_bytes() as usize,
     index_base_map_size: opt.max_index_size.get_bytes() as usize,
-    enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
     indexer_config: (&opt.indexer_options).try_into()?,
     autobatching_enabled: true,
     max_number_of_tasks: 1_000_000,

View File

@@ -29,11 +29,6 @@ fn setup(opt: &Opt) -> anyhow::Result<()> {
 async fn main() -> anyhow::Result<()> {
     let (opt, config_read_from) = Opt::try_build()?;
-    anyhow::ensure!(
-        !(cfg!(windows) && opt.experimental_reduce_indexing_memory_usage),
-        "The `experimental-reduce-indexing-memory-usage` flag is not supported on Windows"
-    );
     setup(&opt)?;
     match (opt.env.as_ref(), &opt.master_key) {

View File

@@ -48,8 +48,6 @@ const MEILI_IGNORE_DUMP_IF_DB_EXISTS: &str = "MEILI_IGNORE_DUMP_IF_DB_EXISTS";
 const MEILI_DUMP_DIR: &str = "MEILI_DUMP_DIR";
 const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL";
 const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS";
-const MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE: &str =
-    "MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE";
 const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
 const DEFAULT_DB_PATH: &str = "./data.ms";
@@ -295,11 +293,6 @@ pub struct Opt {
     #[serde(default)]
     pub experimental_enable_metrics: bool,
-    /// Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
-    #[clap(long, env = MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE)]
-    #[serde(default)]
-    pub experimental_reduce_indexing_memory_usage: bool,
     #[serde(flatten)]
     #[clap(flatten)]
     pub indexer_options: IndexerOpts,
@@ -392,7 +385,6 @@ impl Opt {
             #[cfg(all(not(debug_assertions), feature = "analytics"))]
             no_analytics,
             experimental_enable_metrics: enable_metrics_route,
-            experimental_reduce_indexing_memory_usage: reduce_indexing_memory_usage,
         } = self;
         export_to_env_if_not_present(MEILI_DB_PATH, db_path);
         export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
@@ -434,10 +426,6 @@ impl Opt {
             MEILI_EXPERIMENTAL_ENABLE_METRICS,
             enable_metrics_route.to_string(),
         );
-        export_to_env_if_not_present(
-            MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE,
-            reduce_indexing_memory_usage.to_string(),
-        );
         indexer_options.export_to_env();
     }

View File

@@ -29,7 +29,7 @@ use tempfile::tempfile;
 use tokio::fs::File;
 use tokio::io::{AsyncSeekExt, AsyncWriteExt, BufWriter};
-use crate::analytics::{Analytics, DocumentDeletionKind, DocumentFetchKind};
+use crate::analytics::{Analytics, DocumentDeletionKind};
 use crate::error::MeilisearchHttpError;
 use crate::error::PayloadError::ReceivePayload;
 use crate::extractors::authentication::policies::*;
@@ -97,14 +97,10 @@ pub async fn get_document(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
     document_param: web::Path<DocumentParam>,
     params: AwebQueryParameter<GetDocument, DeserrQueryParamError>,
-    req: HttpRequest,
-    analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     let DocumentParam { index_uid, document_id } = document_param.into_inner();
     let index_uid = IndexUid::try_from(index_uid)?;
-    analytics.get_fetch_documents(&DocumentFetchKind::PerDocumentId, &req);
     let GetDocument { fields } = params.into_inner();
     let attributes_to_retrieve = fields.merge_star_and_none();
@@ -165,31 +161,16 @@ pub async fn documents_by_query_post(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
     body: AwebJson<BrowseQuery, DeserrJsonError>,
-    req: HttpRequest,
-    analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!("called with body: {:?}", body);
-    let body = body.into_inner();
-    analytics.post_fetch_documents(
-        &DocumentFetchKind::Normal {
-            with_filter: body.filter.is_some(),
-            limit: body.limit,
-            offset: body.offset,
-        },
-        &req,
-    );
-    documents_by_query(&index_scheduler, index_uid, body)
+    documents_by_query(&index_scheduler, index_uid, body.into_inner())
 }
 pub async fn get_documents(
     index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
     params: AwebQueryParameter<BrowseQueryGet, DeserrQueryParamError>,
-    req: HttpRequest,
-    analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!("called with params: {:?}", params);
@@ -210,15 +191,6 @@ pub async fn get_documents(
         filter,
     };
-    analytics.get_fetch_documents(
-        &DocumentFetchKind::Normal {
-            with_filter: query.filter.is_some(),
-            limit: query.limit,
-            offset: query.offset,
-        },
-        &req,
-    );
     documents_by_query(&index_scheduler, index_uid, query)
 }
@@ -486,7 +458,7 @@ pub async fn delete_documents_batch(
 #[derive(Debug, Deserr)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
 pub struct DocumentDeletionByFilter {
-    #[deserr(error = DeserrJsonError<InvalidDocumentFilter>, missing_field_error = DeserrJsonError::missing_document_filter)]
+    #[deserr(error = DeserrJsonError<InvalidDocumentDeleteFilter>)]
     filter: Value,
 }
@@ -508,8 +480,8 @@ pub async fn delete_documents_by_filter(
     || -> Result<_, ResponseError> {
         Ok(crate::search::parse_filter(&filter)?.ok_or(MeilisearchHttpError::EmptyFilter)?)
     }()
-    // and whatever was the error, the error code should always be an InvalidDocumentFilter
-    .map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentFilter))?;
+    // and whatever was the error, the error code should always be an InvalidDocumentDeleteFilter
+    .map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentDeleteFilter))?;
     let task = KindWithContent::DocumentDeletionByFilter { index_uid, filter_expr: filter };
     let task: SummarizedTaskView =
@@ -568,12 +540,7 @@ fn retrieve_documents<S: AsRef<str>>(
     };
     let candidates = if let Some(filter) = filter {
-        filter.evaluate(&rtxn, index).map_err(|err| match err {
-            milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
-                ResponseError::from_msg(err.to_string(), Code::InvalidDocumentFilter)
-            }
-            e => e.into(),
-        })?
+        filter.evaluate(&rtxn, index)?
     } else {
         index.documents_ids(&rtxn)?
     };

View File

@@ -99,7 +99,7 @@ pub struct DetailsView {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub deleted_tasks: Option<Option<u64>>, pub deleted_tasks: Option<Option<u64>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub original_filter: Option<Option<String>>, pub original_filter: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub dump_uid: Option<Option<String>>, pub dump_uid: Option<Option<String>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
@@ -131,13 +131,12 @@ impl From<Details> for DetailsView {
} => DetailsView { } => DetailsView {
provided_ids: Some(received_document_ids), provided_ids: Some(received_document_ids),
deleted_documents: Some(deleted_documents), deleted_documents: Some(deleted_documents),
original_filter: Some(None),
..DetailsView::default() ..DetailsView::default()
}, },
Details::DocumentDeletionByFilter { original_filter, deleted_documents } => { Details::DocumentDeletionByFilter { original_filter, deleted_documents } => {
DetailsView { DetailsView {
provided_ids: Some(0), provided_ids: Some(0),
original_filter: Some(Some(original_filter)), original_filter: Some(original_filter),
deleted_documents: Some(deleted_documents), deleted_documents: Some(deleted_documents),
..DetailsView::default() ..DetailsView::default()
} }
@@ -149,7 +148,7 @@ impl From<Details> for DetailsView {
DetailsView { DetailsView {
matched_tasks: Some(matched_tasks), matched_tasks: Some(matched_tasks),
canceled_tasks: Some(canceled_tasks), canceled_tasks: Some(canceled_tasks),
original_filter: Some(Some(original_filter)), original_filter: Some(original_filter),
..DetailsView::default() ..DetailsView::default()
} }
} }
@@ -157,7 +156,7 @@ impl From<Details> for DetailsView {
DetailsView { DetailsView {
matched_tasks: Some(matched_tasks), matched_tasks: Some(matched_tasks),
deleted_tasks: Some(deleted_tasks), deleted_tasks: Some(deleted_tasks),
original_filter: Some(Some(original_filter)), original_filter: Some(original_filter),
..DetailsView::default() ..DetailsView::default()
} }
} }
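
As a reading aid for the `Option<Option<String>>` form of `original_filter` in this hunk: the outer `Option` is what `skip_serializing_if` inspects, while the inner `Option` allows an explicit `null` in the serialized task details. A minimal standalone serde sketch (editor's example, not the meilisearch types) of the three possible outputs:

use serde::Serialize;

// Editor's sketch: the field can be omitted, be an explicit null, or carry the
// filter string, depending on which layer of the Option is None.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct DetailsSketch {
    #[serde(skip_serializing_if = "Option::is_none")]
    original_filter: Option<Option<String>>,
}

fn main() {
    let omitted = DetailsSketch { original_filter: None };    // {}
    let null = DetailsSketch { original_filter: Some(None) }; // {"originalFilter":null}
    let set = DetailsSketch { original_filter: Some(Some("doggo = bernese".into())) };
    for details in [omitted, null, set] {
        println!("{}", serde_json::to_string(&details).unwrap());
    }
}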
@@ -730,7 +729,7 @@ mod tests {
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err(); let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
snapshot!(meili_snap::json_string!(err), @r###" snapshot!(meili_snap::json_string!(err), @r###"
{ {
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.", "message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
"code": "invalid_task_types", "code": "invalid_task_types",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types" "link": "https://docs.meilisearch.com/errors#invalid_task_types"

View File

@@ -16,11 +16,8 @@ pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'
("GET", "/indexes/products/search") => hashset!{"search", "*"}, ("GET", "/indexes/products/search") => hashset!{"search", "*"},
("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"}, ("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
("GET", "/indexes/products/documents") => hashset!{"documents.get", "documents.*", "*"}, ("GET", "/indexes/products/documents") => hashset!{"documents.get", "documents.*", "*"},
("POST", "/indexes/products/documents/fetch") => hashset!{"documents.get", "documents.*", "*"},
("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"}, ("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"},
("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"}, ("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"},
("POST", "/indexes/products/documents/delete-batch") => hashset!{"documents.delete", "documents.*", "*"},
("POST", "/indexes/products/documents/delete") => hashset!{"documents.delete", "documents.*", "*"},
("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"}, ("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"},
("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"}, ("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"},
("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"}, ("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"},

View File

@@ -1781,7 +1781,7 @@ async fn error_add_documents_payload_size() {
snapshot!(json_string!(response, { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }), snapshot!(json_string!(response, { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###" @r###"
{ {
"message": "The provided payload reached the size limit. The maximum accepted payload size is 10.00 MiB.", "message": "The provided payload reached the size limit.",
"code": "payload_too_large", "code": "payload_too_large",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#payload_too_large" "link": "https://docs.meilisearch.com/errors#payload_too_large"

View File

@@ -154,19 +154,6 @@ async fn delete_document_by_filter() {
) )
.await; .await;
index.wait_task(1).await; index.wait_task(1).await;
let (stats, _) = index.stats().await;
snapshot!(json_string!(stats), @r###"
{
"numberOfDocuments": 4,
"isIndexing": false,
"fieldDistribution": {
"color": 3,
"id": 4
}
}
"###);
let (response, code) = let (response, code) =
index.delete_document_by_filter(json!({ "filter": "color = blue"})).await; index.delete_document_by_filter(json!({ "filter": "color = blue"})).await;
snapshot!(code, @"202 Accepted"); snapshot!(code, @"202 Accepted");
@@ -201,18 +188,6 @@ async fn delete_document_by_filter() {
} }
"###); "###);
let (stats, _) = index.stats().await;
snapshot!(json_string!(stats), @r###"
{
"numberOfDocuments": 2,
"isIndexing": false,
"fieldDistribution": {
"color": 1,
"id": 2
}
}
"###);
let (documents, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await; let (documents, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"200 OK"); snapshot!(code, @"200 OK");
snapshot!(json_string!(documents), @r###" snapshot!(json_string!(documents), @r###"
@@ -266,18 +241,6 @@ async fn delete_document_by_filter() {
} }
"###); "###);
let (stats, _) = index.stats().await;
snapshot!(json_string!(stats), @r###"
{
"numberOfDocuments": 1,
"isIndexing": false,
"fieldDistribution": {
"color": 1,
"id": 1
}
}
"###);
let (documents, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await; let (documents, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"200 OK"); snapshot!(code, @"200 OK");
snapshot!(json_string!(documents), @r###" snapshot!(json_string!(documents), @r###"

View File

@@ -180,9 +180,9 @@ async fn get_all_documents_bad_filter() {
snapshot!(json_string!(response), @r###" snapshot!(json_string!(response), @r###"
{ {
"message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo=bernese", "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo=bernese",
"code": "invalid_document_filter", "code": "invalid_search_filter",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter" "link": "https://docs.meilisearch.com/errors#invalid_search_filter"
} }
"###); "###);
} }
@@ -547,9 +547,9 @@ async fn delete_document_by_filter() {
snapshot!(json_string!(response), @r###" snapshot!(json_string!(response), @r###"
{ {
"message": "Invalid syntax for the filter parameter: `expected String, Array, found: true`.", "message": "Invalid syntax for the filter parameter: `expected String, Array, found: true`.",
"code": "invalid_document_filter", "code": "invalid_document_delete_filter",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter" "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
} }
"###); "###);
@@ -559,9 +559,9 @@ async fn delete_document_by_filter() {
snapshot!(json_string!(response), @r###" snapshot!(json_string!(response), @r###"
{ {
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `hello`.\n1:6 hello", "message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `hello`.\n1:6 hello",
"code": "invalid_document_filter", "code": "invalid_document_delete_filter",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter" "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
} }
"###); "###);
@@ -571,21 +571,9 @@ async fn delete_document_by_filter() {
snapshot!(json_string!(response), @r###" snapshot!(json_string!(response), @r###"
{ {
"message": "Sending an empty filter is forbidden.", "message": "Sending an empty filter is forbidden.",
"code": "invalid_document_filter", "code": "invalid_document_delete_filter",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter" "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
}
"###);
// do not send any filter
let (response, code) = index.delete_document_by_filter(json!({})).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Missing field `filter`",
"code": "missing_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#missing_document_filter"
} }
"###); "###);
@@ -642,9 +630,9 @@ async fn delete_document_by_filter() {
}, },
"error": { "error": {
"message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese", "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
"code": "invalid_document_filter", "code": "invalid_search_filter",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter" "link": "https://docs.meilisearch.com/errors#invalid_search_filter"
}, },
"duration": "[duration]", "duration": "[duration]",
"enqueuedAt": "[date]", "enqueuedAt": "[date]",
@@ -676,9 +664,9 @@ async fn delete_document_by_filter() {
}, },
"error": { "error": {
"message": "Attribute `catto` is not filterable. Available filterable attributes are: `doggo`.\n1:6 catto = jorts", "message": "Attribute `catto` is not filterable. Available filterable attributes are: `doggo`.\n1:6 catto = jorts",
"code": "invalid_document_filter", "code": "invalid_search_filter",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter" "link": "https://docs.meilisearch.com/errors#invalid_search_filter"
}, },
"duration": "[duration]", "duration": "[duration]",
"enqueuedAt": "[date]", "enqueuedAt": "[date]",
@@ -760,27 +748,4 @@ async fn fetch_document_by_filter() {
"link": "https://docs.meilisearch.com/errors#invalid_document_filter" "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
} }
"###); "###);
let (response, code) = index.get_document_by_filter(json!({ "filter": "cool doggo" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `cool doggo`.\n1:11 cool doggo",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
}
"###);
let (response, code) =
index.get_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Attribute `doggo` is not filterable. Available filterable attributes are: `color`.\n1:6 doggo = bernese",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
}
"###);
} }

View File

@@ -946,7 +946,7 @@ async fn sort_unset_ranking_rule() {
index.wait_task(1).await; index.wait_task(1).await;
let expected_response = json!({ let expected_response = json!({
"message": "You must specify where `sort` is listed in the rankingRules setting to use the sort parameter at search time.", "message": "The sort ranking rule must be specified in the ranking rules settings to use the sort parameter at search time.",
"code": "invalid_search_sort", "code": "invalid_search_sort",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_sort" "link": "https://docs.meilisearch.com/errors#invalid_search_sort"

View File

@@ -97,7 +97,7 @@ async fn task_bad_types() {
snapshot!(code, @"400 Bad Request"); snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###" snapshot!(json_string!(response), @r###"
{ {
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.", "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
"code": "invalid_task_types", "code": "invalid_task_types",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types" "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -108,7 +108,7 @@ async fn task_bad_types() {
snapshot!(code, @"400 Bad Request"); snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###" snapshot!(json_string!(response), @r###"
{ {
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.", "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
"code": "invalid_task_types", "code": "invalid_task_types",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types" "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -119,7 +119,7 @@ async fn task_bad_types() {
snapshot!(code, @"400 Bad Request"); snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###" snapshot!(json_string!(response), @r###"
{ {
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.", "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
"code": "invalid_task_types", "code": "invalid_task_types",
"type": "invalid_request", "type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types" "link": "https://docs.meilisearch.com/errors#invalid_task_types"

View File

@@ -413,7 +413,7 @@ async fn test_summarized_document_addition_or_update() {
} }
#[actix_web::test] #[actix_web::test]
async fn test_summarized_delete_documents_by_batch() { async fn test_summarized_delete_batch() {
let server = Server::new().await; let server = Server::new().await;
let index = server.index("test"); let index = server.index("test");
index.delete_batch(vec![1, 2, 3]).await; index.delete_batch(vec![1, 2, 3]).await;
@@ -430,8 +430,7 @@ async fn test_summarized_delete_documents_by_batch() {
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"providedIds": 3, "providedIds": 3,
"deletedDocuments": 0, "deletedDocuments": 0
"originalFilter": null
}, },
"error": { "error": {
"message": "Index `test` not found.", "message": "Index `test` not found.",
@@ -461,8 +460,7 @@ async fn test_summarized_delete_documents_by_batch() {
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"providedIds": 1, "providedIds": 1,
"deletedDocuments": 0, "deletedDocuments": 0
"originalFilter": null
}, },
"error": null, "error": null,
"duration": "[duration]", "duration": "[duration]",
@@ -474,100 +472,7 @@ async fn test_summarized_delete_documents_by_batch() {
} }
#[actix_web::test] #[actix_web::test]
async fn test_summarized_delete_documents_by_filter() { async fn test_summarized_delete_document() {
let server = Server::new().await;
let index = server.index("test");
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(0).await;
let (task, _) = index.get_task(0).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
@r###"
{
"uid": 0,
"indexUid": "test",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
"details": {
"providedIds": 0,
"deletedDocuments": 0,
"originalFilter": "\"doggo = bernese\""
},
"error": {
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
index.create(None).await;
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(2).await;
let (task, _) = index.get_task(2).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
@r###"
{
"uid": 2,
"indexUid": "test",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
"details": {
"providedIds": 0,
"deletedDocuments": 0,
"originalFilter": "\"doggo = bernese\""
},
"error": {
"message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(4).await;
let (task, _) = index.get_task(4).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
@r###"
{
"uid": 4,
"indexUid": "test",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
"details": {
"providedIds": 0,
"deletedDocuments": 0,
"originalFilter": "\"doggo = bernese\""
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
}
#[actix_web::test]
async fn test_summarized_delete_document_by_id() {
let server = Server::new().await; let server = Server::new().await;
let index = server.index("test"); let index = server.index("test");
index.delete_document(1).await; index.delete_document(1).await;
@@ -584,8 +489,7 @@ async fn test_summarized_delete_document_by_id() {
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"providedIds": 1, "providedIds": 1,
"deletedDocuments": 0, "deletedDocuments": 0
"originalFilter": null
}, },
"error": { "error": {
"message": "Index `test` not found.", "message": "Index `test` not found.",
@@ -615,8 +519,7 @@ async fn test_summarized_delete_document_by_id() {
"canceledBy": null, "canceledBy": null,
"details": { "details": {
"providedIds": 1, "providedIds": 1,
"deletedDocuments": 0, "deletedDocuments": 0
"originalFilter": null
}, },
"error": null, "error": null,
"duration": "[duration]", "duration": "[duration]",

View File

@@ -25,13 +25,8 @@ flatten-serde-json = { path = "../flatten-serde-json" }
fst = "0.4.7" fst = "0.4.7"
fxhash = "0.2.1" fxhash = "0.2.1"
geoutils = "0.5.1" geoutils = "0.5.1"
grenad = { version = "0.4.4", default-features = false, features = [ grenad = { version = "0.4.4", default-features = false, features = ["tempfile"] }
"tempfile", heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.5", default-features = false, features = ["lmdb", "sync-read-txn"] }
] }
heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.6", default-features = false, features = [
"lmdb",
"sync-read-txn",
] }
json-depth-checker = { path = "../json-depth-checker" } json-depth-checker = { path = "../json-depth-checker" }
levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] } levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }
memmap2 = "0.5.10" memmap2 = "0.5.10"
@@ -44,17 +39,12 @@ rstar = { version = "0.10.0", features = ["serde"] }
serde = { version = "1.0.160", features = ["derive"] } serde = { version = "1.0.160", features = ["derive"] }
serde_json = { version = "1.0.95", features = ["preserve_order"] } serde_json = { version = "1.0.95", features = ["preserve_order"] }
slice-group-by = "0.3.0" slice-group-by = "0.3.0"
smallstr = { version = "0.3.0", features = ["serde"] } smallstr = { version = "0.3.0", features = ["serde"] }
smallvec = "1.10.0" smallvec = "1.10.0"
smartstring = "1.0.1" smartstring = "1.0.1"
tempfile = "3.5.0" tempfile = "3.5.0"
thiserror = "1.0.40" thiserror = "1.0.40"
time = { version = "0.3.20", features = [ time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] }
"serde-well-known",
"formatting",
"parsing",
"macros",
] }
uuid = { version = "1.3.1", features = ["v4"] } uuid = { version = "1.3.1", features = ["v4"] }
filter-parser = { path = "../filter-parser" } filter-parser = { path = "../filter-parser" }
@@ -73,13 +63,13 @@ big_s = "1.0.2"
insta = "1.29.0" insta = "1.29.0"
maplit = "1.0.2" maplit = "1.0.2"
md5 = "0.7.0" md5 = "0.7.0"
rand = { version = "0.8.5", features = ["small_rng"] } rand = {version = "0.8.5", features = ["small_rng"] }
[target.'cfg(fuzzing)'.dev-dependencies] [target.'cfg(fuzzing)'.dev-dependencies]
fuzzcheck = "0.12.1" fuzzcheck = "0.12.1"
[features] [features]
all-tokenizations = ["charabia/default"] all-tokenizations = [ "charabia/default" ]
# Use POSIX semaphores instead of SysV semaphores in LMDB # Use POSIX semaphores instead of SysV semaphores in LMDB
# For more information on this feature, see heed's Cargo.toml # For more information on this feature, see heed's Cargo.toml

View File

@@ -126,7 +126,7 @@ only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and undersco
InvalidSortableAttribute { field: String, valid_fields: BTreeSet<String> }, InvalidSortableAttribute { field: String, valid_fields: BTreeSet<String> },
#[error("{}", HeedError::BadOpenOptions)] #[error("{}", HeedError::BadOpenOptions)]
InvalidLmdbOpenOptions, InvalidLmdbOpenOptions,
#[error("You must specify where `sort` is listed in the rankingRules setting to use the sort parameter at search time.")] #[error("The sort ranking rule must be specified in the ranking rules settings to use the sort parameter at search time.")]
SortRankingRuleMissing, SortRankingRuleMissing,
#[error("The database file is in an invalid state.")] #[error("The database file is in an invalid state.")]
InvalidStoreFile, InvalidStoreFile,

View File

@@ -170,46 +170,33 @@ impl Index {
unsafe { options.flag(Flags::MdbAlwaysFreePages) }; unsafe { options.flag(Flags::MdbAlwaysFreePages) };
let env = options.open(path)?; let env = options.open(path)?;
let mut wtxn = env.write_txn()?; let main = env.create_poly_database(Some(MAIN))?;
let main = env.create_poly_database(&mut wtxn, Some(MAIN))?; let word_docids = env.create_database(Some(WORD_DOCIDS))?;
let word_docids = env.create_database(&mut wtxn, Some(WORD_DOCIDS))?; let exact_word_docids = env.create_database(Some(EXACT_WORD_DOCIDS))?;
let exact_word_docids = env.create_database(&mut wtxn, Some(EXACT_WORD_DOCIDS))?; let word_prefix_docids = env.create_database(Some(WORD_PREFIX_DOCIDS))?;
let word_prefix_docids = env.create_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?; let exact_word_prefix_docids = env.create_database(Some(EXACT_WORD_PREFIX_DOCIDS))?;
let exact_word_prefix_docids = let docid_word_positions = env.create_database(Some(DOCID_WORD_POSITIONS))?;
env.create_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?; let word_pair_proximity_docids = env.create_database(Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
let docid_word_positions = env.create_database(&mut wtxn, Some(DOCID_WORD_POSITIONS))?; let script_language_docids = env.create_database(Some(SCRIPT_LANGUAGE_DOCIDS))?;
let word_pair_proximity_docids =
env.create_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
let script_language_docids =
env.create_database(&mut wtxn, Some(SCRIPT_LANGUAGE_DOCIDS))?;
let word_prefix_pair_proximity_docids = let word_prefix_pair_proximity_docids =
env.create_database(&mut wtxn, Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?; env.create_database(Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?;
let prefix_word_pair_proximity_docids = let prefix_word_pair_proximity_docids =
env.create_database(&mut wtxn, Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?; env.create_database(Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?;
let word_position_docids = env.create_database(&mut wtxn, Some(WORD_POSITION_DOCIDS))?; let word_position_docids = env.create_database(Some(WORD_POSITION_DOCIDS))?;
let word_fid_docids = env.create_database(&mut wtxn, Some(WORD_FIELD_ID_DOCIDS))?; let word_fid_docids = env.create_database(Some(WORD_FIELD_ID_DOCIDS))?;
let field_id_word_count_docids = let field_id_word_count_docids = env.create_database(Some(FIELD_ID_WORD_COUNT_DOCIDS))?;
env.create_database(&mut wtxn, Some(FIELD_ID_WORD_COUNT_DOCIDS))?; let word_prefix_position_docids = env.create_database(Some(WORD_PREFIX_POSITION_DOCIDS))?;
let word_prefix_position_docids = let word_prefix_fid_docids = env.create_database(Some(WORD_PREFIX_FIELD_ID_DOCIDS))?;
env.create_database(&mut wtxn, Some(WORD_PREFIX_POSITION_DOCIDS))?; let facet_id_f64_docids = env.create_database(Some(FACET_ID_F64_DOCIDS))?;
let word_prefix_fid_docids = let facet_id_string_docids = env.create_database(Some(FACET_ID_STRING_DOCIDS))?;
env.create_database(&mut wtxn, Some(WORD_PREFIX_FIELD_ID_DOCIDS))?; let facet_id_exists_docids = env.create_database(Some(FACET_ID_EXISTS_DOCIDS))?;
let facet_id_f64_docids = env.create_database(&mut wtxn, Some(FACET_ID_F64_DOCIDS))?; let facet_id_is_null_docids = env.create_database(Some(FACET_ID_IS_NULL_DOCIDS))?;
let facet_id_string_docids = let facet_id_is_empty_docids = env.create_database(Some(FACET_ID_IS_EMPTY_DOCIDS))?;
env.create_database(&mut wtxn, Some(FACET_ID_STRING_DOCIDS))?;
let facet_id_exists_docids =
env.create_database(&mut wtxn, Some(FACET_ID_EXISTS_DOCIDS))?;
let facet_id_is_null_docids =
env.create_database(&mut wtxn, Some(FACET_ID_IS_NULL_DOCIDS))?;
let facet_id_is_empty_docids =
env.create_database(&mut wtxn, Some(FACET_ID_IS_EMPTY_DOCIDS))?;
let field_id_docid_facet_f64s = let field_id_docid_facet_f64s = env.create_database(Some(FIELD_ID_DOCID_FACET_F64S))?;
env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_F64S))?;
let field_id_docid_facet_strings = let field_id_docid_facet_strings =
env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_STRINGS))?; env.create_database(Some(FIELD_ID_DOCID_FACET_STRINGS))?;
let documents = env.create_database(&mut wtxn, Some(DOCUMENTS))?; let documents = env.create_database(Some(DOCUMENTS))?;
wtxn.commit()?;
Index::set_creation_dates(&env, main, created_at, updated_at)?; Index::set_creation_dates(&env, main, created_at, updated_at)?;
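
The variant of `Index::new` above that targets the heed `v0.12.6` tag (see the Cargo hunks earlier in this compare) creates every named database inside one explicit write transaction and commits it before the index is used. A minimal sketch of that pattern, with placeholder database names and only the signatures visible in this hunk assumed:

use std::path::Path;

use heed::types::Str;
use heed::{Database, Env, EnvOpenOptions};

// Editor's sketch, not the milli constructor: open an LMDB environment and
// create two named databases inside a single write transaction.
fn open_databases(path: &Path) -> heed::Result<Env> {
    let mut options = EnvOpenOptions::new();
    options.max_dbs(2); // room for the named databases created below
    let env = options.open(path)?;

    let mut wtxn = env.write_txn()?;
    let _main = env.create_poly_database(&mut wtxn, Some("main"))?;
    let _word_docids: Database<Str, Str> =
        env.create_database(&mut wtxn, Some("word-docids"))?;
    wtxn.commit()?; // the databases are only usable once the transaction is committed

    Ok(env)
}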

View File

@@ -2,7 +2,7 @@ use std::cmp;
use crate::{relative_from_absolute_position, Position}; use crate::{relative_from_absolute_position, Position};
pub const MAX_DISTANCE: u32 = 8; pub const MAX_DISTANCE: u32 = 4;
pub fn index_proximity(lhs: u32, rhs: u32) -> u32 { pub fn index_proximity(lhs: u32, rhs: u32) -> u32 {
if lhs <= rhs { if lhs <= rhs {

View File

@@ -125,12 +125,7 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
continue; continue;
} }
let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket( let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(ctx, logger, &ranking_rule_universes[cur_ranking_rule_index])? else {
ctx,
logger,
&ranking_rule_universes[cur_ranking_rule_index],
)?
else {
back!(); back!();
continue; continue;
}; };

View File

@@ -193,10 +193,9 @@ impl<'ctx, G: RankingRuleGraphTrait> RankingRule<'ctx, QueryGraph> for GraphBase
.all_costs .all_costs
.get(state.graph.query_graph.root_node) .get(state.graph.query_graph.root_node)
.iter() .iter()
.find(|c| **c >= state.cur_cost) .find(|c| **c >= state.cur_cost) else {
else { self.state = None;
self.state = None; return Ok(None);
return Ok(None);
}; };
state.cur_cost = cost + 1; state.cur_cost = cost + 1;

View File

@@ -4,6 +4,7 @@ use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::time::Instant; use std::time::Instant;
// use rand::random;
use roaring::RoaringBitmap; use roaring::RoaringBitmap;
use crate::search::new::interner::Interned; use crate::search::new::interner::Interned;
@@ -462,7 +463,7 @@ fill: \"#B6E2D3\"
shape: class shape: class
max_nbr_typo: {}", max_nbr_typo: {}",
term_subset.description(ctx), term_subset.description(ctx),
term_subset.max_typo_cost(ctx) term_subset.max_nbr_typos(ctx)
)?; )?;
for w in term_subset.all_single_words_except_prefix_db(ctx)? { for w in term_subset.all_single_words_except_prefix_db(ctx)? {
@@ -489,6 +490,13 @@ fill: \"#B6E2D3\"
} }
Ok(()) Ok(())
} }
// fn write_words_graph(&mut self, qg: QueryGraph) -> Result<()> {
// self.make_new_file_for_internal_state_if_needed()?;
// self.write_query_graph(&qg)?;
// Ok(())
// }
fn write_rr_graph<R: RankingRuleGraphTrait>( fn write_rr_graph<R: RankingRuleGraphTrait>(
&mut self, &mut self,
graph: &RankingRuleGraph<R>, graph: &RankingRuleGraph<R>,

View File

@@ -80,9 +80,7 @@ impl MatchingWords {
let word = self.word_interner.get(*word); let word = self.word_interner.get(*word);
// if the word is a prefix we match using starts_with. // if the word is a prefix we match using starts_with.
if located_words.is_prefix && token.lemma().starts_with(word) { if located_words.is_prefix && token.lemma().starts_with(word) {
let Some((char_index, c)) = let Some((char_index, c)) = word.char_indices().take(located_words.original_char_count).last() else {
word.char_indices().take(located_words.original_char_count).last()
else {
continue; continue;
}; };
let prefix_length = char_index + c.len_utf8(); let prefix_length = char_index + c.len_utf8();
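
The `char_indices().take(…).last()` computation above turns a character count into a byte length, so that prefixes containing multi-byte characters are measured correctly. The same idea as a standalone sketch (hypothetical helper name):

// Editor's sketch: byte length of the first `char_count` characters of `word`;
// None when the word is empty or `char_count` is 0.
fn prefix_byte_len(word: &str, char_count: usize) -> Option<usize> {
    word.char_indices()
        .take(char_count)
        .last()
        .map(|(byte_index, c)| byte_index + c.len_utf8())
}

// "é" takes two bytes in UTF-8, so the first two characters of "héllo" span three bytes:
// prefix_byte_len("héllo", 2) == Some(3)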

View File

@@ -28,14 +28,16 @@ pub enum ZeroOrOneTypo {
impl Interned<QueryTerm> { impl Interned<QueryTerm> {
pub fn compute_fully_if_needed(self, ctx: &mut SearchContext) -> Result<()> { pub fn compute_fully_if_needed(self, ctx: &mut SearchContext) -> Result<()> {
let s = ctx.term_interner.get_mut(self); let s = ctx.term_interner.get_mut(self);
if s.max_levenshtein_distance <= 1 && s.one_typo.is_uninit() { if s.max_nbr_typos == 0 {
s.one_typo = Lazy::Init(OneTypoTerm::default());
s.two_typo = Lazy::Init(TwoTypoTerm::default());
} else if s.max_nbr_typos == 1 && s.one_typo.is_uninit() {
assert!(s.two_typo.is_uninit()); assert!(s.two_typo.is_uninit());
// Initialize one_typo subterm even if max_nbr_typo is 0 because of split words
self.initialize_one_typo_subterm(ctx)?; self.initialize_one_typo_subterm(ctx)?;
let s = ctx.term_interner.get_mut(self); let s = ctx.term_interner.get_mut(self);
assert!(s.one_typo.is_init()); assert!(s.one_typo.is_init());
s.two_typo = Lazy::Init(TwoTypoTerm::default()); s.two_typo = Lazy::Init(TwoTypoTerm::default());
} else if s.max_levenshtein_distance > 1 && s.two_typo.is_uninit() { } else if s.max_nbr_typos > 1 && s.two_typo.is_uninit() {
assert!(s.two_typo.is_uninit()); assert!(s.two_typo.is_uninit());
self.initialize_one_and_two_typo_subterm(ctx)?; self.initialize_one_and_two_typo_subterm(ctx)?;
let s = ctx.term_interner.get_mut(self); let s = ctx.term_interner.get_mut(self);
@@ -185,7 +187,7 @@ pub fn partially_initialized_term_from_word(
original: ctx.word_interner.insert(word.to_owned()), original: ctx.word_interner.insert(word.to_owned()),
ngram_words: None, ngram_words: None,
is_prefix: false, is_prefix: false,
max_levenshtein_distance: 0, max_nbr_typos: 0,
zero_typo: <_>::default(), zero_typo: <_>::default(),
one_typo: Lazy::Init(<_>::default()), one_typo: Lazy::Init(<_>::default()),
two_typo: Lazy::Init(<_>::default()), two_typo: Lazy::Init(<_>::default()),
@@ -256,7 +258,7 @@ pub fn partially_initialized_term_from_word(
Ok(QueryTerm { Ok(QueryTerm {
original: word_interned, original: word_interned,
ngram_words: None, ngram_words: None,
max_levenshtein_distance: max_typo, max_nbr_typos: max_typo,
is_prefix, is_prefix,
zero_typo, zero_typo,
one_typo: Lazy::Uninit, one_typo: Lazy::Uninit,
@@ -275,16 +277,7 @@ fn find_split_words(ctx: &mut SearchContext, word: &str) -> Result<Option<Intern
impl Interned<QueryTerm> { impl Interned<QueryTerm> {
fn initialize_one_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> { fn initialize_one_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
let self_mut = ctx.term_interner.get_mut(self); let self_mut = ctx.term_interner.get_mut(self);
let QueryTerm { original, is_prefix, one_typo, .. } = self_mut;
let allows_split_words = self_mut.allows_split_words();
let QueryTerm {
original,
is_prefix,
one_typo,
max_levenshtein_distance: max_nbr_typos,
..
} = self_mut;
let original = *original; let original = *original;
let is_prefix = *is_prefix; let is_prefix = *is_prefix;
// let original_str = ctx.word_interner.get(*original).to_owned(); // let original_str = ctx.word_interner.get(*original).to_owned();
@@ -293,33 +286,26 @@ impl Interned<QueryTerm> {
} }
let mut one_typo_words = BTreeSet::new(); let mut one_typo_words = BTreeSet::new();
if *max_nbr_typos > 0 { find_zero_one_typo_derivations(ctx, original, is_prefix, |derived_word, nbr_typos| {
find_zero_one_typo_derivations(ctx, original, is_prefix, |derived_word, nbr_typos| { match nbr_typos {
match nbr_typos { ZeroOrOneTypo::Zero => {}
ZeroOrOneTypo::Zero => {} ZeroOrOneTypo::One => {
ZeroOrOneTypo::One => { if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT {
if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT { one_typo_words.insert(derived_word);
one_typo_words.insert(derived_word); } else {
} else { return Ok(ControlFlow::Break(()));
return Ok(ControlFlow::Break(()));
}
} }
} }
Ok(ControlFlow::Continue(())) }
})?; Ok(ControlFlow::Continue(()))
} })?;
let original_str = ctx.word_interner.get(original).to_owned();
let split_words = if allows_split_words { let split_words = find_split_words(ctx, original_str.as_str())?;
let original_str = ctx.word_interner.get(original).to_owned();
find_split_words(ctx, original_str.as_str())?
} else {
None
};
let self_mut = ctx.term_interner.get_mut(self); let self_mut = ctx.term_interner.get_mut(self);
// Only add the split words to the derivations if: // Only add the split words to the derivations if:
// 1. the term is neither an ngram nor a phrase; OR // 1. the term is not an ngram; OR
// 2. the term is an ngram, but the split words are different from the ngram's component words // 2. the term is an ngram, but the split words are different from the ngram's component words
let split_words = if let Some((ngram_words, split_words)) = let split_words = if let Some((ngram_words, split_words)) =
self_mut.ngram_words.as_ref().zip(split_words.as_ref()) self_mut.ngram_words.as_ref().zip(split_words.as_ref())
@@ -341,13 +327,7 @@ impl Interned<QueryTerm> {
} }
fn initialize_one_and_two_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> { fn initialize_one_and_two_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
let self_mut = ctx.term_interner.get_mut(self); let self_mut = ctx.term_interner.get_mut(self);
let QueryTerm { let QueryTerm { original, is_prefix, two_typo, .. } = self_mut;
original,
is_prefix,
two_typo,
max_levenshtein_distance: max_nbr_typos,
..
} = self_mut;
let original_str = ctx.word_interner.get(*original).to_owned(); let original_str = ctx.word_interner.get(*original).to_owned();
if two_typo.is_init() { if two_typo.is_init() {
return Ok(()); return Ok(());
@@ -355,37 +335,34 @@ impl Interned<QueryTerm> {
let mut one_typo_words = BTreeSet::new(); let mut one_typo_words = BTreeSet::new();
let mut two_typo_words = BTreeSet::new(); let mut two_typo_words = BTreeSet::new();
if *max_nbr_typos > 0 { find_zero_one_two_typo_derivations(
find_zero_one_two_typo_derivations( *original,
*original, *is_prefix,
*is_prefix, ctx.index.words_fst(ctx.txn)?,
ctx.index.words_fst(ctx.txn)?, &mut ctx.word_interner,
&mut ctx.word_interner, |derived_word, nbr_typos| {
|derived_word, nbr_typos| { if one_typo_words.len() >= limits::MAX_ONE_TYPO_COUNT
if one_typo_words.len() >= limits::MAX_ONE_TYPO_COUNT && two_typo_words.len() >= limits::MAX_TWO_TYPOS_COUNT
&& two_typo_words.len() >= limits::MAX_TWO_TYPOS_COUNT {
{ // No chance we will add either one- or two-typo derivations anymore, stop iterating.
// No chance we will add either one- or two-typo derivations anymore, stop iterating. return Ok(ControlFlow::Break(()));
return Ok(ControlFlow::Break(())); }
} match nbr_typos {
match nbr_typos { NumberOfTypos::Zero => {}
NumberOfTypos::Zero => {} NumberOfTypos::One => {
NumberOfTypos::One => { if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT {
if one_typo_words.len() < limits::MAX_ONE_TYPO_COUNT { one_typo_words.insert(derived_word);
one_typo_words.insert(derived_word);
}
}
NumberOfTypos::Two => {
if two_typo_words.len() < limits::MAX_TWO_TYPOS_COUNT {
two_typo_words.insert(derived_word);
}
} }
} }
Ok(ControlFlow::Continue(())) NumberOfTypos::Two => {
}, if two_typo_words.len() < limits::MAX_TWO_TYPOS_COUNT {
)?; two_typo_words.insert(derived_word);
} }
}
}
Ok(ControlFlow::Continue(()))
},
)?;
let split_words = find_split_words(ctx, original_str.as_str())?; let split_words = find_split_words(ctx, original_str.as_str())?;
let self_mut = ctx.term_interner.get_mut(self); let self_mut = ctx.term_interner.get_mut(self);
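
Both sides of this hunk keep the comment "Only add the split words to the derivations if…", which is easy to lose in the interleaved diff. Reduced to a predicate, the rule reads roughly like the sketch below (hypothetical helper, simplified types; the extra phrase condition handled via `allows_split_words` on one side of the diff is left out):

// Editor's sketch: keep a split-word derivation unless the term is an ngram
// whose component words are exactly the split pair.
fn keep_split_words(ngram_words: Option<&[String]>, split: Option<(&str, &str)>) -> bool {
    match (ngram_words, split) {
        (_, None) => false,      // nothing was split, nothing to keep
        (None, Some(_)) => true, // plain word: keep whatever split was found
        (Some(words), Some((left, right))) => {
            words.len() != 2 || words[0] != left || words[1] != right
        }
    }
}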

View File

@@ -43,7 +43,7 @@ pub struct QueryTermSubset {
pub struct QueryTerm { pub struct QueryTerm {
original: Interned<String>, original: Interned<String>,
ngram_words: Option<Vec<Interned<String>>>, ngram_words: Option<Vec<Interned<String>>>,
max_levenshtein_distance: u8, max_nbr_typos: u8,
is_prefix: bool, is_prefix: bool,
zero_typo: ZeroTypoTerm, zero_typo: ZeroTypoTerm,
// May not be computed yet // May not be computed yet
@@ -176,7 +176,9 @@ impl QueryTermSubset {
pub fn use_prefix_db(&self, ctx: &SearchContext) -> Option<Word> { pub fn use_prefix_db(&self, ctx: &SearchContext) -> Option<Word> {
let original = ctx.term_interner.get(self.original); let original = ctx.term_interner.get(self.original);
let Some(use_prefix_db) = original.zero_typo.use_prefix_db else { return None }; let Some(use_prefix_db) = original.zero_typo.use_prefix_db else {
return None
};
let word = match &self.zero_typo_subset { let word = match &self.zero_typo_subset {
NTypoTermSubset::All => Some(use_prefix_db), NTypoTermSubset::All => Some(use_prefix_db),
NTypoTermSubset::Subset { words, phrases: _ } => { NTypoTermSubset::Subset { words, phrases: _ } => {
@@ -262,15 +264,13 @@ impl QueryTermSubset {
match &self.one_typo_subset { match &self.one_typo_subset {
NTypoTermSubset::All => { NTypoTermSubset::All => {
let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
else {
panic!() panic!()
}; };
result.extend(one_typo.iter().copied().map(Word::Derived)) result.extend(one_typo.iter().copied().map(Word::Derived))
} }
NTypoTermSubset::Subset { words, phrases: _ } => { NTypoTermSubset::Subset { words, phrases: _ } => {
let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
else {
panic!() panic!()
}; };
result.extend(one_typo.intersection(words).copied().map(Word::Derived)); result.extend(one_typo.intersection(words).copied().map(Word::Derived));
@@ -280,11 +280,15 @@ impl QueryTermSubset {
match &self.two_typo_subset { match &self.two_typo_subset {
NTypoTermSubset::All => { NTypoTermSubset::All => {
let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() }; let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
panic!()
};
result.extend(two_typos.iter().copied().map(Word::Derived)); result.extend(two_typos.iter().copied().map(Word::Derived));
} }
NTypoTermSubset::Subset { words, phrases: _ } => { NTypoTermSubset::Subset { words, phrases: _ } => {
let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() }; let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
panic!()
};
result.extend(two_typos.intersection(words).copied().map(Word::Derived)); result.extend(two_typos.intersection(words).copied().map(Word::Derived));
} }
NTypoTermSubset::Nothing => {} NTypoTermSubset::Nothing => {}
@@ -308,15 +312,13 @@ impl QueryTermSubset {
match &self.one_typo_subset { match &self.one_typo_subset {
NTypoTermSubset::All => { NTypoTermSubset::All => {
let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
else {
panic!(); panic!();
}; };
result.extend(split_words.iter().copied()); result.extend(split_words.iter().copied());
} }
NTypoTermSubset::Subset { phrases, .. } => { NTypoTermSubset::Subset { phrases, .. } => {
let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
else {
panic!(); panic!();
}; };
if let Some(split_words) = split_words { if let Some(split_words) = split_words {
@@ -340,16 +342,10 @@ impl QueryTermSubset {
} }
None None
} }
pub fn max_typo_cost(&self, ctx: &SearchContext) -> u8 { pub fn max_nbr_typos(&self, ctx: &SearchContext) -> u8 {
let t = ctx.term_interner.get(self.original); let t = ctx.term_interner.get(self.original);
match t.max_levenshtein_distance { match t.max_nbr_typos {
0 => { 0 => 0,
if t.allows_split_words() {
1
} else {
0
}
}
1 => { 1 => {
if self.one_typo_subset.is_empty() { if self.one_typo_subset.is_empty() {
0 0
@@ -442,9 +438,6 @@ impl QueryTerm {
self.zero_typo.is_empty() && one_typo.is_empty() && two_typo.is_empty() self.zero_typo.is_empty() && one_typo.is_empty() && two_typo.is_empty()
} }
fn allows_split_words(&self) -> bool {
self.zero_typo.phrase.is_none()
}
} }
impl Interned<QueryTerm> { impl Interned<QueryTerm> {

View File

@@ -77,9 +77,13 @@ pub fn located_query_terms_from_tokens(
} }
} }
TokenKind::Separator(separator_kind) => { TokenKind::Separator(separator_kind) => {
// add penalty for hard separators match separator_kind {
if let SeparatorKind::Hard = separator_kind { SeparatorKind::Hard => {
position = position.wrapping_add(1); position += 1;
}
SeparatorKind::Soft => {
position += 0;
}
} }
phrase = 'phrase: { phrase = 'phrase: {
@@ -213,7 +217,7 @@ pub fn make_ngram(
original: ngram_str_interned, original: ngram_str_interned,
ngram_words: Some(words_interned), ngram_words: Some(words_interned),
is_prefix, is_prefix,
max_levenshtein_distance: max_nbr_typos, max_nbr_typos,
zero_typo: term.zero_typo, zero_typo: term.zero_typo,
one_typo: Lazy::Uninit, one_typo: Lazy::Uninit,
two_typo: Lazy::Uninit, two_typo: Lazy::Uninit,
@@ -267,7 +271,7 @@ impl PhraseBuilder {
QueryTerm { QueryTerm {
original: ctx.word_interner.insert(phrase_desc), original: ctx.word_interner.insert(phrase_desc),
ngram_words: None, ngram_words: None,
max_levenshtein_distance: 0, max_nbr_typos: 0,
is_prefix: false, is_prefix: false,
zero_typo: ZeroTypoTerm { zero_typo: ZeroTypoTerm {
phrase: Some(phrase), phrase: Some(phrase),
@@ -284,36 +288,3 @@ impl PhraseBuilder {
}) })
} }
} }
#[cfg(test)]
mod tests {
use charabia::TokenizerBuilder;
use super::*;
use crate::index::tests::TempIndex;
fn temp_index_with_documents() -> TempIndex {
let temp_index = TempIndex::new();
temp_index
.add_documents(documents!([
{ "id": 1, "name": "split this world westfali westfalia the Ŵôřlḑôle" },
{ "id": 2, "name": "Westfália" },
{ "id": 3, "name": "Ŵôřlḑôle" },
]))
.unwrap();
temp_index
}
#[test]
fn start_with_hard_separator() -> Result<()> {
let tokenizer = TokenizerBuilder::new().build();
let tokens = tokenizer.tokenize(".");
let index = temp_index_with_documents();
let rtxn = index.read_txn()?;
let mut ctx = SearchContext::new(&index, &rtxn);
// panics with `attempt to add with overflow` before <https://github.com/meilisearch/meilisearch/issues/3785>
let located_query_terms = located_query_terms_from_tokens(&mut ctx, tokens, None)?;
assert!(located_query_terms.is_empty());
Ok(())
}
}
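
The `start_with_hard_separator` test in this hunk (and the `wrapping_add` call in the separator handling above) exists because a query starting with a hard separator used to trip Rust's debug-mode overflow check, per the referenced issue. A tiny standalone illustration of why `wrapping_add` is the safer bump (the `u16` type is only an assumption for the example):

// Editor's illustration, not the tokenizer loop: in a debug build, `+ 1` on a
// counter already at its maximum value panics with "attempt to add with
// overflow", while `wrapping_add(1)` wraps around to 0.
fn main() {
    let position: u16 = u16::MAX;
    // let bumped = position + 1;          // panics in debug builds
    let bumped = position.wrapping_add(1); // wraps to 0 instead
    assert_eq!(bumped, 0);
}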

View File

@@ -209,7 +209,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
self.traverse_breadth_first_backward(self.query_graph.end_node, |cur_node| { self.traverse_breadth_first_backward(self.query_graph.end_node, |cur_node| {
if cur_node == self.query_graph.end_node { if cur_node == self.query_graph.end_node {
*costs_to_end.get_mut(self.query_graph.end_node) = vec![0]; *costs_to_end.get_mut(self.query_graph.end_node) = vec![0];
return; return true;
} }
let mut self_costs = Vec::<u64>::new(); let mut self_costs = Vec::<u64>::new();
@@ -226,6 +226,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
self_costs.dedup(); self_costs.dedup();
*costs_to_end.get_mut(cur_node) = self_costs; *costs_to_end.get_mut(cur_node) = self_costs;
true
}); });
costs_to_end costs_to_end
} }
@@ -235,9 +236,6 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
node_with_removed_outgoing_conditions: Interned<QueryNode>, node_with_removed_outgoing_conditions: Interned<QueryNode>,
costs: &mut MappedInterner<QueryNode, Vec<u64>>, costs: &mut MappedInterner<QueryNode, Vec<u64>>,
) { ) {
// Traverse the graph backward from the target node, recomputing the cost for each of its predecessors.
// We first check that no other node is contributing the same total cost to a predecessor before removing
// the cost from the predecessor.
self.traverse_breadth_first_backward(node_with_removed_outgoing_conditions, |cur_node| { self.traverse_breadth_first_backward(node_with_removed_outgoing_conditions, |cur_node| {
let mut costs_to_remove = FxHashSet::default(); let mut costs_to_remove = FxHashSet::default();
costs_to_remove.extend(costs.get(cur_node).iter().copied()); costs_to_remove.extend(costs.get(cur_node).iter().copied());
@@ -248,18 +246,19 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
for cost in costs.get(edge.dest_node).iter() { for cost in costs.get(edge.dest_node).iter() {
costs_to_remove.remove(&(*cost + edge.cost as u64)); costs_to_remove.remove(&(*cost + edge.cost as u64));
if costs_to_remove.is_empty() { if costs_to_remove.is_empty() {
return; return false;
} }
} }
} }
if costs_to_remove.is_empty() { if costs_to_remove.is_empty() {
return; return false;
} }
let mut new_costs = BTreeSet::from_iter(costs.get(cur_node).iter().copied()); let mut new_costs = BTreeSet::from_iter(costs.get(cur_node).iter().copied());
for c in costs_to_remove { for c in costs_to_remove {
new_costs.remove(&c); new_costs.remove(&c);
} }
*costs.get_mut(cur_node) = new_costs.into_iter().collect(); *costs.get_mut(cur_node) = new_costs.into_iter().collect();
true
}); });
} }
@@ -270,7 +269,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
pub fn traverse_breadth_first_backward( pub fn traverse_breadth_first_backward(
&self, &self,
from: Interned<QueryNode>, from: Interned<QueryNode>,
mut visit: impl FnMut(Interned<QueryNode>), mut visit: impl FnMut(Interned<QueryNode>) -> bool,
) { ) {
let mut reachable = SmallBitmap::for_interned_values_in(&self.query_graph.nodes); let mut reachable = SmallBitmap::for_interned_values_in(&self.query_graph.nodes);
{ {
@@ -313,11 +312,13 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
continue; continue;
} }
unreachable_or_visited.insert(cur_node); unreachable_or_visited.insert(cur_node);
visit(cur_node); if visit(cur_node) {
for prev_node in self.query_graph.nodes.get(cur_node).predecessors.iter() { for prev_node in self.query_graph.nodes.get(cur_node).predecessors.iter() {
if !enqueued.contains(prev_node) && !unreachable_or_visited.contains(prev_node) { if !enqueued.contains(prev_node) && !unreachable_or_visited.contains(prev_node)
stack.push_back(prev_node); {
enqueued.insert(prev_node); stack.push_back(prev_node);
enqueued.insert(prev_node);
}
} }
} }
} }
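
This hunk changes the visitor contract of `traverse_breadth_first_backward`: on one side the closure returns `bool` and the `if visit(cur_node)` guard decides whether the current node's predecessors are enqueued; on the other the closure returns `()` and a branch is cut short with an early `return`. A standalone sketch of the bool-returning flavour, using plain `usize` ids instead of `Interned<QueryNode>` and skipping the reachability bookkeeping:

use std::collections::{HashSet, VecDeque};

// Editor's sketch: backward breadth-first walk over a predecessor list. The
// visitor's return value controls whether the walk expands through the
// current node's predecessors.
fn traverse_breadth_first_backward(
    predecessors: &[Vec<usize>],
    from: usize,
    mut visit: impl FnMut(usize) -> bool,
) {
    let mut enqueued = HashSet::new();
    let mut queue = VecDeque::new();
    queue.push_back(from);
    enqueued.insert(from);
    while let Some(node) = queue.pop_front() {
        if visit(node) {
            for &prev in &predecessors[node] {
                if enqueued.insert(prev) {
                    queue.push_back(prev);
                }
            }
        }
    }
}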

View File

@@ -6,6 +6,8 @@ use crate::search::new::query_term::LocatedQueryTermSubset;
use crate::search::new::SearchContext; use crate::search::new::SearchContext;
use crate::Result; use crate::Result;
const MAX_PROX: usize = crate::proximity::MAX_DISTANCE as usize;
pub fn build_edges( pub fn build_edges(
_ctx: &mut SearchContext, _ctx: &mut SearchContext,
conditions_interner: &mut DedupInterner<ProximityCondition>, conditions_interner: &mut DedupInterner<ProximityCondition>,
@@ -18,7 +20,7 @@ pub fn build_edges(
return Ok(vec![( return Ok(vec![(
(right_ngram_length - 1) as u32, (right_ngram_length - 1) as u32,
conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }), conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
)]); )])
}; };
if left_term.positions.end() + 1 != *right_term.positions.start() { if left_term.positions.end() + 1 != *right_term.positions.start() {
@@ -35,7 +37,7 @@ pub fn build_edges(
} }
let mut conditions = vec![]; let mut conditions = vec![];
for cost in right_ngram_length..(7 + right_ngram_length) { for cost in right_ngram_length..(MAX_PROX + right_ngram_length) {
conditions.push(( conditions.push((
cost as u32, cost as u32,
conditions_interner.insert(ProximityCondition::Uninit { conditions_interner.insert(ProximityCondition::Uninit {
@@ -47,7 +49,7 @@ pub fn build_edges(
} }
conditions.push(( conditions.push((
(7 + right_ngram_length) as u32, (MAX_PROX + right_ngram_length) as u32,
conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }), conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
)); ));

View File

@@ -50,7 +50,7 @@ impl RankingRuleGraphTrait for TypoGraph {
// 3-gram -> equivalent to 2 typos // 3-gram -> equivalent to 2 typos
let base_cost = if term.term_ids.len() == 1 { 0 } else { term.term_ids.len() as u32 }; let base_cost = if term.term_ids.len() == 1 { 0 } else { term.term_ids.len() as u32 };
for nbr_typos in 0..=term.term_subset.max_typo_cost(ctx) { for nbr_typos in 0..=term.term_subset.max_nbr_typos(ctx) {
let mut term = term.clone(); let mut term = term.clone();
match nbr_typos { match nbr_typos {
0 => { 0 => {

View File

@@ -138,7 +138,7 @@ fn test_attribute_position_simple() {
s.terms_matching_strategy(TermsMatchingStrategy::All); s.terms_matching_strategy(TermsMatchingStrategy::All);
s.query("quick brown"); s.query("quick brown");
let SearchResult { documents_ids, .. } = s.execute().unwrap(); let SearchResult { documents_ids, .. } = s.execute().unwrap();
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]"); insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
} }
#[test] #[test]
fn test_attribute_position_repeated() { fn test_attribute_position_repeated() {
@@ -163,7 +163,7 @@ fn test_attribute_position_different_fields() {
s.terms_matching_strategy(TermsMatchingStrategy::All); s.terms_matching_strategy(TermsMatchingStrategy::All);
s.query("quick brown"); s.query("quick brown");
let SearchResult { documents_ids, .. } = s.execute().unwrap(); let SearchResult { documents_ids, .. } = s.execute().unwrap();
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]"); insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
} }
#[test] #[test]
@@ -176,5 +176,5 @@ fn test_attribute_position_ngrams() {
s.terms_matching_strategy(TermsMatchingStrategy::All); s.terms_matching_strategy(TermsMatchingStrategy::All);
s.query("quick brown"); s.query("quick brown");
let SearchResult { documents_ids, .. } = s.execute().unwrap(); let SearchResult { documents_ids, .. } = s.execute().unwrap();
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]"); insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
} }
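
These snapshots only reorder a few document ids in the middle of the ranking (`2, 3, 4` versus `3, 4, 2`), which is the kind of shift you see when the position ranking rule's edge costs bucket positions more coarsely, so more documents tie and their relative order is decided by later rules or internal document ids. A purely hypothetical bucketing function, to illustrate the effect; it is not the cost computation used by milli:

```rust
// Hypothetical position-to-cost bucketing, for illustration only.
fn position_cost(position: u32) -> u32 {
    match position {
        0..=3 => position, // small positions keep their exact cost
        4..=7 => 4,        // nearby positions collapse into one bucket
        _ => 5,            // everything farther away shares one flat cost
    }
}

fn main() {
    assert_eq!(position_cost(2), 2);
    // Positions 5 and 7 land in the same bucket, so two documents matching at
    // those positions tie on this rule.
    assert_eq!(position_cost(5), position_cost(7));
}
```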

View File

@@ -11,11 +11,10 @@ It doesn't test properly:
- distinct attributes with arrays (because we know it's incorrect as well) - distinct attributes with arrays (because we know it's incorrect as well)
*/ */
use std::collections::HashSet;
use big_s::S; use big_s::S;
use heed::RoTxn; use heed::RoTxn;
use maplit::hashset; use maplit::hashset;
use std::collections::HashSet;
use super::collect_field_values; use super::collect_field_values;
use crate::index::tests::TempIndex; use crate::index::tests::TempIndex;

View File

@@ -3,9 +3,9 @@ This module tests the following properties:
1. Two consecutive words from a query can be combined into a "2gram" 1. Two consecutive words from a query can be combined into a "2gram"
2. Three consecutive words from a query can be combined into a "3gram" 2. Three consecutive words from a query can be combined into a "3gram"
3. A word from the query can be split into two consecutive words (split words), no matter how short it is 3. A word from the query can be split into two consecutive words (split words)
4. A 2gram can be split into two words 4. A 2gram can be split into two words
5. A 3gram can be split into two words 5. A 3gram cannot be split into two words
6. 2grams can contain up to 1 typo 6. 2grams can contain up to 1 typo
7. 3grams cannot have typos 7. 3grams cannot have typos
8. 2grams and 3grams can be prefix tolerant 8. 2grams and 3grams can be prefix tolerant
@@ -14,7 +14,6 @@ This module tests the following properties:
11. Disabling typo tolerance does not disable ngram tolerance 11. Disabling typo tolerance does not disable ngram tolerance
12. Prefix tolerance is disabled for the last word if a space follows it 12. Prefix tolerance is disabled for the last word if a space follows it
13. Ngrams cannot be formed by combining a phrase and a word or two phrases 13. Ngrams cannot be formed by combining a phrase and a word or two phrases
14. Split words are not disabled by the `disableOnAttribute` or `disableOnWords` typo settings
*/ */
use crate::index::tests::TempIndex; use crate::index::tests::TempIndex;
@@ -57,10 +56,6 @@ fn create_index() -> TempIndex {
{ {
"id": 5, "id": 5,
"text": "sunflowering is not a verb" "text": "sunflowering is not a verb"
},
{
"id": 6,
"text": "xy z"
} }
])) ]))
.unwrap(); .unwrap();
@@ -268,11 +263,10 @@ fn test_disable_split_words() {
s.query("sunflower "); s.query("sunflower ");
let SearchResult { documents_ids, .. } = s.execute().unwrap(); let SearchResult { documents_ids, .. } = s.execute().unwrap();
// no document containing `sun flower` // no document containing `sun flower`
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 3]"); insta::assert_snapshot!(format!("{documents_ids:?}"), @"[3]");
let texts = collect_field_values(&index, &txn, "text", &documents_ids); let texts = collect_field_values(&index, &txn, "text", &documents_ids);
insta::assert_debug_snapshot!(texts, @r###" insta::assert_debug_snapshot!(texts, @r###"
[ [
"\"the sun flower is tall\"",
"\"the sunflower is tall\"", "\"the sunflower is tall\"",
] ]
"###); "###);
@@ -313,11 +307,10 @@ fn test_3gram_no_split_words() {
let SearchResult { documents_ids, .. } = s.execute().unwrap(); let SearchResult { documents_ids, .. } = s.execute().unwrap();
// no document with `sun flower` // no document with `sun flower`
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 2, 3, 5]"); insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 3, 5]");
let texts = collect_field_values(&index, &txn, "text", &documents_ids); let texts = collect_field_values(&index, &txn, "text", &documents_ids);
insta::assert_debug_snapshot!(texts, @r###" insta::assert_debug_snapshot!(texts, @r###"
[ [
"\"the sun flower is tall\"",
"\"the sunflowers are pretty\"", "\"the sunflowers are pretty\"",
"\"the sunflower is tall\"", "\"the sunflower is tall\"",
"\"sunflowering is not a verb\"", "\"sunflowering is not a verb\"",
@@ -376,50 +369,3 @@ fn test_no_ngram_phrases() {
] ]
"###); "###);
} }
#[test]
fn test_short_split_words() {
let index = create_index();
let txn = index.read_txn().unwrap();
let mut s = Search::new(&txn, &index);
s.terms_matching_strategy(TermsMatchingStrategy::All);
s.query("xyz");
let SearchResult { documents_ids, .. } = s.execute().unwrap();
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[6]");
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
insta::assert_debug_snapshot!(texts, @r###"
[
"\"xy z\"",
]
"###);
}
#[test]
fn test_split_words_never_disabled() {
let index = create_index();
index
.update_settings(|s| {
s.set_exact_words(["sunflower"].iter().map(ToString::to_string).collect());
s.set_exact_attributes(["text"].iter().map(ToString::to_string).collect());
})
.unwrap();
let txn = index.read_txn().unwrap();
let mut s = Search::new(&txn, &index);
s.terms_matching_strategy(TermsMatchingStrategy::All);
s.query("the sunflower is tall");
let SearchResult { documents_ids, .. } = s.execute().unwrap();
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 3]");
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
insta::assert_debug_snapshot!(texts, @r###"
[
"\"the sun flower is tall\"",
"\"the sunflower is tall\"",
]
"###);
}
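
For reference, a minimal sketch, in the style of the tests above, of how the first listed property (two consecutive query words combining into a 2gram) is exercised. `create_index` and `collect_field_values` are the helpers already used in this module; the assertion is illustrative rather than a snapshot taken from the repository:

```rust
// Uses the same imports as the module above (TempIndex, Search, SearchResult,
// TermsMatchingStrategy, collect_field_values).
#[test]
fn sketch_test_2gram() {
    let index = create_index();
    let txn = index.read_txn().unwrap();

    let mut s = Search::new(&txn, &index);
    s.terms_matching_strategy(TermsMatchingStrategy::All);
    // The two consecutive words "sun flower" should also match documents
    // containing the 2gram "sunflower".
    s.query("sun flower");
    let SearchResult { documents_ids, .. } = s.execute().unwrap();

    let texts = collect_field_values(&index, &txn, "text", &documents_ids);
    assert!(texts.iter().any(|t| t.contains("sunflower")));
}
```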

View File

@@ -9,7 +9,7 @@ This module tests the following properties:
6. A typo on the first letter of a word counts as two typos 6. A typo on the first letter of a word counts as two typos
7. Phrases are not typo tolerant 7. Phrases are not typo tolerant
8. 2grams can have 1 typo if they are larger than `min_word_len_two_typos` 8. 2grams can have 1 typo if they are larger than `min_word_len_two_typos`
9. 3grams are not typo tolerant (but they can be split into two words) 9. 3grams are not typo tolerant
10. The `typo` ranking rule assumes the role of the `words` ranking rule implicitly 10. The `typo` ranking rule assumes the role of the `words` ranking rule implicitly
if `words` doesn't exist before it. if `words` doesn't exist before it.
11. The `typo` ranking rule places documents with the same number of typos in the same bucket 11. The `typo` ranking rule places documents with the same number of typos in the same bucket
@@ -287,17 +287,16 @@ fn test_typo_exact_word() {
] ]
"###); "###);
// exact words do not disable prefix (sunflowering OK, but no sunflowar) // exact words do not disable prefix (sunflowering OK, but no sunflowar or sun flower)
let mut s = Search::new(&txn, &index); let mut s = Search::new(&txn, &index);
s.terms_matching_strategy(TermsMatchingStrategy::All); s.terms_matching_strategy(TermsMatchingStrategy::All);
s.query("network interconnection sunflower"); s.query("network interconnection sunflower");
let SearchResult { documents_ids, .. } = s.execute().unwrap(); let SearchResult { documents_ids, .. } = s.execute().unwrap();
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[16, 17, 18]"); insta::assert_snapshot!(format!("{documents_ids:?}"), @"[16, 18]");
let texts = collect_field_values(&index, &txn, "text", &documents_ids); let texts = collect_field_values(&index, &txn, "text", &documents_ids);
insta::assert_debug_snapshot!(texts, @r###" insta::assert_debug_snapshot!(texts, @r###"
[ [
"\"network interconnection sunflower\"", "\"network interconnection sunflower\"",
"\"network interconnection sun flower\"",
"\"network interconnection sunflowering\"", "\"network interconnection sunflowering\"",
] ]
"###); "###);

View File

@@ -261,9 +261,7 @@ pub(crate) mod test_helpers {
let options = options.map_size(4096 * 4 * 1000 * 100); let options = options.map_size(4096 * 4 * 1000 * 100);
let tempdir = tempfile::TempDir::new().unwrap(); let tempdir = tempfile::TempDir::new().unwrap();
let env = options.open(tempdir.path()).unwrap(); let env = options.open(tempdir.path()).unwrap();
let mut wtxn = env.write_txn().unwrap(); let content = env.create_database(None).unwrap();
let content = env.create_database(&mut wtxn, None).unwrap();
wtxn.commit().unwrap();
FacetIndex { FacetIndex {
content, content,
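
The test helper above differs in how the heed database is created: one side opens an explicit write transaction and commits it, the other creates the database straight from the environment. A standalone sketch of both styles; the map size, database types, and the exact heed tag pinned by the workspace are assumptions here:

```rust
use heed::types::Str;
use heed::EnvOpenOptions;

fn open_test_db() -> Result<(), heed::Error> {
    let tempdir = tempfile::TempDir::new().unwrap();
    let mut options = EnvOpenOptions::new();
    options.map_size(10 * 1024 * 1024);
    let env = options.open(tempdir.path())?;

    // Style on the left-hand side of the hunk: create the database inside an
    // explicit write transaction, then commit it.
    // let mut wtxn = env.write_txn()?;
    // let content = env.create_database(&mut wtxn, None)?;
    // wtxn.commit()?;

    // Style on the right-hand side: the environment creates the database
    // directly and manages the transaction internally.
    let _content: heed::Database<Str, Str> = env.create_database(None)?;
    Ok(())
}
```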

View File

@@ -91,7 +91,7 @@ fn document_word_positions_into_sorter(
while !word_positions_heap.is_empty() { while !word_positions_heap.is_empty() {
while let Some(peeked_word_position) = word_positions_heap.pop() { while let Some(peeked_word_position) = word_positions_heap.pop() {
ordered_peeked_word_positions.push(peeked_word_position); ordered_peeked_word_positions.push(peeked_word_position);
if ordered_peeked_word_positions.len() == 7 { if ordered_peeked_word_positions.len() == (MAX_DISTANCE - 1) as usize {
break; break;
} }
} }
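
Here the window is capped at `MAX_DISTANCE - 1` popped word positions instead of a literal `7`, so each word is paired with at most that many of the following positions. A simplified sketch of the loop, with the heap element type and the constant's value assumed:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Assumed value for illustration only; the real constant lives in the
// crate's proximity module.
const MAX_DISTANCE: u32 = 4;

/// Pops at most `MAX_DISTANCE - 1` of the following word positions, bounding
/// how many neighbours the current word is paired with. `(position, word)`
/// stands in for the real heap items.
fn next_window(
    word_positions_heap: &mut BinaryHeap<Reverse<(u16, String)>>,
) -> Vec<(u16, String)> {
    let mut ordered_peeked_word_positions = Vec::new();
    while let Some(Reverse(peeked_word_position)) = word_positions_heap.pop() {
        ordered_peeked_word_positions.push(peeked_word_position);
        if ordered_peeked_word_positions.len() == (MAX_DISTANCE - 1) as usize {
            break;
        }
    }
    ordered_peeked_word_positions
}
```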

View File

@@ -2045,11 +2045,10 @@ mod tests {
"branch_id_number": 0 "branch_id_number": 0
}]}; }]};
let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound { candidates })) =
    index.add_documents(doc_multiple_ids)
else {
    panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)")
};
let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound {
    candidates
})) =
    index.add_documents(doc_multiple_ids) else { panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)") };
assert_eq!(candidates, vec![S("id"), S("project_id"), S("public_uid"),]); assert_eq!(candidates, vec![S("id"), S("project_id"), S("public_uid"),]);