Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-04 03:35:43 +00:00)

Compare commits (15 commits): v1.2.0-rc. ... prototype-
| Author | SHA1 | Date |
|---|---|---|
| | 8f65605845 | |
| | 0a7817a002 | |
| | 1dfc4038ab | |
| | 73198179f1 | |
| | 087866d59f | |
| | 9111f5176f | |
| | b9dd092a62 | |
| | ca99bc3188 | |
| | 2e49d6aec1 | |
| | 51043f78f0 | |
| | a490a11325 | |
| | ec8f685d84 | |
| | 5758268866 | |
| | a37da36766 | |
| | 85d96d35a8 | |
@@ -150,6 +150,7 @@ make_missing_field_convenience_builder!(MissingApiKeyActions, missing_api_key_ac
 make_missing_field_convenience_builder!(MissingApiKeyExpiresAt, missing_api_key_expires_at);
 make_missing_field_convenience_builder!(MissingApiKeyIndexes, missing_api_key_indexes);
 make_missing_field_convenience_builder!(MissingSwapIndexes, missing_swap_indexes);
+make_missing_field_convenience_builder!(MissingDocumentFilter, missing_document_filter);

 // Integrate a sub-error into a [`DeserrError`] by taking its error message but using
 // the default error code (C) from `Self`
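The hunk above only adds a new invocation of `make_missing_field_convenience_builder!`; the macro definition itself is outside this diff. As a rough, hypothetical sketch of the pattern (the error type and macro body below are invented for illustration, not copied from meilisearch), such a macro expands to a small constructor that pre-fills the error code and produces the "Missing field `filter`" style message seen in the tests further down:

```rust
// Hypothetical sketch only: a declarative macro that, for an error-code marker type,
// generates a helper returning a deserialization error preconfigured with that code.
#[allow(dead_code)]
struct MissingDocumentFilter;

struct DeserrError {
    code: &'static str,
    message: String,
}

macro_rules! make_missing_field_convenience_builder {
    ($code:ty, $fn_name:ident) => {
        impl DeserrError {
            pub fn $fn_name(field: &str) -> Self {
                DeserrError {
                    code: stringify!($code),
                    message: format!("Missing field `{field}`"),
                }
            }
        }
    };
}

make_missing_field_convenience_builder!(MissingDocumentFilter, missing_document_filter);

fn main() {
    let err = DeserrError::missing_document_filter("filter");
    assert_eq!(err.message, "Missing field `filter`");
    assert_eq!(err.code, "MissingDocumentFilter");
}
```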
@@ -214,12 +214,12 @@ InvalidApiKeyUid , InvalidRequest , BAD_REQUEST ;
 InvalidContentType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
 InvalidDocumentCsvDelimiter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFields , InvalidRequest , BAD_REQUEST ;
+MissingDocumentFilter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFilter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentGeoField , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentId , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentLimit , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentOffset , InvalidRequest , BAD_REQUEST ;
-InvalidDocumentDeleteFilter , InvalidRequest , BAD_REQUEST ;
 InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
 InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
 InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
@@ -61,7 +61,7 @@ impl ErrorCode for MeilisearchHttpError {
             MeilisearchHttpError::MissingPayload(_) => Code::MissingPayload,
             MeilisearchHttpError::InvalidContentType(_, _) => Code::InvalidContentType,
             MeilisearchHttpError::DocumentNotFound(_) => Code::DocumentNotFound,
-            MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentDeleteFilter,
+            MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentFilter,
             MeilisearchHttpError::InvalidExpression(_, _) => Code::InvalidSearchFilter,
             MeilisearchHttpError::PayloadTooLarge(_) => Code::PayloadTooLarge,
             MeilisearchHttpError::SwapIndexPayloadWrongLength(_) => Code::InvalidSwapIndexes,
@@ -486,7 +486,7 @@ pub async fn delete_documents_batch(
 #[derive(Debug, Deserr)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
 pub struct DocumentDeletionByFilter {
-    #[deserr(error = DeserrJsonError<InvalidDocumentDeleteFilter>)]
+    #[deserr(error = DeserrJsonError<InvalidDocumentFilter>, missing_field_error = DeserrJsonError::missing_document_filter)]
     filter: Value,
 }
@@ -508,8 +508,8 @@ pub async fn delete_documents_by_filter(
     || -> Result<_, ResponseError> {
         Ok(crate::search::parse_filter(&filter)?.ok_or(MeilisearchHttpError::EmptyFilter)?)
     }()
-    // and whatever was the error, the error code should always be an InvalidDocumentDeleteFilter
-    .map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentDeleteFilter))?;
+    // and whatever was the error, the error code should always be an InvalidDocumentFilter
+    .map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentFilter))?;
     let task = KindWithContent::DocumentDeletionByFilter { index_uid, filter_expr: filter };

     let task: SummarizedTaskView =
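The closure-plus-`map_err` shape above funnels every failure from filter parsing (empty filter, syntax error, and so on) into the single `invalid_document_filter` code while keeping the original message. A minimal, self-contained sketch of that pattern, with simplified stand-ins for meilisearch's `ResponseError` and `Code` types:

```rust
// Illustrative only: simplified stand-ins for the real ResponseError / Code types.
#[derive(Debug, PartialEq)]
enum Code {
    InvalidDocumentFilter,
}

#[derive(Debug)]
struct ResponseError {
    message: String,
    code: Code,
}

// A parser that can fail for different reasons, all reported as plain messages.
fn parse_filter(input: &str) -> Result<String, String> {
    if input.trim().is_empty() {
        Err("Sending an empty filter is forbidden.".to_string())
    } else {
        Ok(input.to_string())
    }
}

fn filter_from_payload(input: &str) -> Result<String, ResponseError> {
    // Whatever the underlying error was, surface it under one stable error code,
    // keeping only its message: the same shape as the `map_err` in the handler above.
    parse_filter(input)
        .map_err(|message| ResponseError { message, code: Code::InvalidDocumentFilter })
}

fn main() {
    let err = filter_from_payload("   ").unwrap_err();
    assert_eq!(err.code, Code::InvalidDocumentFilter);
    assert!(err.message.contains("forbidden"));
}
```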
@@ -99,7 +99,7 @@ pub struct DetailsView {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub deleted_tasks: Option<Option<u64>>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub original_filter: Option<String>,
+    pub original_filter: Option<Option<String>>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub dump_uid: Option<Option<String>>,
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -131,12 +131,13 @@ impl From<Details> for DetailsView {
             } => DetailsView {
                 provided_ids: Some(received_document_ids),
                 deleted_documents: Some(deleted_documents),
+                original_filter: Some(None),
                 ..DetailsView::default()
             },
             Details::DocumentDeletionByFilter { original_filter, deleted_documents } => {
                 DetailsView {
                     provided_ids: Some(0),
-                    original_filter: Some(original_filter),
+                    original_filter: Some(Some(original_filter)),
                     deleted_documents: Some(deleted_documents),
                     ..DetailsView::default()
                 }

@@ -148,7 +149,7 @@ impl From<Details> for DetailsView {
                 DetailsView {
                     matched_tasks: Some(matched_tasks),
                     canceled_tasks: Some(canceled_tasks),
-                    original_filter: Some(original_filter),
+                    original_filter: Some(Some(original_filter)),
                     ..DetailsView::default()
                 }
             }

@@ -156,7 +157,7 @@ impl From<Details> for DetailsView {
                 DetailsView {
                     matched_tasks: Some(matched_tasks),
                     deleted_tasks: Some(deleted_tasks),
-                    original_filter: Some(original_filter),
+                    original_filter: Some(Some(original_filter)),
                     ..DetailsView::default()
                 }
             }
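The `Option<String>` to `Option<Option<String>>` change, combined with the existing `skip_serializing_if = "Option::is_none"`, is what lets `originalFilter` appear as `null` for deletions by id (see the task snapshots below) while still being omitted entirely for task kinds that have no notion of a filter. A small sketch of the three cases, using plain serde/serde_json with an illustrative struct rather than the real `DetailsView`:

```rust
use serde::Serialize;

// Illustrative only: the outer Option controls whether the key is serialized at all,
// the inner one controls whether its value is null.
#[derive(Serialize)]
struct DetailsViewSketch {
    #[serde(skip_serializing_if = "Option::is_none")]
    original_filter: Option<Option<String>>,
}

fn main() {
    // Key omitted entirely: this task kind has no filter field at all.
    let absent = DetailsViewSketch { original_filter: None };
    assert_eq!(serde_json::to_string(&absent).unwrap(), "{}");

    // Key present but null: e.g. a deletion by ids, where originalFilter is always null.
    let null_filter = DetailsViewSketch { original_filter: Some(None) };
    assert_eq!(
        serde_json::to_string(&null_filter).unwrap(),
        r#"{"original_filter":null}"#
    );

    // Key present with a value: a deletion by filter.
    let with_filter =
        DetailsViewSketch { original_filter: Some(Some("doggo = bernese".to_string())) };
    assert_eq!(
        serde_json::to_string(&with_filter).unwrap(),
        r#"{"original_filter":"doggo = bernese"}"#
    );
}
```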
@@ -547,9 +547,9 @@ async fn delete_document_by_filter() {
     snapshot!(json_string!(response), @r###"
     {
       "message": "Invalid syntax for the filter parameter: `expected String, Array, found: true`.",
-      "code": "invalid_document_delete_filter",
+      "code": "invalid_document_filter",
       "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
+      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
     }
     "###);

@@ -559,9 +559,9 @@ async fn delete_document_by_filter() {
     snapshot!(json_string!(response), @r###"
     {
       "message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `hello`.\n1:6 hello",
-      "code": "invalid_document_delete_filter",
+      "code": "invalid_document_filter",
       "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
+      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
     }
     "###);

@@ -571,9 +571,21 @@ async fn delete_document_by_filter() {
     snapshot!(json_string!(response), @r###"
     {
       "message": "Sending an empty filter is forbidden.",
-      "code": "invalid_document_delete_filter",
+      "code": "invalid_document_filter",
       "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
+      "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
     }
     "###);

+    // do not send any filter
+    let (response, code) = index.delete_document_by_filter(json!({})).await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
+    {
+      "message": "Missing field `filter`",
+      "code": "missing_document_filter",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#missing_document_filter"
+    }
+    "###);
@@ -413,7 +413,7 @@ async fn test_summarized_document_addition_or_update() {
 }

 #[actix_web::test]
-async fn test_summarized_delete_batch() {
+async fn test_summarized_delete_documents_by_batch() {
     let server = Server::new().await;
     let index = server.index("test");
     index.delete_batch(vec![1, 2, 3]).await;

@@ -430,7 +430,8 @@ async fn test_summarized_delete_batch() {
       "canceledBy": null,
       "details": {
         "providedIds": 3,
-        "deletedDocuments": 0
+        "deletedDocuments": 0,
+        "originalFilter": null
       },
       "error": {
         "message": "Index `test` not found.",

@@ -460,7 +461,8 @@ async fn test_summarized_delete_batch() {
       "canceledBy": null,
       "details": {
         "providedIds": 1,
-        "deletedDocuments": 0
+        "deletedDocuments": 0,
+        "originalFilter": null
       },
       "error": null,
       "duration": "[duration]",
@@ -472,7 +474,100 @@ async fn test_summarized_delete_batch() {
 }

 #[actix_web::test]
-async fn test_summarized_delete_document() {
+async fn test_summarized_delete_documents_by_filter() {
+    let server = Server::new().await;
+    let index = server.index("test");
+
+    index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
+    index.wait_task(0).await;
+    let (task, _) = index.get_task(0).await;
+    assert_json_snapshot!(task,
+        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+        @r###"
+    {
+      "uid": 0,
+      "indexUid": "test",
+      "status": "failed",
+      "type": "documentDeletion",
+      "canceledBy": null,
+      "details": {
+        "providedIds": 0,
+        "deletedDocuments": 0,
+        "originalFilter": "\"doggo = bernese\""
+      },
+      "error": {
+        "message": "Index `test` not found.",
+        "code": "index_not_found",
+        "type": "invalid_request",
+        "link": "https://docs.meilisearch.com/errors#index_not_found"
+      },
+      "duration": "[duration]",
+      "enqueuedAt": "[date]",
+      "startedAt": "[date]",
+      "finishedAt": "[date]"
+    }
+    "###);
+
+    index.create(None).await;
+    index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
+    index.wait_task(2).await;
+    let (task, _) = index.get_task(2).await;
+    assert_json_snapshot!(task,
+        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+        @r###"
+    {
+      "uid": 2,
+      "indexUid": "test",
+      "status": "failed",
+      "type": "documentDeletion",
+      "canceledBy": null,
+      "details": {
+        "providedIds": 0,
+        "deletedDocuments": 0,
+        "originalFilter": "\"doggo = bernese\""
+      },
+      "error": {
+        "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
+        "code": "invalid_document_filter",
+        "type": "invalid_request",
+        "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+      },
+      "duration": "[duration]",
+      "enqueuedAt": "[date]",
+      "startedAt": "[date]",
+      "finishedAt": "[date]"
+    }
+    "###);
+
+    index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
+    index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
+    index.wait_task(4).await;
+    let (task, _) = index.get_task(4).await;
+    assert_json_snapshot!(task,
+        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+        @r###"
+    {
+      "uid": 4,
+      "indexUid": "test",
+      "status": "succeeded",
+      "type": "documentDeletion",
+      "canceledBy": null,
+      "details": {
+        "providedIds": 0,
+        "deletedDocuments": 0,
+        "originalFilter": "\"doggo = bernese\""
+      },
+      "error": null,
+      "duration": "[duration]",
+      "enqueuedAt": "[date]",
+      "startedAt": "[date]",
+      "finishedAt": "[date]"
+    }
+    "###);
+}
+
+#[actix_web::test]
+async fn test_summarized_delete_document_by_id() {
     let server = Server::new().await;
     let index = server.index("test");
     index.delete_document(1).await;
@@ -489,7 +584,8 @@ async fn test_summarized_delete_document() {
       "canceledBy": null,
       "details": {
         "providedIds": 1,
-        "deletedDocuments": 0
+        "deletedDocuments": 0,
+        "originalFilter": null
       },
       "error": {
         "message": "Index `test` not found.",

@@ -519,7 +615,8 @@ async fn test_summarized_delete_document() {
       "canceledBy": null,
       "details": {
         "providedIds": 1,
-        "deletedDocuments": 0
+        "deletedDocuments": 0,
+        "originalFilter": null
       },
       "error": null,
       "duration": "[duration]",
@@ -116,16 +116,15 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
     }

     while valid_docids.len() < length {
-        // The universe for this bucket is zero or one element, so we don't need to sort
-        // anything, just extend the results and go back to the parent ranking rule.
-        if ranking_rule_universes[cur_ranking_rule_index].len() <= 1 {
-            let bucket = std::mem::take(&mut ranking_rule_universes[cur_ranking_rule_index]);
-            maybe_add_to_results!(bucket);
+        // The universe for this bucket is zero element, so we don't need to sort
+        // anything, just go back to the parent ranking rule.
+        if ranking_rule_universes[cur_ranking_rule_index].is_empty() {
             back!();
             continue;
         }

-        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(ctx, logger, &ranking_rule_universes[cur_ranking_rule_index])? else {
+        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(ctx, logger, &ranking_rule_universes[cur_ranking_rule_index])?
+        else {
             back!();
             continue;
         };
@@ -46,7 +46,7 @@ use super::logger::SearchLogger;
 use super::query_graph::QueryNode;
 use super::ranking_rule_graph::{
     ConditionDocIdsCache, DeadEndsCache, ExactnessGraph, FidGraph, PositionGraph, ProximityGraph,
-    RankingRuleGraph, RankingRuleGraphTrait, TypoGraph,
+    RankingRuleGraph, RankingRuleGraphTrait, TypoGraph, WordsGraph,
 };
 use super::small_bitmap::SmallBitmap;
 use super::{QueryGraph, RankingRule, RankingRuleOutput, SearchContext};

@@ -54,6 +54,12 @@ use crate::search::new::query_term::LocatedQueryTermSubset;
 use crate::search::new::ranking_rule_graph::PathVisitor;
 use crate::{Result, TermsMatchingStrategy};

+pub type Words = GraphBasedRankingRule<WordsGraph>;
+impl GraphBasedRankingRule<WordsGraph> {
+    pub fn new(terms_matching_strategy: TermsMatchingStrategy) -> Self {
+        Self::new_with_id("words".to_owned(), Some(terms_matching_strategy))
+    }
+}
 pub type Proximity = GraphBasedRankingRule<ProximityGraph>;
 impl GraphBasedRankingRule<ProximityGraph> {
     pub fn new(terms_matching_strategy: Option<TermsMatchingStrategy>) -> Self {
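With this hunk, `Words` is no longer a hand-rolled ranking rule but an alias for `GraphBasedRankingRule<WordsGraph>`, mirroring how `Proximity` is defined just below it. A stripped-down sketch of that "generic rule plus per-graph type alias with its own constructor" pattern (all names below are invented for illustration; the real `GraphBasedRankingRule` carries graph state, caches, and more):

```rust
use std::marker::PhantomData;

// A marker trait standing in for the real RankingRuleGraphTrait.
trait GraphKind {
    const NAME: &'static str;
}

// An empty enum works as a pure type-level marker: it can never be instantiated.
enum WordsKind {}
impl GraphKind for WordsKind {
    const NAME: &'static str = "words";
}

// One generic rule implementation shared by every graph kind.
struct GraphRule<G: GraphKind> {
    id: String,
    _graph: PhantomData<G>,
}

impl<G: GraphKind> GraphRule<G> {
    fn new_with_id(id: String) -> Self {
        Self { id, _graph: PhantomData }
    }
}

// The alias gives the graph-specialised rule a short name and its own `new`.
type Words = GraphRule<WordsKind>;

impl GraphRule<WordsKind> {
    fn new() -> Self {
        Self::new_with_id(WordsKind::NAME.to_owned())
    }
}

fn main() {
    let words = Words::new();
    assert_eq!(words.id, "words");
}
```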
@@ -175,9 +181,6 @@ impl<'ctx, G: RankingRuleGraphTrait> RankingRule<'ctx, QueryGraph> for GraphBase
         logger: &mut dyn SearchLogger<QueryGraph>,
         universe: &RoaringBitmap,
     ) -> Result<Option<RankingRuleOutput<QueryGraph>>> {
-        // If universe.len() <= 1, the bucket sort algorithm
-        // should not have called this function.
-        assert!(universe.len() > 1);
         // Will crash if `next_bucket` is called before `start_iteration` or after `end_iteration`,
         // should never happen
         let mut state = self.state.take().unwrap();
@@ -4,7 +4,6 @@ use std::io::{BufWriter, Write};
 use std::path::{Path, PathBuf};
 use std::time::Instant;

-// use rand::random;
 use roaring::RoaringBitmap;

 use crate::search::new::interner::Interned;

@@ -13,6 +12,7 @@ use crate::search::new::query_term::LocatedQueryTermSubset;
 use crate::search::new::ranking_rule_graph::{
     Edge, FidCondition, FidGraph, PositionCondition, PositionGraph, ProximityCondition,
     ProximityGraph, RankingRuleGraph, RankingRuleGraphTrait, TypoCondition, TypoGraph,
+    WordsCondition, WordsGraph,
 };
 use crate::search::new::ranking_rules::BoxRankingRule;
 use crate::search::new::{QueryGraph, QueryNode, RankingRule, SearchContext, SearchLogger};

@@ -24,11 +24,12 @@ pub enum SearchEvents {
     RankingRuleSkipBucket { ranking_rule_idx: usize, bucket_len: u64 },
     RankingRuleEndIteration { ranking_rule_idx: usize, universe_len: u64 },
     ExtendResults { new: Vec<u32> },
-    WordsGraph { query_graph: QueryGraph },
     ProximityGraph { graph: RankingRuleGraph<ProximityGraph> },
     ProximityPaths { paths: Vec<Vec<Interned<ProximityCondition>>> },
     TypoGraph { graph: RankingRuleGraph<TypoGraph> },
     TypoPaths { paths: Vec<Vec<Interned<TypoCondition>>> },
+    WordsGraph { graph: RankingRuleGraph<WordsGraph> },
+    WordsPaths { paths: Vec<Vec<Interned<WordsCondition>>> },
     FidGraph { graph: RankingRuleGraph<FidGraph> },
     FidPaths { paths: Vec<Vec<Interned<FidCondition>>> },
     PositionGraph { graph: RankingRuleGraph<PositionGraph> },

@@ -139,8 +140,11 @@ impl SearchLogger<QueryGraph> for VisualSearchLogger {
         let Some(location) = self.location.last() else { return };
         match location {
             Location::Words => {
-                if let Some(query_graph) = state.downcast_ref::<QueryGraph>() {
-                    self.events.push(SearchEvents::WordsGraph { query_graph: query_graph.clone() });
+                if let Some(graph) = state.downcast_ref::<RankingRuleGraph<WordsGraph>>() {
+                    self.events.push(SearchEvents::WordsGraph { graph: graph.clone() });
                 }
+                if let Some(paths) = state.downcast_ref::<Vec<Vec<Interned<WordsCondition>>>>() {
+                    self.events.push(SearchEvents::WordsPaths { paths: paths.clone() });
+                }
             }
             Location::Typo => {

@@ -329,7 +333,6 @@ impl<'ctx> DetailedLoggerFinish<'ctx> {
             SearchEvents::ExtendResults { new } => {
                 self.write_extend_results(new)?;
             }
-            SearchEvents::WordsGraph { query_graph } => self.write_words_graph(query_graph)?,
             SearchEvents::ProximityGraph { graph } => self.write_rr_graph(&graph)?,
             SearchEvents::ProximityPaths { paths } => {
                 self.write_rr_graph_paths::<ProximityGraph>(paths)?;

@@ -338,6 +341,10 @@ impl<'ctx> DetailedLoggerFinish<'ctx> {
             SearchEvents::TypoPaths { paths } => {
                 self.write_rr_graph_paths::<TypoGraph>(paths)?;
             }
+            SearchEvents::WordsGraph { graph } => self.write_rr_graph(&graph)?,
+            SearchEvents::WordsPaths { paths } => {
+                self.write_rr_graph_paths::<WordsGraph>(paths)?;
+            }
             SearchEvents::FidGraph { graph } => self.write_rr_graph(&graph)?,
             SearchEvents::FidPaths { paths } => {
                 self.write_rr_graph_paths::<FidGraph>(paths)?;

@@ -455,7 +462,7 @@ fill: \"#B6E2D3\"
 shape: class
 max_nbr_typo: {}",
             term_subset.description(ctx),
-            term_subset.max_nbr_typos(ctx)
+            term_subset.max_typo_cost(ctx)
         )?;

         for w in term_subset.all_single_words_except_prefix_db(ctx)? {

@@ -482,13 +489,6 @@ fill: \"#B6E2D3\"
         }
         Ok(())
     }
-    fn write_words_graph(&mut self, qg: QueryGraph) -> Result<()> {
-        self.make_new_file_for_internal_state_if_needed()?;
-
-        self.write_query_graph(&qg)?;
-
-        Ok(())
-    }
     fn write_rr_graph<R: RankingRuleGraphTrait>(
         &mut self,
         graph: &RankingRuleGraph<R>,
@@ -15,11 +15,7 @@ mod resolve_query_graph;
 mod small_bitmap;

 mod exact_attribute;
-// TODO: documentation + comments
-// implementation is currently an adaptation of the previous implementation to fit with the new model
 mod sort;
-// TODO: documentation + comments
-mod words;

 #[cfg(test)]
 mod tests;

@@ -43,10 +39,10 @@ use ranking_rules::{
 use resolve_query_graph::{compute_query_graph_docids, PhraseDocIdsCache};
 use roaring::RoaringBitmap;
 use sort::Sort;
-use words::Words;

 use self::geo_sort::GeoSort;
 pub use self::geo_sort::Strategy as GeoSortStrategy;
+use self::graph_based_ranking_rule::Words;
 use self::interner::Interned;
 use crate::search::new::distinct::apply_distinct_rule;
 use crate::{AscDesc, DocumentId, Filter, Index, Member, Result, TermsMatchingStrategy, UserError};

@@ -202,6 +198,11 @@ fn get_ranking_rules_for_query_graph_search<'ctx>(
     let mut sorted_fields = HashSet::new();
     let mut geo_sorted = false;

+    // Don't add the `words` ranking rule if the term matching strategy is `All`
+    if matches!(terms_matching_strategy, TermsMatchingStrategy::All) {
+        words = true;
+    }
+
     let mut ranking_rules: Vec<BoxRankingRule<QueryGraph>> = vec![];
     let settings_ranking_rules = ctx.index.criteria(ctx.txn)?;
     for rr in settings_ranking_rules {
@@ -28,14 +28,14 @@ pub enum ZeroOrOneTypo {
 impl Interned<QueryTerm> {
     pub fn compute_fully_if_needed(self, ctx: &mut SearchContext) -> Result<()> {
         let s = ctx.term_interner.get_mut(self);
-        if s.max_nbr_typos <= 1 && s.one_typo.is_uninit() {
+        if s.max_levenshtein_distance <= 1 && s.one_typo.is_uninit() {
             assert!(s.two_typo.is_uninit());
             // Initialize one_typo subterm even if max_nbr_typo is 0 because of split words
             self.initialize_one_typo_subterm(ctx)?;
             let s = ctx.term_interner.get_mut(self);
             assert!(s.one_typo.is_init());
             s.two_typo = Lazy::Init(TwoTypoTerm::default());
-        } else if s.max_nbr_typos > 1 && s.two_typo.is_uninit() {
+        } else if s.max_levenshtein_distance > 1 && s.two_typo.is_uninit() {
             assert!(s.two_typo.is_uninit());
             self.initialize_one_and_two_typo_subterm(ctx)?;
             let s = ctx.term_interner.get_mut(self);

@@ -185,7 +185,7 @@ pub fn partially_initialized_term_from_word(
             original: ctx.word_interner.insert(word.to_owned()),
             ngram_words: None,
             is_prefix: false,
-            max_nbr_typos: 0,
+            max_levenshtein_distance: 0,
             zero_typo: <_>::default(),
             one_typo: Lazy::Init(<_>::default()),
             two_typo: Lazy::Init(<_>::default()),

@@ -256,7 +256,7 @@ pub fn partially_initialized_term_from_word(
     Ok(QueryTerm {
         original: word_interned,
         ngram_words: None,
-        max_nbr_typos: max_typo,
+        max_levenshtein_distance: max_typo,
         is_prefix,
         zero_typo,
        one_typo: Lazy::Uninit,

@@ -275,7 +275,16 @@ fn find_split_words(ctx: &mut SearchContext, word: &str) -> Result<Option<Intern
 impl Interned<QueryTerm> {
     fn initialize_one_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
         let self_mut = ctx.term_interner.get_mut(self);
-        let QueryTerm { original, is_prefix, one_typo, max_nbr_typos, .. } = self_mut;
+
+        let allows_split_words = self_mut.allows_split_words();
+        let QueryTerm {
+            original,
+            is_prefix,
+            one_typo,
+            max_levenshtein_distance: max_nbr_typos,
+            ..
+        } = self_mut;

         let original = *original;
         let is_prefix = *is_prefix;
         // let original_str = ctx.word_interner.get(*original).to_owned();

@@ -300,13 +309,17 @@ impl Interned<QueryTerm> {
             })?;
         }

-        let original_str = ctx.word_interner.get(original).to_owned();
-        let split_words = find_split_words(ctx, original_str.as_str())?;
+        let split_words = if allows_split_words {
+            let original_str = ctx.word_interner.get(original).to_owned();
+            find_split_words(ctx, original_str.as_str())?
+        } else {
+            None
+        };

         let self_mut = ctx.term_interner.get_mut(self);

         // Only add the split words to the derivations if:
-        // 1. the term is not an ngram; OR
+        // 1. the term is neither an ngram nor a phrase; OR
         // 2. the term is an ngram, but the split words are different from the ngram's component words
         let split_words = if let Some((ngram_words, split_words)) =
             self_mut.ngram_words.as_ref().zip(split_words.as_ref())

@@ -328,7 +341,13 @@ impl Interned<QueryTerm> {
     }
     fn initialize_one_and_two_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
         let self_mut = ctx.term_interner.get_mut(self);
-        let QueryTerm { original, is_prefix, two_typo, max_nbr_typos, .. } = self_mut;
+        let QueryTerm {
+            original,
+            is_prefix,
+            two_typo,
+            max_levenshtein_distance: max_nbr_typos,
+            ..
+        } = self_mut;
         let original_str = ctx.word_interner.get(*original).to_owned();
         if two_typo.is_init() {
             return Ok(());
@@ -43,7 +43,7 @@ pub struct QueryTermSubset {
 pub struct QueryTerm {
     original: Interned<String>,
     ngram_words: Option<Vec<Interned<String>>>,
-    max_nbr_typos: u8,
+    max_levenshtein_distance: u8,
     is_prefix: bool,
     zero_typo: ZeroTypoTerm,
     // May not be computed yet

@@ -342,10 +342,16 @@ impl QueryTermSubset {
         }
         None
     }
-    pub fn max_nbr_typos(&self, ctx: &SearchContext) -> u8 {
+    pub fn max_typo_cost(&self, ctx: &SearchContext) -> u8 {
         let t = ctx.term_interner.get(self.original);
-        match t.max_nbr_typos {
-            0 => 0,
+        match t.max_levenshtein_distance {
+            0 => {
+                if t.allows_split_words() {
+                    1
+                } else {
+                    0
+                }
+            }
             1 => {
                 if self.one_typo_subset.is_empty() {
                     0

@@ -438,6 +444,9 @@ impl QueryTerm {

         self.zero_typo.is_empty() && one_typo.is_empty() && two_typo.is_empty()
     }
+    fn allows_split_words(&self) -> bool {
+        self.zero_typo.phrase.is_none()
+    }
 }

 impl Interned<QueryTerm> {
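The `max_nbr_typos` to `max_levenshtein_distance` rename, together with the new `max_typo_cost` accessor above, separates two ideas: the raw edit distance allowed on the term versus the cost the typo ranking rule charges, which is 1 even at distance 0 when a split-word derivation is possible. For reference, a textbook edit-distance sketch of what the renamed field bounds (illustrative only, not meilisearch's implementation):

```rust
// Classic dynamic-programming Levenshtein distance; illustrative only.
fn levenshtein(a: &str, b: &str) -> usize {
    let a: Vec<char> = a.chars().collect();
    let b: Vec<char> = b.chars().collect();
    // prev[j] = distance between the current prefix of `a` and b[..j]
    let mut prev: Vec<usize> = (0..=b.len()).collect();
    for (i, &ca) in a.iter().enumerate() {
        let mut cur = vec![i + 1];
        for (j, &cb) in b.iter().enumerate() {
            let substitute = prev[j] + usize::from(ca != cb);
            let delete = prev[j + 1] + 1;
            let insert = cur[j] + 1;
            cur.push(substitute.min(delete).min(insert));
        }
        prev = cur;
    }
    prev[b.len()]
}

fn main() {
    // One substitution ("doggo" -> "doggy"): distance 1, within a one-typo budget.
    assert_eq!(levenshtein("doggo", "doggy"), 1);
    // "kitten" -> "sitting" needs three edits, beyond any typo budget used here.
    assert_eq!(levenshtein("kitten", "sitting"), 3);
}
```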
@@ -77,13 +77,9 @@ pub fn located_query_terms_from_tokens(
                 }
             }
             TokenKind::Separator(separator_kind) => {
-                match separator_kind {
-                    SeparatorKind::Hard => {
-                        position += 1;
-                    }
-                    SeparatorKind::Soft => {
-                        position += 0;
-                    }
+                // add penalty for hard separators
+                if let SeparatorKind::Hard = separator_kind {
+                    position = position.wrapping_add(1);
                 }

                 phrase = 'phrase: {
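The switch to `wrapping_add` removes a possible `attempt to add with overflow` panic (in debug builds) that the new `start_with_hard_separator` test further down guards against (issue #3785). A tiny, generic illustration of the difference between `+` and `wrapping_add` on an integer sitting at its maximum value (the concrete integer width is irrelevant to the point):

```rust
fn main() {
    let position: u16 = u16::MAX;
    // In a debug build, `position + 1` would panic with "attempt to add with overflow";
    // in a release build it would silently wrap. `wrapping_add` makes the wrap explicit
    // and never panics.
    assert_eq!(position.wrapping_add(1), 0);
    // `checked_add` is the alternative when the overflow should be surfaced instead:
    assert_eq!(position.checked_add(1), None);
}
```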
@@ -217,7 +213,7 @@ pub fn make_ngram(
         original: ngram_str_interned,
         ngram_words: Some(words_interned),
         is_prefix,
-        max_nbr_typos,
+        max_levenshtein_distance: max_nbr_typos,
         zero_typo: term.zero_typo,
         one_typo: Lazy::Uninit,
         two_typo: Lazy::Uninit,

@@ -271,7 +267,7 @@ impl PhraseBuilder {
         QueryTerm {
             original: ctx.word_interner.insert(phrase_desc),
             ngram_words: None,
-            max_nbr_typos: 0,
+            max_levenshtein_distance: 0,
             is_prefix: false,
             zero_typo: ZeroTypoTerm {
                 phrase: Some(phrase),
@@ -288,3 +284,36 @@ impl PhraseBuilder {
         })
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use charabia::TokenizerBuilder;
+
+    use super::*;
+    use crate::index::tests::TempIndex;
+
+    fn temp_index_with_documents() -> TempIndex {
+        let temp_index = TempIndex::new();
+        temp_index
+            .add_documents(documents!([
+                { "id": 1, "name": "split this world westfali westfalia the Ŵôřlḑôle" },
+                { "id": 2, "name": "Westfália" },
+                { "id": 3, "name": "Ŵôřlḑôle" },
+            ]))
+            .unwrap();
+        temp_index
+    }
+
+    #[test]
+    fn start_with_hard_separator() -> Result<()> {
+        let tokenizer = TokenizerBuilder::new().build();
+        let tokens = tokenizer.tokenize(".");
+        let index = temp_index_with_documents();
+        let rtxn = index.read_txn()?;
+        let mut ctx = SearchContext::new(&index, &rtxn);
+        // panics with `attempt to add with overflow` before <https://github.com/meilisearch/meilisearch/issues/3785>
+        let located_query_terms = located_query_terms_from_tokens(&mut ctx, tokens, None)?;
+        assert!(located_query_terms.is_empty());
+        Ok(())
+    }
+}
@@ -205,18 +205,12 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
 impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
     pub fn find_all_costs_to_end(&self) -> MappedInterner<QueryNode, Vec<u64>> {
         let mut costs_to_end = self.query_graph.nodes.map(|_| vec![]);
-        let mut enqueued = SmallBitmap::new(self.query_graph.nodes.len());
-
-        let mut node_stack = VecDeque::new();
-
-        *costs_to_end.get_mut(self.query_graph.end_node) = vec![0];
-
-        for prev_node in self.query_graph.nodes.get(self.query_graph.end_node).predecessors.iter() {
-            node_stack.push_back(prev_node);
-            enqueued.insert(prev_node);
-        }
-
-        while let Some(cur_node) = node_stack.pop_front() {
+        self.traverse_breadth_first_backward(self.query_graph.end_node, |cur_node| {
+            if cur_node == self.query_graph.end_node {
+                *costs_to_end.get_mut(self.query_graph.end_node) = vec![0];
+                return;
+            }
             let mut self_costs = Vec::<u64>::new();

             let cur_node_edges = &self.edges_of_node.get(cur_node);

@@ -232,13 +226,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
             self_costs.dedup();

             *costs_to_end.get_mut(cur_node) = self_costs;
-            for prev_node in self.query_graph.nodes.get(cur_node).predecessors.iter() {
-                if !enqueued.contains(prev_node) {
-                    node_stack.push_back(prev_node);
-                    enqueued.insert(prev_node);
-                }
-            }
-        }
+        });
         costs_to_end
     }

@@ -247,17 +235,12 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
         node_with_removed_outgoing_conditions: Interned<QueryNode>,
         costs: &mut MappedInterner<QueryNode, Vec<u64>>,
     ) {
-        let mut enqueued = SmallBitmap::new(self.query_graph.nodes.len());
-        let mut node_stack = VecDeque::new();
-
-        enqueued.insert(node_with_removed_outgoing_conditions);
-        node_stack.push_back(node_with_removed_outgoing_conditions);
-
-        'main_loop: while let Some(cur_node) = node_stack.pop_front() {
+        // Traverse the graph backward from the target node, recomputing the cost for each of its predecessors.
+        // We first check that no other node is contributing the same total cost to a predecessor before removing
+        // the cost from the predecessor.
+        self.traverse_breadth_first_backward(node_with_removed_outgoing_conditions, |cur_node| {
             let mut costs_to_remove = FxHashSet::default();
-            for c in costs.get(cur_node) {
-                costs_to_remove.insert(*c);
-            }
+            costs_to_remove.extend(costs.get(cur_node).iter().copied());

             let cur_node_edges = &self.edges_of_node.get(cur_node);
             for edge_idx in cur_node_edges.iter() {

@@ -265,22 +248,75 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
                 for cost in costs.get(edge.dest_node).iter() {
                     costs_to_remove.remove(&(*cost + edge.cost as u64));
                     if costs_to_remove.is_empty() {
-                        continue 'main_loop;
+                        return;
                     }
                 }
             }
             if costs_to_remove.is_empty() {
-                continue 'main_loop;
+                return;
             }
             let mut new_costs = BTreeSet::from_iter(costs.get(cur_node).iter().copied());
             for c in costs_to_remove {
                 new_costs.remove(&c);
             }
             *costs.get_mut(cur_node) = new_costs.into_iter().collect();
+        });
     }

+    /// Traverse the graph backwards from the given node such that every time
+    /// a node is visited, we are guaranteed that all its successors either:
+    /// 1. have already been visited; OR
+    /// 2. were not reachable from the given node
+    pub fn traverse_breadth_first_backward(
+        &self,
+        from: Interned<QueryNode>,
+        mut visit: impl FnMut(Interned<QueryNode>),
+    ) {
+        let mut reachable = SmallBitmap::for_interned_values_in(&self.query_graph.nodes);
+        {
+            // go backward to get the set of all reachable nodes from the given node
+            // the nodes that are not reachable will be set as `visited`
+            let mut stack = VecDeque::new();
+            let mut enqueued = SmallBitmap::for_interned_values_in(&self.query_graph.nodes);
+            enqueued.insert(from);
+            stack.push_back(from);
+            while let Some(n) = stack.pop_front() {
+                if reachable.contains(n) {
+                    continue;
+                }
+                reachable.insert(n);
+                for prev_node in self.query_graph.nodes.get(n).predecessors.iter() {
+                    if !enqueued.contains(prev_node) && !reachable.contains(prev_node) {
+                        stack.push_back(prev_node);
+                        enqueued.insert(prev_node);
+                    }
+                }
+            }
+        };
+        let mut unreachable_or_visited =
+            SmallBitmap::for_interned_values_in(&self.query_graph.nodes);
+        for (n, _) in self.query_graph.nodes.iter() {
+            if !reachable.contains(n) {
+                unreachable_or_visited.insert(n);
+            }
+        }
+
+        let mut enqueued = SmallBitmap::for_interned_values_in(&self.query_graph.nodes);
+        let mut stack = VecDeque::new();
+
+        enqueued.insert(from);
+        stack.push_back(from);
+
+        while let Some(cur_node) = stack.pop_front() {
+            if !self.query_graph.nodes.get(cur_node).successors.is_subset(&unreachable_or_visited) {
+                stack.push_back(cur_node);
+                continue;
+            }
+            unreachable_or_visited.insert(cur_node);
+            visit(cur_node);
             for prev_node in self.query_graph.nodes.get(cur_node).predecessors.iter() {
-                if !enqueued.contains(prev_node) {
-                    node_stack.push_back(prev_node);
+                if !enqueued.contains(prev_node) && !unreachable_or_visited.contains(prev_node) {
+                    stack.push_back(prev_node);
                     enqueued.insert(prev_node);
                 }
             }
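The two cost functions above now share `traverse_breadth_first_backward`, whose doc comment states the key invariant: a node is visited only once all of its successors have either been visited or cannot reach the starting node at all. A self-contained sketch of the same idea on a plain index-based graph (meilisearch's interned query-graph types are replaced by `usize` node ids; the graph is assumed acyclic):

```rust
use std::collections::VecDeque;

/// Visit every node that can reach `from`, in an order where a node is visited only
/// after all of its successors that can also reach `from` have been visited.
/// Sketch only: assumes an acyclic graph described by successor/predecessor lists.
fn traverse_breadth_first_backward(
    successors: &[Vec<usize>],
    predecessors: &[Vec<usize>],
    from: usize,
    mut visit: impl FnMut(usize),
) {
    let n = successors.len();

    // 1. Which nodes can reach `from`? (backward reachability over predecessors)
    let mut reachable = vec![false; n];
    let mut stack = VecDeque::new();
    stack.push_back(from);
    while let Some(node) = stack.pop_front() {
        if reachable[node] {
            continue;
        }
        reachable[node] = true;
        for &prev in &predecessors[node] {
            stack.push_back(prev);
        }
    }

    // 2. Nodes that cannot reach `from` count as already visited, so they never block anyone.
    let mut visited_or_unreachable: Vec<bool> = reachable.iter().map(|&r| !r).collect();

    // 3. Walk backward from `from`, delaying a node until all of its successors are done.
    let mut enqueued = vec![false; n];
    let mut queue = VecDeque::new();
    queue.push_back(from);
    enqueued[from] = true;
    while let Some(node) = queue.pop_front() {
        if !successors[node].iter().all(|&s| visited_or_unreachable[s]) {
            // Some successor is still pending: retry this node later.
            queue.push_back(node);
            continue;
        }
        visited_or_unreachable[node] = true;
        visit(node);
        for &prev in &predecessors[node] {
            if !enqueued[prev] && !visited_or_unreachable[prev] {
                queue.push_back(prev);
                enqueued[prev] = true;
            }
        }
    }
}

fn main() {
    // Diamond graph 0 -> {1, 2} -> 3; traversing backward from 3 must yield 3 first and 0 last.
    let successors = vec![vec![1, 2], vec![3], vec![3], vec![]];
    let predecessors = vec![vec![], vec![0], vec![0], vec![1, 2]];
    let mut order = Vec::new();
    traverse_breadth_first_backward(&successors, &predecessors, 3, |n| order.push(n));
    assert_eq!(order.first(), Some(&3));
    assert_eq!(order.last(), Some(&0));
    assert_eq!(order.len(), 4);
}
```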
@@ -20,6 +20,8 @@ mod position;
 mod proximity;
 /// Implementation of the `typo` ranking rule
 mod typo;
+/// Implementation of the `words` ranking rule
+mod words;

 use std::collections::BTreeSet;
 use std::hash::Hash;

@@ -33,6 +35,7 @@ pub use position::{PositionCondition, PositionGraph};
 pub use proximity::{ProximityCondition, ProximityGraph};
 use roaring::RoaringBitmap;
 pub use typo::{TypoCondition, TypoGraph};
+pub use words::{WordsCondition, WordsGraph};

 use super::interner::{DedupInterner, FixedSizeInterner, Interned, MappedInterner};
 use super::query_term::LocatedQueryTermSubset;
@@ -50,7 +50,7 @@ impl RankingRuleGraphTrait for TypoGraph {
         // 3-gram -> equivalent to 2 typos
         let base_cost = if term.term_ids.len() == 1 { 0 } else { term.term_ids.len() as u32 };

-        for nbr_typos in 0..=term.term_subset.max_nbr_typos(ctx) {
+        for nbr_typos in 0..=term.term_subset.max_typo_cost(ctx) {
             let mut term = term.clone();
             match nbr_typos {
                 0 => {
milli/src/search/new/ranking_rule_graph/words/mod.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
+use roaring::RoaringBitmap;
+
+use super::{ComputedCondition, RankingRuleGraphTrait};
+use crate::search::new::interner::{DedupInterner, Interned};
+use crate::search::new::query_term::LocatedQueryTermSubset;
+use crate::search::new::resolve_query_graph::compute_query_term_subset_docids;
+use crate::search::new::SearchContext;
+use crate::Result;
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct WordsCondition {
+    term: LocatedQueryTermSubset,
+}
+
+pub enum WordsGraph {}
+
+impl RankingRuleGraphTrait for WordsGraph {
+    type Condition = WordsCondition;
+
+    fn resolve_condition(
+        ctx: &mut SearchContext,
+        condition: &Self::Condition,
+        universe: &RoaringBitmap,
+    ) -> Result<ComputedCondition> {
+        let WordsCondition { term, .. } = condition;
+        // maybe compute_query_term_subset_docids should accept a universe as argument
+        let mut docids = compute_query_term_subset_docids(ctx, &term.term_subset)?;
+        docids &= universe;
+
+        Ok(ComputedCondition {
+            docids,
+            universe_len: universe.len(),
+            start_term_subset: None,
+            end_term_subset: term.clone(),
+        })
+    }
+
+    fn build_edges(
+        _ctx: &mut SearchContext,
+        conditions_interner: &mut DedupInterner<Self::Condition>,
+        _from: Option<&LocatedQueryTermSubset>,
+        to_term: &LocatedQueryTermSubset,
+    ) -> Result<Vec<(u32, Interned<Self::Condition>)>> {
+        Ok(vec![(
+            to_term.term_ids.len() as u32,
+            conditions_interner.insert(WordsCondition { term: to_term.clone() }),
+        )])
+    }
+}
@@ -1,87 +0,0 @@
-use roaring::RoaringBitmap;
-
-use super::logger::SearchLogger;
-use super::query_graph::QueryNode;
-use super::resolve_query_graph::compute_query_graph_docids;
-use super::small_bitmap::SmallBitmap;
-use super::{QueryGraph, RankingRule, RankingRuleOutput, SearchContext};
-use crate::{Result, TermsMatchingStrategy};
-
-pub struct Words {
-    exhausted: bool, // TODO: remove
-    query_graph: Option<QueryGraph>,
-    nodes_to_remove: Vec<SmallBitmap<QueryNode>>,
-    terms_matching_strategy: TermsMatchingStrategy,
-}
-impl Words {
-    pub fn new(terms_matching_strategy: TermsMatchingStrategy) -> Self {
-        Self {
-            exhausted: true,
-            query_graph: None,
-            nodes_to_remove: vec![],
-            terms_matching_strategy,
-        }
-    }
-}
-
-impl<'ctx> RankingRule<'ctx, QueryGraph> for Words {
-    fn id(&self) -> String {
-        "words".to_owned()
-    }
-    fn start_iteration(
-        &mut self,
-        ctx: &mut SearchContext<'ctx>,
-        _logger: &mut dyn SearchLogger<QueryGraph>,
-        _universe: &RoaringBitmap,
-        parent_query_graph: &QueryGraph,
-    ) -> Result<()> {
-        self.exhausted = false;
-        self.query_graph = Some(parent_query_graph.clone());
-        self.nodes_to_remove = match self.terms_matching_strategy {
-            TermsMatchingStrategy::Last => {
-                let mut ns = parent_query_graph.removal_order_for_terms_matching_strategy_last(ctx);
-                ns.reverse();
-                ns
-            }
-            TermsMatchingStrategy::All => {
-                vec![]
-            }
-        };
-        Ok(())
-    }
-
-    fn next_bucket(
-        &mut self,
-        ctx: &mut SearchContext<'ctx>,
-        logger: &mut dyn SearchLogger<QueryGraph>,
-        universe: &RoaringBitmap,
-    ) -> Result<Option<RankingRuleOutput<QueryGraph>>> {
-        if self.exhausted {
-            return Ok(None);
-        }
-        let Some(query_graph) = &mut self.query_graph else { panic!() };
-        logger.log_internal_state(query_graph);
-
-        let this_bucket = compute_query_graph_docids(ctx, query_graph, universe)?;
-
-        let child_query_graph = query_graph.clone();
-
-        if self.nodes_to_remove.is_empty() {
-            self.exhausted = true;
-        } else {
-            let nodes_to_remove = self.nodes_to_remove.pop().unwrap();
-            query_graph.remove_nodes_keep_edges(&nodes_to_remove.iter().collect::<Vec<_>>());
-        }
-        Ok(Some(RankingRuleOutput { query: child_query_graph, candidates: this_bucket }))
-    }
-
-    fn end_iteration(
-        &mut self,
-        _ctx: &mut SearchContext<'ctx>,
-        _logger: &mut dyn SearchLogger<QueryGraph>,
-    ) {
-        self.exhausted = true;
-        self.nodes_to_remove = vec![];
-        self.query_graph = None;
-    }
-}