mirror of https://github.com/meilisearch/meilisearch.git, synced 2025-11-04 01:46:28 +00:00

Plug new indexer
130 milli/src/update/index_documents/extract/extract_docid_word_positions.rs Normal file
@@ -0,0 +1,130 @@
use std::collections::HashSet;
use std::convert::TryInto;
use std::fs::File;
use std::{io, mem, str};

use meilisearch_tokenizer::{Analyzer, AnalyzerConfig, Token};
use roaring::RoaringBitmap;
use serde_json::Value;

use super::helpers::{concat_u32s_array, create_sorter, sorter_into_reader, GrenadParameters};
use crate::error::{InternalError, SerializationError};
use crate::proximity::ONE_ATTRIBUTE;
use crate::{FieldId, Result};

/// Extracts the words and the positions where they appear,
/// prefixed by the document id.
///
/// Returns the generated internal documents ids and a grenad reader
/// with the list of extracted words from the given chunk of documents.
pub fn extract_docid_word_positions<R: io::Read>(
    mut obkv_documents: grenad::Reader<R>,
    indexer: GrenadParameters,
    searchable_fields: &Option<HashSet<FieldId>>,
) -> Result<(RoaringBitmap, grenad::Reader<File>)> {
    let max_memory = indexer.max_memory_by_thread();

    let mut documents_ids = RoaringBitmap::new();
    let mut docid_word_positions_sorter = create_sorter(
        concat_u32s_array,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory,
    );

    let mut key_buffer = Vec::new();
    let mut field_buffer = String::new();
    let analyzer = Analyzer::<Vec<u8>>::new(AnalyzerConfig::default());

    while let Some((key, value)) = obkv_documents.next()? {
        let document_id = key
            .try_into()
            .map(u32::from_be_bytes)
            .map_err(|_| SerializationError::InvalidNumberSerialization)?;
        let obkv = obkv::KvReader::<FieldId>::new(value);

        documents_ids.push(document_id);
        key_buffer.clear();
        key_buffer.extend_from_slice(&document_id.to_be_bytes());

        for (field_id, field_bytes) in obkv.iter() {
            if searchable_fields.as_ref().map_or(true, |sf| sf.contains(&field_id)) {
                let value =
                    serde_json::from_slice(field_bytes).map_err(InternalError::SerdeJson)?;
                field_buffer.clear();
                if let Some(field) = json_to_string(&value, &mut field_buffer) {
                    let analyzed = analyzer.analyze(field);
                    let tokens = analyzed
                        .tokens()
                        .filter(Token::is_word)
                        .enumerate()
                        .take_while(|(i, _)| (*i as u32) < ONE_ATTRIBUTE);

                    for (index, token) in tokens {
                        let token = token.text().trim();
                        key_buffer.truncate(mem::size_of::<u32>());
                        key_buffer.extend_from_slice(token.as_bytes());

                        let position: u32 = index
                            .try_into()
                            .map_err(|_| SerializationError::InvalidNumberSerialization)?;
                        let position = field_id as u32 * ONE_ATTRIBUTE + position;
                        docid_word_positions_sorter.insert(&key_buffer, &position.to_ne_bytes())?;
                    }
                }
            }
        }
    }

    sorter_into_reader(docid_word_positions_sorter, indexer).map(|reader| (documents_ids, reader))
}

/// Transform a JSON value into a string that can be indexed.
fn json_to_string<'a>(value: &'a Value, buffer: &'a mut String) -> Option<&'a str> {
    fn inner(value: &Value, output: &mut String) -> bool {
        use std::fmt::Write;
        match value {
            Value::Null => false,
            Value::Bool(boolean) => write!(output, "{}", boolean).is_ok(),
            Value::Number(number) => write!(output, "{}", number).is_ok(),
            Value::String(string) => write!(output, "{}", string).is_ok(),
            Value::Array(array) => {
                let mut count = 0;
                for value in array {
                    if inner(value, output) {
                        output.push_str(". ");
                        count += 1;
                    }
                }
                // check that at least one value was written
                count != 0
            }
            Value::Object(object) => {
                let mut buffer = String::new();
                let mut count = 0;
                for (key, value) in object {
                    buffer.clear();
                    let _ = write!(&mut buffer, "{}: ", key);
                    if inner(value, &mut buffer) {
                        buffer.push_str(". ");
                        // We write the "key: value. " pair only when
                        // we are sure that the value can be written.
                        output.push_str(&buffer);
                        count += 1;
                    }
                }
                // check that at least one value was written
                count != 0
            }
        }
    }

    if let Value::String(string) = value {
        Some(&string)
    } else if inner(value, buffer) {
        Some(buffer)
    } else {
        None
    }
}
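As an aside, here is a minimal standalone sketch of the key and position layout this extractor writes: the key is the big-endian document id followed by the word bytes, and each position packs the field id with the token index. The ONE_ATTRIBUTE value below is an assumption for illustration only; the real constant lives in crate::proximity.

// Illustrative sketch, not part of the commit.
const ONE_ATTRIBUTE: u32 = 1000; // assumed stride; the real value is defined in crate::proximity

fn docid_word_key(document_id: u32, word: &str) -> Vec<u8> {
    let mut key = Vec::new();
    key.extend_from_slice(&document_id.to_be_bytes()); // big-endian docid prefix
    key.extend_from_slice(word.as_bytes()); // followed by the raw word bytes
    key
}

fn main() {
    let field_id: u32 = 2;
    let token_index: u32 = 5;
    // The stored position is relative to the field the word belongs to.
    let position = field_id * ONE_ATTRIBUTE + token_index;
    println!("{:?} => {}", docid_word_key(42, "kitchen"), position);
}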
41 milli/src/update/index_documents/extract/extract_facet_number_docids.rs Normal file
@@ -0,0 +1,41 @@
use std::fs::File;
use std::io;

use heed::{BytesDecode, BytesEncode};

use super::helpers::{
    create_sorter, merge_cbo_roaring_bitmaps, sorter_into_reader, GrenadParameters,
};
use crate::heed_codec::facet::{FacetLevelValueF64Codec, FieldDocIdFacetF64Codec};
use crate::Result;

/// Extracts the facet number and the documents ids where this facet number appears.
///
/// Returns a grenad reader with the list of extracted facet numbers and
/// documents ids from the given chunk of docid facet number positions.
pub fn extract_facet_number_docids<R: io::Read>(
    mut docid_fid_facet_number: grenad::Reader<R>,
    indexer: GrenadParameters,
) -> Result<grenad::Reader<File>> {
    let max_memory = indexer.max_memory_by_thread();

    let mut facet_number_docids_sorter = create_sorter(
        merge_cbo_roaring_bitmaps,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory,
    );

    while let Some((key_bytes, _)) = docid_fid_facet_number.next()? {
        let (field_id, document_id, number) =
            FieldDocIdFacetF64Codec::bytes_decode(key_bytes).unwrap();

        let key = (field_id, 0, number, number);
        let key_bytes = FacetLevelValueF64Codec::bytes_encode(&key).unwrap();

        facet_number_docids_sorter.insert(key_bytes, document_id.to_ne_bytes())?;
    }

    sorter_into_reader(facet_number_docids_sorter, indexer)
}
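For illustration, a minimal sketch of the level-0 key shape that FacetLevelValueF64Codec encodes, written here as a plain tuple rather than the codec's byte output: at level 0 the facet number is stored as both the left and the right bound, and higher levels built later cover ranges of numbers.

// Illustrative sketch, not part of the commit; types are assumptions for the example.
fn level0_facet_number_key(field_id: u16, number: f64) -> (u16, u8, f64, f64) {
    // level 0: left bound == right bound == the facet number itself
    (field_id, 0, number, number)
}

fn main() {
    assert_eq!(level0_facet_number_key(3, 42.5), (3, 0, 42.5, 42.5));
}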
57 milli/src/update/index_documents/extract/extract_facet_string_docids.rs Normal file
@@ -0,0 +1,57 @@
use std::fs::File;
use std::iter::FromIterator;
use std::{io, str};

use roaring::RoaringBitmap;

use super::helpers::{
    create_sorter, keep_first_prefix_value_merge_roaring_bitmaps, sorter_into_reader,
    try_split_array_at, GrenadParameters,
};
use crate::heed_codec::facet::{encode_prefix_string, FacetStringLevelZeroCodec};
use crate::{FieldId, Result};

/// Extracts the facet string and the documents ids where this facet string appears.
///
/// Returns a grenad reader with the list of extracted facet strings and
/// documents ids from the given chunk of docid facet string positions.
pub fn extract_facet_string_docids<R: io::Read>(
    mut docid_fid_facet_string: grenad::Reader<R>,
    indexer: GrenadParameters,
) -> Result<grenad::Reader<File>> {
    let max_memory = indexer.max_memory_by_thread();

    let mut facet_string_docids_sorter = create_sorter(
        keep_first_prefix_value_merge_roaring_bitmaps,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory,
    );

    let mut key_buffer = Vec::new();
    let mut value_buffer = Vec::new();
    while let Some((key, original_value_bytes)) = docid_fid_facet_string.next()? {
        let (field_id_bytes, bytes) = try_split_array_at(key).unwrap();
        let field_id = FieldId::from_be_bytes(field_id_bytes);
        let (document_id_bytes, normalized_value_bytes) = try_split_array_at(bytes).unwrap();
        let document_id = u32::from_be_bytes(document_id_bytes);
        let original_value = str::from_utf8(original_value_bytes)?;

        key_buffer.clear();
        FacetStringLevelZeroCodec::serialize_into(
            field_id,
            str::from_utf8(normalized_value_bytes)?,
            &mut key_buffer,
        );

        value_buffer.clear();
        encode_prefix_string(original_value, &mut value_buffer)?;
        let bitmap = RoaringBitmap::from_iter(Some(document_id));
        bitmap.serialize_into(&mut value_buffer)?;

        facet_string_docids_sorter.insert(&key_buffer, &value_buffer)?;
    }

    sorter_into_reader(facet_string_docids_sorter, indexer)
}
118 milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs Normal file
@@ -0,0 +1,118 @@
use std::collections::HashSet;
use std::fs::File;
use std::io;
use std::mem::size_of;

use heed::zerocopy::AsBytes;
use serde_json::Value;

use super::helpers::{create_sorter, keep_first, sorter_into_reader, GrenadParameters};
use crate::error::InternalError;
use crate::facet::value_encoding::f64_into_bytes;
use crate::{DocumentId, FieldId, Result};

/// Extracts the facet values of each faceted field of each document.
///
/// Returns the generated grenad reader containing the docid, the fid and the original value as key
/// and the normalized value as value extracted from the given chunk of documents.
pub fn extract_fid_docid_facet_values<R: io::Read>(
    mut obkv_documents: grenad::Reader<R>,
    indexer: GrenadParameters,
    faceted_fields: &HashSet<FieldId>,
) -> Result<(grenad::Reader<File>, grenad::Reader<File>)> {
    let max_memory = indexer.max_memory_by_thread();

    let mut fid_docid_facet_numbers_sorter = create_sorter(
        keep_first,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory.map(|m| m / 2),
    );

    let mut fid_docid_facet_strings_sorter = create_sorter(
        keep_first,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory.map(|m| m / 2),
    );

    let mut key_buffer = Vec::new();
    while let Some((docid_bytes, value)) = obkv_documents.next()? {
        let obkv = obkv::KvReader::new(value);

        for (field_id, field_bytes) in obkv.iter() {
            if faceted_fields.contains(&field_id) {
                let value =
                    serde_json::from_slice(field_bytes).map_err(InternalError::SerdeJson)?;
                let (numbers, strings) = extract_facet_values(&value);

                key_buffer.clear();

                // prefix key with the field_id and the document_id
                key_buffer.extend_from_slice(&field_id.to_be_bytes());
                key_buffer.extend_from_slice(&docid_bytes);

                // insert facet numbers in sorter
                for number in numbers {
                    key_buffer.truncate(size_of::<FieldId>() + size_of::<DocumentId>());
                    let value_bytes = f64_into_bytes(number).unwrap(); // invalid float
                    key_buffer.extend_from_slice(&value_bytes);
                    key_buffer.extend_from_slice(&number.to_be_bytes());

                    fid_docid_facet_numbers_sorter.insert(&key_buffer, ().as_bytes())?;
                }

                // insert normalized and original facet strings in sorter
                for (normalized, original) in strings {
                    key_buffer.truncate(size_of::<FieldId>() + size_of::<DocumentId>());
                    key_buffer.extend_from_slice(normalized.as_bytes());
                    fid_docid_facet_strings_sorter.insert(&key_buffer, original.as_bytes())?;
                }
            }
        }
    }

    Ok((
        sorter_into_reader(fid_docid_facet_numbers_sorter, indexer.clone())?,
        sorter_into_reader(fid_docid_facet_strings_sorter, indexer)?,
    ))
}

fn extract_facet_values(value: &Value) -> (Vec<f64>, Vec<(String, String)>) {
    fn inner_extract_facet_values(
        value: &Value,
        can_recurse: bool,
        output_numbers: &mut Vec<f64>,
        output_strings: &mut Vec<(String, String)>,
    ) {
        match value {
            Value::Null => (),
            Value::Bool(b) => output_strings.push((b.to_string(), b.to_string())),
            Value::Number(number) => {
                if let Some(float) = number.as_f64() {
                    output_numbers.push(float);
                }
            }
            Value::String(original) => {
                let normalized = original.trim().to_lowercase();
                output_strings.push((normalized, original.clone()));
            }
            Value::Array(values) => {
                if can_recurse {
                    for value in values {
                        inner_extract_facet_values(value, false, output_numbers, output_strings);
                    }
                }
            }
            Value::Object(_) => (),
        }
    }

    let mut facet_number_values = Vec::new();
    let mut facet_string_values = Vec::new();
    inner_extract_facet_values(value, true, &mut facet_number_values, &mut facet_string_values);

    (facet_number_values, facet_string_values)
}
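For illustration, a minimal standalone sketch of the buffer-reuse pattern above: the field id and document id prefix is written once, then the buffer is truncated back to that prefix before appending each facet value. The FieldId and DocumentId byte widths below are assumptions made only to keep the example self-contained.

// Illustrative sketch, not part of the commit.
fn main() {
    let prefix_len = 2 + 4; // assumed sizes of FieldId (u16) and DocumentId (u32) in bytes
    let mut key_buffer: Vec<u8> = Vec::new();
    key_buffer.extend_from_slice(&3u16.to_be_bytes()); // field id
    key_buffer.extend_from_slice(&42u32.to_be_bytes()); // document id

    for number in [1.5f64, 2.5, 10.0] {
        key_buffer.truncate(prefix_len); // keep only the fid + docid prefix
        key_buffer.extend_from_slice(&number.to_be_bytes()); // append this facet value
        println!("{:?}", key_buffer);
    }
}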
91 milli/src/update/index_documents/extract/extract_fid_word_count_docids.rs Normal file
@@ -0,0 +1,91 @@
use std::collections::HashMap;
use std::fs::File;
use std::{cmp, io};

use grenad::Sorter;

use super::helpers::{
    create_sorter, merge_cbo_roaring_bitmaps, read_u32_ne_bytes, sorter_into_reader,
    try_split_array_at, GrenadParameters, MergeFn,
};
use crate::proximity::extract_position;
use crate::{DocumentId, FieldId, Result};

/// Extracts the field id word count and the documents ids where
/// this field id with this amount of words appears.
///
/// Returns a grenad reader with the list of extracted field id word counts
/// and documents ids from the given chunk of docid word positions.
pub fn extract_fid_word_count_docids<R: io::Read>(
    mut docid_word_positions: grenad::Reader<R>,
    indexer: GrenadParameters,
) -> Result<grenad::Reader<File>> {
    let max_memory = indexer.max_memory_by_thread();

    let mut fid_word_count_docids_sorter = create_sorter(
        merge_cbo_roaring_bitmaps,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory,
    );

    // This map is assumed to not consume a lot of memory.
    let mut document_fid_wordcount = HashMap::new();
    let mut current_document_id = None;

    while let Some((key, value)) = docid_word_positions.next()? {
        let (document_id_bytes, _word_bytes) = try_split_array_at(key).unwrap();
        let document_id = u32::from_be_bytes(document_id_bytes);

        let curr_document_id = *current_document_id.get_or_insert(document_id);
        if curr_document_id != document_id {
            drain_document_fid_wordcount_into_sorter(
                &mut fid_word_count_docids_sorter,
                &mut document_fid_wordcount,
                curr_document_id,
            )?;
            current_document_id = Some(document_id);
        }

        for position in read_u32_ne_bytes(value) {
            let (field_id, position) = extract_position(position);
            let word_count = position + 1;

            let value = document_fid_wordcount.entry(field_id as FieldId).or_insert(0);
            *value = cmp::max(*value, word_count);
        }
    }

    if let Some(document_id) = current_document_id {
        // We must make sure that we don't lose the current document field id
        // word count map when we reach the end of the chunk.
        drain_document_fid_wordcount_into_sorter(
            &mut fid_word_count_docids_sorter,
            &mut document_fid_wordcount,
            document_id,
        )?;
    }

    sorter_into_reader(fid_word_count_docids_sorter, indexer)
}

fn drain_document_fid_wordcount_into_sorter(
    fid_word_count_docids_sorter: &mut Sorter<MergeFn>,
    document_fid_wordcount: &mut HashMap<FieldId, u32>,
    document_id: DocumentId,
) -> Result<()> {
    let mut key_buffer = Vec::new();

    for (fid, count) in document_fid_wordcount.drain() {
        if count <= 10 {
            key_buffer.clear();
            key_buffer.extend_from_slice(&fid.to_be_bytes());
            key_buffer.push(count as u8);

            fid_word_count_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
        }
    }

    Ok(())
}
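For illustration, a minimal standalone sketch of the word-count rule above, assuming extract_position has already yielded (field id, field-relative position) pairs: the count for a field is the highest position seen plus one, and only counts of at most 10 are later written to the sorter.

// Illustrative sketch, not part of the commit.
use std::collections::HashMap;

fn main() {
    let mut document_fid_wordcount: HashMap<u16, u32> = HashMap::new();
    // (field_id, field-relative position) pairs for a single document.
    for (field_id, position) in [(0u16, 0u32), (0, 3), (1, 7), (0, 2)] {
        let word_count = position + 1;
        let entry = document_fid_wordcount.entry(field_id).or_insert(0);
        *entry = (*entry).max(word_count); // keep the largest count seen for this field
    }
    assert_eq!(document_fid_wordcount[&0u16], 4); // field 0 contains 4 words
    assert_eq!(document_fid_wordcount[&1u16], 8); // field 1 contains 8 words
}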
42 milli/src/update/index_documents/extract/extract_word_docids.rs Normal file
@@ -0,0 +1,42 @@
use std::fs::File;
use std::io;
use std::iter::FromIterator;

use roaring::RoaringBitmap;

use super::helpers::{
    create_sorter, merge_roaring_bitmaps, serialize_roaring_bitmap, sorter_into_reader,
    try_split_array_at, GrenadParameters,
};
use crate::Result;

/// Extracts the word and the documents ids where this word appears.
///
/// Returns a grenad reader with the list of extracted words and
/// documents ids from the given chunk of docid word positions.
pub fn extract_word_docids<R: io::Read>(
    mut docid_word_positions: grenad::Reader<R>,
    indexer: GrenadParameters,
) -> Result<grenad::Reader<File>> {
    let max_memory = indexer.max_memory_by_thread();

    let mut word_docids_sorter = create_sorter(
        merge_roaring_bitmaps,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory,
    );

    let mut value_buffer = Vec::new();
    while let Some((key, _value)) = docid_word_positions.next()? {
        let (document_id_bytes, word_bytes) = try_split_array_at(key).unwrap();
        let document_id = u32::from_be_bytes(document_id_bytes);

        let bitmap = RoaringBitmap::from_iter(Some(document_id));
        serialize_roaring_bitmap(&bitmap, &mut value_buffer)?;
        word_docids_sorter.insert(word_bytes, &value_buffer)?;
    }

    sorter_into_reader(word_docids_sorter, indexer)
}
46 milli/src/update/index_documents/extract/extract_word_level_position_docids.rs Normal file
@@ -0,0 +1,46 @@
use std::fs::File;
use std::io;

use super::helpers::{
    create_sorter, merge_cbo_roaring_bitmaps, read_u32_ne_bytes, sorter_into_reader,
    try_split_array_at, GrenadParameters,
};
use crate::{DocumentId, Result};

/// Extracts the word positions and the documents ids where this word appears.
///
/// Returns a grenad reader with the list of extracted words at positions and
/// documents ids from the given chunk of docid word positions.
pub fn extract_word_level_position_docids<R: io::Read>(
    mut docid_word_positions: grenad::Reader<R>,
    indexer: GrenadParameters,
) -> Result<grenad::Reader<File>> {
    let max_memory = indexer.max_memory_by_thread();

    let mut word_level_position_docids_sorter = create_sorter(
        merge_cbo_roaring_bitmaps,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory,
    );

    let mut key_buffer = Vec::new();
    while let Some((key, value)) = docid_word_positions.next()? {
        let (document_id_bytes, word_bytes) = try_split_array_at(key).unwrap();
        let document_id = DocumentId::from_be_bytes(document_id_bytes);

        for position in read_u32_ne_bytes(value) {
            key_buffer.clear();
            key_buffer.extend_from_slice(word_bytes);
            key_buffer.push(0); // tree level

            // Levels are composed of left and right bounds.
            key_buffer.extend_from_slice(&position.to_be_bytes());
            key_buffer.extend_from_slice(&position.to_be_bytes());

            word_level_position_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
        }
    }

    sorter_into_reader(word_level_position_docids_sorter, indexer)
}
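For illustration, a minimal standalone sketch of the level-0 key layout this extractor writes: the word bytes, a zero tree level, then the position repeated as both the left and the right bound of the range.

// Illustrative sketch, not part of the commit.
fn word_level0_position_key(word: &str, position: u32) -> Vec<u8> {
    let mut key = Vec::new();
    key.extend_from_slice(word.as_bytes());
    key.push(0); // tree level 0
    key.extend_from_slice(&position.to_be_bytes()); // left bound
    key.extend_from_slice(&position.to_be_bytes()); // right bound == left bound at level 0
    key
}

fn main() {
    println!("{:?}", word_level0_position_key("kitchen", 7));
}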
196 milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs Normal file
@@ -0,0 +1,196 @@
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap};
use std::fs::File;
use std::time::{Duration, Instant};
use std::{cmp, io, mem, str, vec};

use log::debug;

use super::helpers::{
    create_sorter, merge_cbo_roaring_bitmaps, read_u32_ne_bytes, sorter_into_reader,
    try_split_array_at, GrenadParameters, MergeFn,
};
use crate::proximity::{positions_proximity, MAX_DISTANCE};
use crate::{DocumentId, Result};

/// Extracts the best proximity between pairs of words and the documents ids where this pair appears.
///
/// Returns a grenad reader with the list of extracted word pairs proximities and
/// documents ids from the given chunk of docid word positions.
pub fn extract_word_pair_proximity_docids<R: io::Read>(
    mut docid_word_positions: grenad::Reader<R>,
    indexer: GrenadParameters,
) -> Result<grenad::Reader<File>> {
    let max_memory = indexer.max_memory_by_thread();

    let mut word_pair_proximity_docids_sorter = create_sorter(
        merge_cbo_roaring_bitmaps,
        indexer.chunk_compression_type,
        indexer.chunk_compression_level,
        indexer.max_nb_chunks,
        max_memory,
    );

    let mut number_of_documents = 0;
    let mut total_time_aggregation = Duration::default();
    let mut total_time_grenad_insert = Duration::default();

    // This map is assumed to not consume a lot of memory.
    let mut document_word_positions_heap = BinaryHeap::new();
    let mut current_document_id = None;

    while let Some((key, value)) = docid_word_positions.next()? {
        let (document_id_bytes, word_bytes) = try_split_array_at(key).unwrap();
        let document_id = u32::from_be_bytes(document_id_bytes);
        let word = str::from_utf8(word_bytes)?;

        let curr_document_id = *current_document_id.get_or_insert(document_id);
        if curr_document_id != document_id {
            let document_word_positions_heap = mem::take(&mut document_word_positions_heap);
            document_word_positions_into_sorter(
                curr_document_id,
                document_word_positions_heap,
                &mut word_pair_proximity_docids_sorter,
                &mut total_time_aggregation,
                &mut total_time_grenad_insert,
            )?;
            number_of_documents += 1;
            current_document_id = Some(document_id);
        }

        let word = word.to_string();
        let mut iter = read_u32_ne_bytes(value).collect::<Vec<_>>().into_iter();
        if let Some(position) = iter.next() {
            document_word_positions_heap.push(PeekedWordPosition { word, position, iter });
        }
    }

    if let Some(document_id) = current_document_id {
        // We must make sure that we don't lose the word positions of the last
        // document when we reach the end of the chunk.
        let document_word_positions_heap = mem::take(&mut document_word_positions_heap);
        document_word_positions_into_sorter(
            document_id,
            document_word_positions_heap,
            &mut word_pair_proximity_docids_sorter,
            &mut total_time_aggregation,
            &mut total_time_grenad_insert,
        )?;
    }

    debug!(
        "Number of documents {}
        - we took {:02?} to aggregate proximities
        - we took {:02?} to grenad insert those proximities",
        number_of_documents, total_time_aggregation, total_time_grenad_insert,
    );

    sorter_into_reader(word_pair_proximity_docids_sorter, indexer)
}

/// Fills the list of all pairs of words with the shortest proximity between 1 and 7 inclusive.
///
/// This list is used by the engine to calculate the documents containing words that are
/// close to each other.
fn document_word_positions_into_sorter<'b>(
    document_id: DocumentId,
    mut word_positions_heap: BinaryHeap<PeekedWordPosition<vec::IntoIter<u32>>>,
    word_pair_proximity_docids_sorter: &mut grenad::Sorter<MergeFn>,
    total_time_aggregation: &mut Duration,
    total_time_grenad_insert: &mut Duration,
) -> Result<()> {
    let before_aggregating = Instant::now();
    let mut word_pair_proximity = HashMap::new();
    let mut ordered_peeked_word_positions = Vec::new();
    while !word_positions_heap.is_empty() {
        while let Some(peeked_word_position) = word_positions_heap.pop() {
            ordered_peeked_word_positions.push(peeked_word_position);
            if ordered_peeked_word_positions.len() == 7 {
                break;
            }
        }

        if let Some((head, tail)) = ordered_peeked_word_positions.split_first() {
            for PeekedWordPosition { word, position, .. } in tail {
                let prox = positions_proximity(head.position, *position);
                if prox > 0 && prox < MAX_DISTANCE {
                    word_pair_proximity
                        .entry((head.word.clone(), word.clone()))
                        .and_modify(|p| {
                            *p = cmp::min(*p, prox);
                        })
                        .or_insert(prox);

                    // We also compute the inverse proximity.
                    let prox = prox + 1;
                    if prox < MAX_DISTANCE {
                        word_pair_proximity
                            .entry((word.clone(), head.word.clone()))
                            .and_modify(|p| {
                                *p = cmp::min(*p, prox);
                            })
                            .or_insert(prox);
                    }
                }
            }

            // Push the tail in the heap.
            let tail_iter = ordered_peeked_word_positions.drain(1..);
            word_positions_heap.extend(tail_iter);

            // Advance the head and push it in the heap.
            if let Some(mut head) = ordered_peeked_word_positions.pop() {
                if let Some(next_position) = head.iter.next() {
                    word_positions_heap.push(PeekedWordPosition {
                        word: head.word,
                        position: next_position,
                        iter: head.iter,
                    });
                }
            }
        }
    }

    *total_time_aggregation += before_aggregating.elapsed();

    let mut key_buffer = Vec::new();
    for ((w1, w2), prox) in word_pair_proximity {
        key_buffer.clear();
        key_buffer.extend_from_slice(w1.as_bytes());
        key_buffer.push(0);
        key_buffer.extend_from_slice(w2.as_bytes());
        key_buffer.push(prox as u8);

        let before_grenad_insert = Instant::now();
        word_pair_proximity_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
        *total_time_grenad_insert += before_grenad_insert.elapsed();
    }

    Ok(())
}

struct PeekedWordPosition<I> {
    word: String,
    position: u32,
    iter: I,
}

impl<I> Ord for PeekedWordPosition<I> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.position.cmp(&other.position).reverse()
    }
}

impl<I> PartialOrd for PeekedWordPosition<I> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl<I> Eq for PeekedWordPosition<I> {}

impl<I> PartialEq for PeekedWordPosition<I> {
    fn eq(&self, other: &Self) -> bool {
        self.position == other.position
    }
}
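For illustration, a minimal standalone sketch of how the shortest proximity per word pair is kept. The positions_proximity function and MAX_DISTANCE value below are simplified stand-ins for the definitions in crate::proximity, used only to make the example runnable.

// Illustrative sketch, not part of the commit.
use std::collections::HashMap;

// Simplified stand-in for crate::proximity::positions_proximity.
fn positions_proximity(a: u32, b: u32) -> u32 {
    if a < b { b - a } else { (a - b) + 1 }
}

fn main() {
    const MAX_DISTANCE: u32 = 8; // assumed value of crate::proximity::MAX_DISTANCE
    let mut word_pair_proximity: HashMap<(&str, &str), u32> = HashMap::new();

    // "new" appears at positions 0 and 10, "indexer" at position 1.
    for (w1, p1, w2, p2) in [("new", 0u32, "indexer", 1u32), ("new", 10, "indexer", 1)] {
        let prox = positions_proximity(p1, p2);
        if prox > 0 && prox < MAX_DISTANCE {
            word_pair_proximity
                .entry((w1, w2))
                .and_modify(|p| *p = (*p).min(prox))
                .or_insert(prox);
        }
    }
    // Only the shortest proximity for the pair is kept; the distant occurrence is ignored.
    assert_eq!(word_pair_proximity[&("new", "indexer")], 1);
}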
							
								
								
									
199 milli/src/update/index_documents/extract/mod.rs Normal file
@@ -0,0 +1,199 @@
mod extract_docid_word_positions;
mod extract_facet_number_docids;
mod extract_facet_string_docids;
mod extract_fid_docid_facet_values;
mod extract_fid_word_count_docids;
mod extract_word_docids;
mod extract_word_level_position_docids;
mod extract_word_pair_proximity_docids;

use std::collections::HashSet;
use std::fs::File;

use crossbeam_channel::Sender;
use rayon::prelude::*;

use self::extract_docid_word_positions::extract_docid_word_positions;
use self::extract_facet_number_docids::extract_facet_number_docids;
use self::extract_facet_string_docids::extract_facet_string_docids;
use self::extract_fid_docid_facet_values::extract_fid_docid_facet_values;
use self::extract_fid_word_count_docids::extract_fid_word_count_docids;
use self::extract_word_docids::extract_word_docids;
use self::extract_word_level_position_docids::extract_word_level_position_docids;
use self::extract_word_pair_proximity_docids::extract_word_pair_proximity_docids;
use super::helpers::{
    into_clonable_grenad, keep_first_prefix_value_merge_roaring_bitmaps, merge_cbo_roaring_bitmaps,
    merge_readers, merge_roaring_bitmaps, CursorClonableMmap, GrenadParameters, MergeFn,
};
use super::{helpers, TypedChunk};
use crate::{FieldId, Result};

/// Extract data for each database from obkv documents in parallel.
/// Send data in grenad files over the provided Sender.
pub(crate) fn data_from_obkv_documents(
    obkv_chunks: impl Iterator<Item = Result<grenad::Reader<File>>> + Send,
    indexer: GrenadParameters,
    lmdb_writer_sx: Sender<TypedChunk>,
    searchable_fields: Option<HashSet<FieldId>>,
    faceted_fields: HashSet<FieldId>,
) -> Result<()> {
    let result: Result<(Vec<_>, (Vec<_>, Vec<_>))> = obkv_chunks
        .par_bridge()
        .map(|result| {
            let documents_chunk = result.and_then(|c| unsafe { into_clonable_grenad(c) }).unwrap();

            lmdb_writer_sx.send(TypedChunk::Documents(documents_chunk.clone())).unwrap();

            let (docid_word_positions_chunk, docid_fid_facet_values_chunks): (
                Result<_>,
                Result<_>,
            ) = rayon::join(
                || {
                    let (documents_ids, docid_word_positions_chunk) = extract_docid_word_positions(
                        documents_chunk.clone(),
                        indexer.clone(),
                        &searchable_fields,
                    )?;

                    // send documents_ids to DB writer
                    lmdb_writer_sx.send(TypedChunk::NewDocumentsIds(documents_ids)).unwrap();

                    // send docid_word_positions_chunk to DB writer
                    let docid_word_positions_chunk =
                        unsafe { into_clonable_grenad(docid_word_positions_chunk)? };
                    lmdb_writer_sx
                        .send(TypedChunk::DocidWordPositions(docid_word_positions_chunk.clone()))
                        .unwrap();
                    Ok(docid_word_positions_chunk)
                },
                || {
                    let (docid_fid_facet_numbers_chunk, docid_fid_facet_strings_chunk) =
                        extract_fid_docid_facet_values(
                            documents_chunk.clone(),
                            indexer.clone(),
                            &faceted_fields,
                        )?;

                    // send docid_fid_facet_numbers_chunk to DB writer
                    let docid_fid_facet_numbers_chunk =
                        unsafe { into_clonable_grenad(docid_fid_facet_numbers_chunk)? };
                    lmdb_writer_sx
                        .send(TypedChunk::FieldIdDocidFacetNumbers(
                            docid_fid_facet_numbers_chunk.clone(),
                        ))
                        .unwrap();

                    // send docid_fid_facet_strings_chunk to DB writer
                    let docid_fid_facet_strings_chunk =
                        unsafe { into_clonable_grenad(docid_fid_facet_strings_chunk)? };
                    lmdb_writer_sx
                        .send(TypedChunk::FieldIdDocidFacetStrings(
                            docid_fid_facet_strings_chunk.clone(),
                        ))
                        .unwrap();

                    Ok((docid_fid_facet_numbers_chunk, docid_fid_facet_strings_chunk))
                },
            );
            Ok((docid_word_positions_chunk?, docid_fid_facet_values_chunks?))
        })
        .collect();

    let (
        docid_word_positions_chunks,
        (docid_fid_facet_numbers_chunks, docid_fid_facet_strings_chunks),
    ) = result?;

    spawn_extraction_task(
        docid_word_positions_chunks.clone(),
        indexer.clone(),
        lmdb_writer_sx.clone(),
        extract_word_pair_proximity_docids,
        merge_cbo_roaring_bitmaps,
        TypedChunk::WordPairProximityDocids,
        "word-pair-proximity-docids",
    );

    spawn_extraction_task(
        docid_word_positions_chunks.clone(),
        indexer.clone(),
        lmdb_writer_sx.clone(),
        extract_fid_word_count_docids,
        merge_cbo_roaring_bitmaps,
        TypedChunk::FieldIdWordcountDocids,
        "field-id-wordcount-docids",
    );

    spawn_extraction_task(
        docid_word_positions_chunks.clone(),
        indexer.clone(),
        lmdb_writer_sx.clone(),
        extract_word_docids,
        merge_roaring_bitmaps,
        TypedChunk::WordDocids,
        "word-docids",
    );

    spawn_extraction_task(
        docid_word_positions_chunks.clone(),
        indexer.clone(),
        lmdb_writer_sx.clone(),
        extract_word_level_position_docids,
        merge_cbo_roaring_bitmaps,
        TypedChunk::WordLevelPositionDocids,
        "word-level-position-docids",
    );

    spawn_extraction_task(
        docid_fid_facet_strings_chunks.clone(),
        indexer.clone(),
        lmdb_writer_sx.clone(),
        extract_facet_string_docids,
        keep_first_prefix_value_merge_roaring_bitmaps,
        TypedChunk::FieldIdFacetStringDocids,
        "field-id-facet-string-docids",
    );

    spawn_extraction_task(
        docid_fid_facet_numbers_chunks.clone(),
        indexer.clone(),
        lmdb_writer_sx.clone(),
        extract_facet_number_docids,
        merge_cbo_roaring_bitmaps,
        TypedChunk::FieldIdFacetNumberDocids,
        "field-id-facet-number-docids",
    );

    Ok(())
}

/// Spawn a new task to extract data for a specific DB using extract_fn.
/// Generated grenad chunks are merged using the merge_fn.
/// The merged chunks are serialized as a TypedChunk using the serialize_fn
/// and sent into lmdb_writer_sx.
fn spawn_extraction_task<FE, FS>(
    chunks: Vec<grenad::Reader<CursorClonableMmap>>,
    indexer: GrenadParameters,
    lmdb_writer_sx: Sender<TypedChunk>,
    extract_fn: FE,
    merge_fn: MergeFn,
    serialize_fn: FS,
    name: &'static str,
) where
    FE: Fn(grenad::Reader<CursorClonableMmap>, GrenadParameters) -> Result<grenad::Reader<File>>
        + Sync
        + Send
        + 'static,
    FS: Fn(grenad::Reader<File>) -> TypedChunk + Sync + Send + 'static,
{
    rayon::spawn(move || {
        let chunks: Vec<_> = chunks
            .into_par_iter()
            .map(|chunk| extract_fn(chunk, indexer.clone()).unwrap())
            .collect();
        rayon::spawn(move || {
            let reader = merge_readers(chunks, merge_fn, indexer).unwrap();
            lmdb_writer_sx.send(serialize_fn(reader)).unwrap();
        });
    });
}
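For illustration, a minimal standalone sketch of the fan-out pattern spawn_extraction_task implements, reduced to std threads and channels instead of rayon and crossbeam: every chunk is extracted independently, and all results are funneled to a single writer through one channel, as lmdb_writer_sx does for the LMDB writer.

// Illustrative sketch, not part of the commit.
use std::sync::mpsc;
use std::thread;

fn main() {
    let (sender, receiver) = mpsc::channel();

    let chunks = vec![vec![1u32, 2, 3], vec![4, 5, 6]];
    let handles: Vec<_> = chunks
        .into_iter()
        .map(|chunk| {
            let sender = sender.clone();
            thread::spawn(move || {
                // "extract" each chunk independently, then send the result to the single writer.
                let extracted: u32 = chunk.into_iter().sum();
                sender.send(extracted).unwrap();
            })
        })
        .collect();
    drop(sender); // close the channel once every extraction task holds its own clone

    for handle in handles {
        handle.join().unwrap();
    }
    // The single "writer" drains everything from one place.
    let total: u32 = receiver.iter().sum();
    assert_eq!(total, 21);
}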