Introduce the UpdateBuilder and use it in the HTTP routes

Clément Renault
2020-10-26 20:18:10 +01:00
committed by Kerollmops
parent 5c62fbb6a8
commit 3889d956d9
8 changed files with 641 additions and 537 deletions


@ -0,0 +1,87 @@
use std::borrow::Cow;
use anyhow::{bail, ensure};
use bstr::ByteSlice as _;
use fst::IntoStreamer;
use roaring::RoaringBitmap;
use crate::heed_codec::CboRoaringBitmapCodec;
const WORDS_FST_KEY: &[u8] = crate::index::WORDS_FST_KEY.as_bytes();
const FIELDS_IDS_MAP_KEY: &[u8] = crate::index::FIELDS_IDS_MAP_KEY.as_bytes();
const DOCUMENTS_IDS_KEY: &[u8] = crate::index::DOCUMENTS_IDS_KEY.as_bytes();
pub fn main_merge(key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
match key {
WORDS_FST_KEY => {
let fsts: Vec<_> = values.iter().map(|v| fst::Set::new(v).unwrap()).collect();
// Union of the FSTs
let mut op = fst::set::OpBuilder::new();
fsts.iter().for_each(|fst| op.push(fst.into_stream()));
let op = op.r#union();
let mut build = fst::SetBuilder::memory();
build.extend_stream(op.into_stream()).unwrap();
Ok(build.into_inner().unwrap())
},
FIELDS_IDS_MAP_KEY => {
ensure!(values.windows(2).all(|vs| vs[0] == vs[1]), "fields ids map doesn't match");
Ok(values[0].to_vec())
},
DOCUMENTS_IDS_KEY => word_docids_merge(&[], values),
otherwise => bail!("unexpected main database key {:?}", otherwise.as_bstr()),
}
}
pub fn word_docids_merge(_key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
let (head, tail) = values.split_first().unwrap();
let mut head = RoaringBitmap::deserialize_from(&head[..])?;
for value in tail {
let bitmap = RoaringBitmap::deserialize_from(&value[..])?;
head.union_with(&bitmap);
}
let mut vec = Vec::with_capacity(head.serialized_size());
head.serialize_into(&mut vec)?;
Ok(vec)
}
pub fn docid_word_positions_merge(key: &[u8], _values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
bail!("merging docid word positions is an error ({:?})", key.as_bstr())
}
pub fn words_pairs_proximities_docids_merge(_key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
let (head, tail) = values.split_first().unwrap();
let mut head = CboRoaringBitmapCodec::deserialize_from(&head[..])?;
for value in tail {
let bitmap = CboRoaringBitmapCodec::deserialize_from(&value[..])?;
head.union_with(&bitmap);
}
let mut vec = Vec::new();
CboRoaringBitmapCodec::serialize_into(&head, &mut vec)?;
Ok(vec)
}
pub fn documents_merge(key: &[u8], _values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
bail!("merging documents is an error ({:?})", key.as_bstr())
}
pub fn merge_two_obkv(base: obkv::KvReader, update: obkv::KvReader, buffer: &mut Vec<u8>) {
use itertools::merge_join_by;
use itertools::EitherOrBoth::{Both, Left, Right};
buffer.clear();
let mut writer = obkv::KvWriter::new(buffer);
for eob in merge_join_by(base.iter(), update.iter(), |(b, _), (u, _)| b.cmp(u)) {
match eob {
Both(_, (k, v)) | Left((k, v)) | Right((k, v)) => writer.insert(k, v).unwrap(),
}
}
writer.finish().unwrap();
}
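These merge functions are plain functions matching the `MergeFn` signature, so they can be exercised in isolation. Below is a minimal sketch of a unit test that could sit next to this module (it is not part of the commit), showing that `word_docids_merge` unions the serialized `RoaringBitmap`s it receives; the word `b"hello"` and the document ids are illustrative only.

#[cfg(test)]
mod tests {
    use std::borrow::Cow;
    use roaring::RoaringBitmap;
    use super::word_docids_merge;

    #[test]
    fn word_docids_merge_unions_bitmaps() -> anyhow::Result<()> {
        // Serialize two bitmaps the same way the Store does before sorting.
        let mut left = Vec::new();
        (1u32..=3).collect::<RoaringBitmap>().serialize_into(&mut left)?;
        let mut right = Vec::new();
        (3u32..=4).collect::<RoaringBitmap>().serialize_into(&mut right)?;

        // The key is ignored by this merge function, only the values matter.
        let merged = word_docids_merge(b"hello", &[Cow::Owned(left), Cow::Owned(right)])?;
        let bitmap = RoaringBitmap::deserialize_from(&merged[..])?;
        assert_eq!(bitmap.iter().collect::<Vec<u32>>(), vec![1, 2, 3, 4]);
        Ok(())
    }
}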


@ -1,4 +1,161 @@
use crate::Index;
use std::borrow::Cow;
use std::fs::File;
use std::io::{self, Seek, SeekFrom};
use std::sync::mpsc::sync_channel;
use std::time::Instant;
use anyhow::Context;
use bstr::ByteSlice as _;
use grenad::{Writer, Sorter, Merger, Reader, FileFuse, CompressionType};
use heed::types::ByteSlice;
use log::{debug, info, error};
use rayon::prelude::*;
use crate::index::Index;
use self::store::Store;
use self::merge_function::{
main_merge, word_docids_merge, words_pairs_proximities_docids_merge,
docid_word_positions_merge, documents_merge,
};
pub use self::transform::{Transform, TransformOutput};
use super::UpdateBuilder;
mod merge_function;
mod store;
mod transform;
#[derive(Debug, Copy, Clone)]
enum WriteMethod {
Append,
GetMergePut,
}
type MergeFn = for<'a> fn(&[u8], &[Cow<'a, [u8]>]) -> anyhow::Result<Vec<u8>>;
fn create_writer(typ: CompressionType, level: Option<u32>, file: File) -> io::Result<Writer<File>> {
let mut builder = Writer::builder();
builder.compression_type(typ);
if let Some(level) = level {
builder.compression_level(level);
}
builder.build(file)
}
fn create_sorter(
merge: MergeFn,
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: Option<u64>,
max_nb_chunks: Option<usize>,
max_memory: Option<usize>,
) -> Sorter<MergeFn>
{
let mut builder = Sorter::builder(merge);
if let Some(shrink_size) = chunk_fusing_shrink_size {
builder.file_fusing_shrink_size(shrink_size);
}
builder.chunk_compression_type(chunk_compression_type);
if let Some(level) = chunk_compression_level {
builder.chunk_compression_level(level);
}
if let Some(nb_chunks) = max_nb_chunks {
builder.max_nb_chunks(nb_chunks);
}
if let Some(memory) = max_memory {
builder.max_memory(memory);
}
builder.build()
}
fn writer_into_reader(writer: Writer<File>, shrink_size: Option<u64>) -> anyhow::Result<Reader<FileFuse>> {
let mut file = writer.into_inner()?;
file.seek(SeekFrom::Start(0))?;
let file = if let Some(shrink_size) = shrink_size {
FileFuse::builder().shrink_size(shrink_size).build(file)
} else {
FileFuse::new(file)
};
Reader::new(file).map_err(Into::into)
}
fn merge_readers(sources: Vec<Reader<FileFuse>>, merge: MergeFn) -> Merger<FileFuse, MergeFn> {
let mut builder = Merger::builder(merge);
builder.extend(sources);
builder.build()
}
fn merge_into_lmdb_database(
wtxn: &mut heed::RwTxn,
database: heed::PolyDatabase,
sources: Vec<Reader<FileFuse>>,
merge: MergeFn,
method: WriteMethod,
) -> anyhow::Result<()> {
debug!("Merging {} MTBL stores...", sources.len());
let before = Instant::now();
let merger = merge_readers(sources, merge);
let mut in_iter = merger.into_merge_iter()?;
match method {
WriteMethod::Append => {
let mut out_iter = database.iter_mut::<_, ByteSlice, ByteSlice>(wtxn)?;
while let Some((k, v)) = in_iter.next()? {
out_iter.append(k, v).with_context(|| format!("writing {:?} into LMDB", k.as_bstr()))?;
}
},
WriteMethod::GetMergePut => {
while let Some((k, v)) = in_iter.next()? {
match database.get::<_, ByteSlice, ByteSlice>(wtxn, k)? {
Some(old_val) => {
let vals = vec![Cow::Borrowed(old_val), Cow::Borrowed(v)];
let val = merge(k, &vals).expect("merge failed");
database.put::<_, ByteSlice, ByteSlice>(wtxn, k, &val)?
},
None => database.put::<_, ByteSlice, ByteSlice>(wtxn, k, v)?,
}
}
},
}
debug!("MTBL stores merged in {:.02?}!", before.elapsed());
Ok(())
}
fn write_into_lmdb_database(
wtxn: &mut heed::RwTxn,
database: heed::PolyDatabase,
mut reader: Reader<FileFuse>,
merge: MergeFn,
method: WriteMethod,
) -> anyhow::Result<()> {
debug!("Writing MTBL stores...");
let before = Instant::now();
match method {
WriteMethod::Append => {
let mut out_iter = database.iter_mut::<_, ByteSlice, ByteSlice>(wtxn)?;
while let Some((k, v)) = reader.next()? {
out_iter.append(k, v).with_context(|| format!("writing {:?} into LMDB", k.as_bstr()))?;
}
},
WriteMethod::GetMergePut => {
while let Some((k, v)) = reader.next()? {
match database.get::<_, ByteSlice, ByteSlice>(wtxn, k)? {
Some(old_val) => {
let vals = vec![Cow::Borrowed(old_val), Cow::Borrowed(v)];
let val = merge(k, &vals).expect("merge failed");
database.put::<_, ByteSlice, ByteSlice>(wtxn, k, &val)?
},
None => database.put::<_, ByteSlice, ByteSlice>(wtxn, k, v)?,
}
}
}
}
debug!("MTBL stores merged in {:.02?}!", before.elapsed());
Ok(())
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IndexDocumentsMethod {
@ -14,12 +171,72 @@ pub enum IndexDocumentsMethod {
pub struct IndexDocuments<'t, 'u, 'i> {
wtxn: &'t mut heed::RwTxn<'u>,
index: &'i Index,
log_every_n: Option<usize>,
max_nb_chunks: Option<usize>,
max_memory: Option<usize>,
linked_hash_map_size: Option<usize>,
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: Option<u64>,
indexing_jobs: Option<usize>,
update_method: IndexDocumentsMethod,
}
impl<'t, 'u, 'i> IndexDocuments<'t, 'u, 'i> {
pub fn new(wtxn: &'t mut heed::RwTxn<'u>, index: &'i Index) -> IndexDocuments<'t, 'u, 'i> {
IndexDocuments { wtxn, index, update_method: IndexDocumentsMethod::ReplaceDocuments }
IndexDocuments {
wtxn,
index,
log_every_n: None,
max_nb_chunks: None,
max_memory: None,
linked_hash_map_size: None,
chunk_compression_type: CompressionType::None,
chunk_compression_level: None,
chunk_fusing_shrink_size: None,
indexing_jobs: None,
update_method: IndexDocumentsMethod::ReplaceDocuments
}
}
pub(crate) fn log_every_n(&mut self, log_every_n: usize) -> &mut Self {
self.log_every_n = Some(log_every_n);
self
}
pub(crate) fn max_nb_chunks(&mut self, max_nb_chunks: usize) -> &mut Self {
self.max_nb_chunks = Some(max_nb_chunks);
self
}
pub(crate) fn max_memory(&mut self, max_memory: usize) -> &mut Self {
self.max_memory = Some(max_memory);
self
}
pub(crate) fn linked_hash_map_size(&mut self, linked_hash_map_size: usize) -> &mut Self {
self.linked_hash_map_size = Some(linked_hash_map_size);
self
}
pub(crate) fn chunk_compression_type(&mut self, chunk_compression_type: CompressionType) -> &mut Self {
self.chunk_compression_type = chunk_compression_type;
self
}
pub(crate) fn chunk_compression_level(&mut self, chunk_compression_level: u32) -> &mut Self {
self.chunk_compression_level = Some(chunk_compression_level);
self
}
pub(crate) fn chunk_fusing_shrink_size(&mut self, chunk_fusing_shrink_size: u64) -> &mut Self {
self.chunk_fusing_shrink_size = Some(chunk_fusing_shrink_size);
self
}
pub(crate) fn indexing_jobs(&mut self, indexing_jobs: usize) -> &mut Self {
self.indexing_jobs = Some(indexing_jobs);
self
}
pub fn index_documents_method(&mut self, method: IndexDocumentsMethod) -> &mut Self {
@ -27,7 +244,228 @@ impl<'t, 'u, 'i> IndexDocuments<'t, 'u, 'i> {
self
}
pub fn execute(self) -> anyhow::Result<()> {
todo!()
pub fn execute<R, F>(self, reader: R, progress_callback: F) -> anyhow::Result<()>
where
R: io::Read,
F: Fn(usize, usize) + Sync,
{
let before_indexing = Instant::now();
let transform = Transform {
rtxn: &self.wtxn,
index: self.index,
chunk_compression_type: self.chunk_compression_type,
chunk_compression_level: self.chunk_compression_level,
chunk_fusing_shrink_size: self.chunk_fusing_shrink_size,
max_nb_chunks: self.max_nb_chunks,
max_memory: self.max_memory,
index_documents_method: self.update_method,
};
let TransformOutput {
fields_ids_map,
users_ids_documents_ids,
new_documents_ids,
replaced_documents_ids,
documents_count,
documents_file,
} = transform.from_csv(reader)?;
// We delete the documents that this document addition replaces. This way we are
// able to simply insert all the documents even if they already exist in the database.
if !replaced_documents_ids.is_empty() {
let update_builder = UpdateBuilder {
log_every_n: self.log_every_n,
max_nb_chunks: self.max_nb_chunks,
max_memory: self.max_memory,
linked_hash_map_size: self.linked_hash_map_size,
chunk_compression_type: self.chunk_compression_type,
chunk_compression_level: self.chunk_compression_level,
chunk_fusing_shrink_size: self.chunk_fusing_shrink_size,
indexing_jobs: self.indexing_jobs,
};
let mut deletion_builder = update_builder.delete_documents(self.wtxn, self.index)?;
deletion_builder.delete_documents(&replaced_documents_ids);
let _deleted_documents_count = deletion_builder.execute()?;
}
let mmap = unsafe {
memmap::Mmap::map(&documents_file).context("mmaping the transform documents file")?
};
let documents = grenad::Reader::new(mmap.as_ref())?;
// This enum indicates the type of the reader merges
// that are potentially done on different threads.
enum DatabaseType {
Main,
WordDocids,
WordsPairsProximitiesDocids,
}
let linked_hash_map_size = self.linked_hash_map_size;
let max_nb_chunks = self.max_nb_chunks;
let max_memory = self.max_memory;
let chunk_compression_type = self.chunk_compression_type;
let chunk_compression_level = self.chunk_compression_level;
let log_every_n = self.log_every_n;
let chunk_fusing_shrink_size = self.chunk_fusing_shrink_size;
let jobs = self.indexing_jobs.unwrap_or(0);
let pool = rayon::ThreadPoolBuilder::new().num_threads(jobs).build()?;
let (receiver, docid_word_positions_readers, documents_readers) = pool.install(|| {
let num_threads = rayon::current_num_threads();
let max_memory_by_job = max_memory.map(|mm| mm / num_threads);
let readers = rayon::iter::repeatn(documents, num_threads)
.enumerate()
.map(|(i, documents)| {
let store = Store::new(
linked_hash_map_size,
max_nb_chunks,
max_memory_by_job,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
)?;
store.index(
documents,
documents_count,
i,
num_threads,
log_every_n,
&progress_callback,
)
})
.collect::<Result<Vec<_>, _>>()?;
let mut main_readers = Vec::with_capacity(readers.len());
let mut word_docids_readers = Vec::with_capacity(readers.len());
let mut docid_word_positions_readers = Vec::with_capacity(readers.len());
let mut words_pairs_proximities_docids_readers = Vec::with_capacity(readers.len());
let mut documents_readers = Vec::with_capacity(readers.len());
readers.into_iter().for_each(|readers| {
main_readers.push(readers.main);
word_docids_readers.push(readers.word_docids);
docid_word_positions_readers.push(readers.docid_word_positions);
words_pairs_proximities_docids_readers.push(readers.words_pairs_proximities_docids);
documents_readers.push(readers.documents);
});
// This is the function that merges the readers
// by using the given merge function.
let merge_readers = move |readers, merge| {
let mut writer = tempfile::tempfile().and_then(|f| {
create_writer(chunk_compression_type, chunk_compression_level, f)
})?;
let merger = merge_readers(readers, merge);
merger.write_into(&mut writer)?;
writer_into_reader(writer, chunk_fusing_shrink_size)
};
// The enum and the channel that are used to transfer
// the reader merges potentially done on another thread.
let (sender, receiver) = sync_channel(3);
debug!("Merging the main, word docids and words pairs proximity docids in parallel...");
rayon::spawn(move || {
vec![
(DatabaseType::Main, main_readers, main_merge as MergeFn),
(DatabaseType::WordDocids, word_docids_readers, word_docids_merge),
(
DatabaseType::WordsPairsProximitiesDocids,
words_pairs_proximities_docids_readers,
words_pairs_proximities_docids_merge,
),
]
.into_par_iter()
.for_each(|(dbtype, readers, merge)| {
let result = merge_readers(readers, merge);
if let Err(e) = sender.send((dbtype, result)) {
error!("sender error: {}", e);
}
});
});
Ok((receiver, docid_word_positions_readers, documents_readers)) as anyhow::Result<_>
})?;
let mut documents_ids = self.index.documents_ids(self.wtxn)?;
let contains_documents = !documents_ids.is_empty();
let write_method = if contains_documents {
WriteMethod::GetMergePut
} else {
WriteMethod::Append
};
// We write the fields ids map into the main database
self.index.put_fields_ids_map(self.wtxn, &fields_ids_map)?;
// We write the users_ids_documents_ids into the main database.
self.index.put_users_ids_documents_ids(self.wtxn, &users_ids_documents_ids)?;
// We merge the new documents ids with the existing ones.
documents_ids.union_with(&new_documents_ids);
self.index.put_documents_ids(self.wtxn, &documents_ids)?;
debug!("Writing the docid word positions into LMDB on disk...");
merge_into_lmdb_database(
self.wtxn,
*self.index.docid_word_positions.as_polymorph(),
docid_word_positions_readers,
docid_word_positions_merge,
write_method
)?;
debug!("Writing the documents into LMDB on disk...");
merge_into_lmdb_database(
self.wtxn,
*self.index.documents.as_polymorph(),
documents_readers,
documents_merge,
write_method
)?;
for (db_type, result) in receiver {
let content = result?;
match db_type {
DatabaseType::Main => {
debug!("Writing the main elements into LMDB on disk...");
write_into_lmdb_database(
self.wtxn,
self.index.main,
content,
main_merge,
write_method,
)?;
},
DatabaseType::WordDocids => {
debug!("Writing the words docids into LMDB on disk...");
let db = *self.index.word_docids.as_polymorph();
write_into_lmdb_database(
self.wtxn,
db,
content,
word_docids_merge,
write_method,
)?;
},
DatabaseType::WordsPairsProximitiesDocids => {
debug!("Writing the words pairs proximities docids into LMDB on disk...");
let db = *self.index.word_pair_proximity_docids.as_polymorph();
write_into_lmdb_database(
self.wtxn,
db,
content,
words_pairs_proximities_docids_merge,
write_method,
)?;
},
}
}
info!("Update processed in {:.02?}", before_indexing.elapsed());
Ok(())
}
}
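For context, a minimal sketch of how this `execute` method is meant to be driven, assuming an already opened LMDB environment and `Index`; the function name `index_csv_sketch`, the CSV content and the progress callback are illustrative only (the real call sites live in the HTTP routes touched by this commit but not shown in this view).

fn index_csv_sketch(env: &heed::Env, index: &Index) -> anyhow::Result<()> {
    use std::io::Cursor;

    // Two tiny documents; the transform requires an "id" column for the user ids.
    let csv = "id,title\n1,The Social Network\n2,Carol\n";

    let mut wtxn = env.write_txn()?;
    let mut builder = IndexDocuments::new(&mut wtxn, index);
    builder.index_documents_method(IndexDocumentsMethod::ReplaceDocuments);

    // The callback receives (documents seen so far, total documents in the update).
    builder.execute(Cursor::new(csv), |seen, total| {
        log::debug!("indexing progress: {}/{}", seen, total);
    })?;

    wtxn.commit()?;
    Ok(())
}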


@ -0,0 +1,422 @@
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap};
use std::convert::{TryFrom, TryInto};
use std::fs::File;
use std::iter::FromIterator;
use std::time::Instant;
use std::{cmp, iter};
use anyhow::Context;
use bstr::ByteSlice as _;
use heed::BytesEncode;
use linked_hash_map::LinkedHashMap;
use log::{debug, info};
use grenad::{Reader, FileFuse, Writer, Sorter, CompressionType};
use roaring::RoaringBitmap;
use tempfile::tempfile;
use crate::heed_codec::{BoRoaringBitmapCodec, CboRoaringBitmapCodec};
use crate::tokenizer::{simple_tokenizer, only_token};
use crate::{SmallVec32, Position, DocumentId};
use super::{MergeFn, create_writer, create_sorter, writer_into_reader};
use super::merge_function::{main_merge, word_docids_merge, words_pairs_proximities_docids_merge};
const LMDB_MAX_KEY_LENGTH: usize = 511;
const ONE_MEGABYTE: usize = 1024 * 1024;
const MAX_POSITION: usize = 1000;
const WORDS_FST_KEY: &[u8] = crate::index::WORDS_FST_KEY.as_bytes();
pub struct Readers {
pub main: Reader<FileFuse>,
pub word_docids: Reader<FileFuse>,
pub docid_word_positions: Reader<FileFuse>,
pub words_pairs_proximities_docids: Reader<FileFuse>,
pub documents: Reader<FileFuse>,
}
pub struct Store {
word_docids: LinkedHashMap<SmallVec32<u8>, RoaringBitmap>,
word_docids_limit: usize,
words_pairs_proximities_docids: LinkedHashMap<(SmallVec32<u8>, SmallVec32<u8>, u8), RoaringBitmap>,
words_pairs_proximities_docids_limit: usize,
// MTBL parameters
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: Option<u64>,
// MTBL sorters
main_sorter: Sorter<MergeFn>,
word_docids_sorter: Sorter<MergeFn>,
words_pairs_proximities_docids_sorter: Sorter<MergeFn>,
// MTBL writers
docid_word_positions_writer: Writer<File>,
documents_writer: Writer<File>,
}
impl Store {
pub fn new(
linked_hash_map_size: Option<usize>,
max_nb_chunks: Option<usize>,
max_memory: Option<usize>,
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: Option<u64>,
) -> anyhow::Result<Store>
{
// We divide the max memory by the number of sorters the Store has.
let max_memory = max_memory.map(|mm| cmp::max(ONE_MEGABYTE, mm / 3));
let linked_hash_map_size = linked_hash_map_size.unwrap_or(500);
let main_sorter = create_sorter(
main_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
max_memory,
);
let word_docids_sorter = create_sorter(
word_docids_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
max_memory,
);
let words_pairs_proximities_docids_sorter = create_sorter(
words_pairs_proximities_docids_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
max_memory,
);
let documents_writer = tempfile().and_then(|f| {
create_writer(chunk_compression_type, chunk_compression_level, f)
})?;
let docid_word_positions_writer = tempfile().and_then(|f| {
create_writer(chunk_compression_type, chunk_compression_level, f)
})?;
Ok(Store {
word_docids: LinkedHashMap::with_capacity(linked_hash_map_size),
word_docids_limit: linked_hash_map_size,
words_pairs_proximities_docids: LinkedHashMap::with_capacity(linked_hash_map_size),
words_pairs_proximities_docids_limit: linked_hash_map_size,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
main_sorter,
word_docids_sorter,
words_pairs_proximities_docids_sorter,
docid_word_positions_writer,
documents_writer,
})
}
// Save the document id under the word we have seen it in.
fn insert_word_docid(&mut self, word: &str, id: DocumentId) -> anyhow::Result<()> {
// if get_refresh finds the element it is assured to be at the end of the linked hash map.
match self.word_docids.get_refresh(word.as_bytes()) {
Some(old) => { old.insert(id); },
None => {
let word_vec = SmallVec32::from(word.as_bytes());
// A newly inserted element is appended at the end of the linked hash map.
self.word_docids.insert(word_vec, RoaringBitmap::from_iter(Some(id)));
// If the word docids just reached its capacity we must make sure to remove
// one element, so that the next insertion doesn't grow the capacity.
if self.word_docids.len() == self.word_docids_limit {
// Removing the front element is equivalent to removing the LRU element.
let lru = self.word_docids.pop_front();
Self::write_word_docids(&mut self.word_docids_sorter, lru)?;
}
}
}
Ok(())
}
// Save the document id under the word pair proximities the document contains.
fn insert_words_pairs_proximities_docids<'a>(
&mut self,
words_pairs_proximities: impl IntoIterator<Item=((&'a str, &'a str), u8)>,
id: DocumentId,
) -> anyhow::Result<()>
{
for ((w1, w2), prox) in words_pairs_proximities {
let w1 = SmallVec32::from(w1.as_bytes());
let w2 = SmallVec32::from(w2.as_bytes());
let key = (w1, w2, prox);
// if get_refresh finds the element it is assured
// to be at the end of the linked hash map.
match self.words_pairs_proximities_docids.get_refresh(&key) {
Some(old) => { old.insert(id); },
None => {
// A newly inserted element is appended at the end of the linked hash map.
let ids = RoaringBitmap::from_iter(Some(id));
self.words_pairs_proximities_docids.insert(key, ids);
}
}
}
// If the linked hashmap is over capacity we must remove the overflowing elements.
let len = self.words_pairs_proximities_docids.len();
let overflow = len.checked_sub(self.words_pairs_proximities_docids_limit);
if let Some(overflow) = overflow {
let mut lrus = Vec::with_capacity(overflow);
// Removing front elements is equivalent to removing the LRUs.
let iter = iter::from_fn(|| self.words_pairs_proximities_docids.pop_front());
iter.take(overflow).for_each(|x| lrus.push(x));
Self::write_words_pairs_proximities(&mut self.words_pairs_proximities_docids_sorter, lrus)?;
}
Ok(())
}
fn write_document(
&mut self,
document_id: DocumentId,
words_positions: &HashMap<String, SmallVec32<Position>>,
record: &[u8],
) -> anyhow::Result<()>
{
// We compute the list of words pairs proximities (self-join) and write it directly to disk.
let words_pair_proximities = compute_words_pair_proximities(&words_positions);
self.insert_words_pairs_proximities_docids(words_pair_proximities, document_id)?;
// We store document_id associated with all the words the record contains.
for (word, _) in words_positions {
self.insert_word_docid(word, document_id)?;
}
self.documents_writer.insert(document_id.to_be_bytes(), record)?;
Self::write_docid_word_positions(&mut self.docid_word_positions_writer, document_id, words_positions)?;
Ok(())
}
fn write_words_pairs_proximities(
sorter: &mut Sorter<MergeFn>,
iter: impl IntoIterator<Item=((SmallVec32<u8>, SmallVec32<u8>, u8), RoaringBitmap)>,
) -> anyhow::Result<()>
{
let mut key = Vec::new();
let mut buffer = Vec::new();
for ((w1, w2, min_prox), docids) in iter {
key.clear();
key.extend_from_slice(w1.as_bytes());
key.push(0);
key.extend_from_slice(w2.as_bytes());
// Storing the minimum proximity found between those words
key.push(min_prox);
// We serialize the document ids into a buffer
buffer.clear();
buffer.reserve(CboRoaringBitmapCodec::serialized_size(&docids));
CboRoaringBitmapCodec::serialize_into(&docids, &mut buffer)?;
// that we write under the generated key into MTBL
if lmdb_key_valid_size(&key) {
sorter.insert(&key, &buffer)?;
}
}
Ok(())
}
fn write_docid_word_positions(
writer: &mut Writer<File>,
id: DocumentId,
words_positions: &HashMap<String, SmallVec32<Position>>,
) -> anyhow::Result<()>
{
// We prefix the words by the document id.
let mut key = id.to_be_bytes().to_vec();
let base_size = key.len();
// We order the words lexicographically, this way we avoid having to use a sorter.
let words_positions = BTreeMap::from_iter(words_positions);
for (word, positions) in words_positions {
key.truncate(base_size);
key.extend_from_slice(word.as_bytes());
// We serialize the positions into a buffer.
let positions = RoaringBitmap::from_iter(positions.iter().cloned());
let bytes = BoRoaringBitmapCodec::bytes_encode(&positions)
.with_context(|| "could not serialize positions")?;
// that we write under the generated key into MTBL
if lmdb_key_valid_size(&key) {
writer.insert(&key, &bytes)?;
}
}
Ok(())
}
fn write_word_docids<I>(sorter: &mut Sorter<MergeFn>, iter: I) -> anyhow::Result<()>
where I: IntoIterator<Item=(SmallVec32<u8>, RoaringBitmap)>
{
let mut key = Vec::new();
let mut buffer = Vec::new();
for (word, ids) in iter {
key.clear();
key.extend_from_slice(&word);
// We serialize the document ids into a buffer
buffer.clear();
let ids = RoaringBitmap::from_iter(ids);
buffer.reserve(ids.serialized_size());
ids.serialize_into(&mut buffer)?;
// that we write under the generated key into MTBL
if lmdb_key_valid_size(&key) {
sorter.insert(&key, &buffer)?;
}
}
Ok(())
}
pub fn index<F>(
mut self,
mut documents: grenad::Reader<&[u8]>,
documents_count: usize,
thread_index: usize,
num_threads: usize,
log_every_n: Option<usize>,
mut progress_callback: F,
) -> anyhow::Result<Readers>
where F: FnMut(usize, usize),
{
debug!("{:?}: Indexing in a Store...", thread_index);
let mut before = Instant::now();
let mut words_positions = HashMap::new();
let mut count: usize = 0;
while let Some((key, value)) = documents.next()? {
let document_id = key.try_into().map(u32::from_be_bytes).unwrap();
let document = obkv::KvReader::new(value);
// We skip documents that must not be indexed by this thread.
if count % num_threads == thread_index {
// This is a log routine that we do every `log_every_n` documents.
if log_every_n.map_or(false, |len| count % len == 0) {
info!("We have seen {} documents so far ({:.02?}).", format_count(count), before.elapsed());
progress_callback(count, documents_count);
before = Instant::now();
}
for (attr, content) in document.iter() {
let content: Cow<str> = serde_json::from_slice(content).unwrap();
for (pos, token) in simple_tokenizer(&content).filter_map(only_token).enumerate().take(MAX_POSITION) {
let word = token.to_lowercase();
let position = (attr as usize * MAX_POSITION + pos) as u32;
words_positions.entry(word).or_insert_with(SmallVec32::new).push(position);
}
}
// We write the document in the documents store.
self.write_document(document_id, &words_positions, value)?;
words_positions.clear();
}
// Increment the count of documents we have seen so far.
count += 1;
}
progress_callback(count, documents_count);
let readers = self.finish()?;
debug!("{:?}: Store created!", thread_index);
Ok(readers)
}
fn finish(mut self) -> anyhow::Result<Readers> {
let comp_type = self.chunk_compression_type;
let comp_level = self.chunk_compression_level;
let shrink_size = self.chunk_fusing_shrink_size;
Self::write_word_docids(&mut self.word_docids_sorter, self.word_docids)?;
Self::write_words_pairs_proximities(
&mut self.words_pairs_proximities_docids_sorter,
self.words_pairs_proximities_docids,
)?;
let mut word_docids_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
let mut builder = fst::SetBuilder::memory();
let mut iter = self.word_docids_sorter.into_iter()?;
while let Some((word, val)) = iter.next()? {
// The words are lexicographically ordered here, so we can use
// the keys directly to construct the words fst.
builder.insert(word)?;
word_docids_wtr.insert(word, val)?;
}
let fst = builder.into_set();
self.main_sorter.insert(WORDS_FST_KEY, fst.as_fst().as_bytes())?;
let mut main_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
self.main_sorter.write_into(&mut main_wtr)?;
let mut words_pairs_proximities_docids_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
self.words_pairs_proximities_docids_sorter.write_into(&mut words_pairs_proximities_docids_wtr)?;
let main = writer_into_reader(main_wtr, shrink_size)?;
let word_docids = writer_into_reader(word_docids_wtr, shrink_size)?;
let words_pairs_proximities_docids = writer_into_reader(words_pairs_proximities_docids_wtr, shrink_size)?;
let docid_word_positions = writer_into_reader(self.docid_word_positions_writer, shrink_size)?;
let documents = writer_into_reader(self.documents_writer, shrink_size)?;
Ok(Readers {
main,
word_docids,
docid_word_positions,
words_pairs_proximities_docids,
documents,
})
}
}
/// Outputs a list of all pairs of words with the shortest proximity between 1 and 7 inclusive.
///
/// This list is used by the engine to calculate the documents containing words that are
/// close to each other.
fn compute_words_pair_proximities(
word_positions: &HashMap<String, SmallVec32<Position>>,
) -> HashMap<(&str, &str), u8>
{
use itertools::Itertools;
let mut words_pair_proximities = HashMap::new();
for ((w1, ps1), (w2, ps2)) in word_positions.iter().cartesian_product(word_positions) {
let mut min_prox = None;
for (ps1, ps2) in ps1.iter().cartesian_product(ps2) {
let prox = crate::proximity::positions_proximity(*ps1, *ps2);
let prox = u8::try_from(prox).unwrap();
// We don't care about words that appear at the
// same position or too far from each other.
if prox >= 1 && prox <= 7 {
if min_prox.map_or(true, |mp| prox < mp) {
min_prox = Some(prox)
}
}
}
if let Some(min_prox) = min_prox {
words_pair_proximities.insert((w1.as_str(), w2.as_str()), min_prox);
}
}
words_pair_proximities
}
fn format_count(n: usize) -> String {
human_format::Formatter::new().with_decimals(1).with_separator("").format(n as f64)
}
fn lmdb_key_valid_size(key: &[u8]) -> bool {
!key.is_empty() && key.len() <= LMDB_MAX_KEY_LENGTH
}


@ -0,0 +1,179 @@
use std::borrow::Cow;
use std::convert::TryFrom;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom};
use anyhow::Context;
use fst::{IntoStreamer, Streamer};
use grenad::CompressionType;
use roaring::RoaringBitmap;
use crate::{BEU32, Index, FieldsIdsMap};
use crate::update::AvailableDocumentsIds;
use super::merge_function::merge_two_obkv;
use super::{create_writer, create_sorter, IndexDocumentsMethod};
pub struct TransformOutput {
pub fields_ids_map: FieldsIdsMap,
pub users_ids_documents_ids: fst::Map<Vec<u8>>,
pub new_documents_ids: RoaringBitmap,
pub replaced_documents_ids: RoaringBitmap,
pub documents_count: usize,
pub documents_file: File,
}
pub struct Transform<'t, 'i> {
pub rtxn: &'t heed::RoTxn,
pub index: &'i Index,
pub chunk_compression_type: CompressionType,
pub chunk_compression_level: Option<u32>,
pub chunk_fusing_shrink_size: Option<u64>,
pub max_nb_chunks: Option<usize>,
pub max_memory: Option<usize>,
pub index_documents_method: IndexDocumentsMethod,
}
impl Transform<'_, '_> {
/// Extract the users ids, deduplicate and compute the new internal documents ids
/// and fields ids, writing all the documents under their internal ids into a final file.
///
/// Outputs the new `FieldsIdsMap`, the new `UsersIdsDocumentsIds` map, the new documents ids,
/// the replaced documents ids, the number of documents in this update and the file
/// containing all those documents.
pub fn from_csv<R: Read>(self, reader: R) -> anyhow::Result<TransformOutput> {
let mut fields_ids_map = self.index.fields_ids_map(self.rtxn)?;
let documents_ids = self.index.documents_ids(self.rtxn)?;
let mut available_documents_ids = AvailableDocumentsIds::from_documents_ids(&documents_ids);
let users_ids_documents_ids = self.index.users_ids_documents_ids(self.rtxn).unwrap();
let mut csv = csv::Reader::from_reader(reader);
let headers = csv.headers()?.clone();
let user_id_pos = headers.iter().position(|h| h == "id").context(r#"missing "id" header"#)?;
// Generate the new fields ids based on the current fields ids and this CSV headers.
let mut fields_ids = Vec::new();
for header in headers.iter() {
let id = fields_ids_map.insert(header)
.context("impossible to generate a field id (limit reached)")?;
fields_ids.push(id);
}
/// The last value associated with an id is kept.
fn merge_last_win(_key: &[u8], vals: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
vals.last().context("no last value").map(|last| last.clone().into_owned())
}
// We initialize the sorter with the user indexing settings.
let mut sorter = create_sorter(
merge_last_win,
self.chunk_compression_type,
self.chunk_compression_level,
self.chunk_fusing_shrink_size,
self.max_nb_chunks,
self.max_memory,
);
// We write into the sorter to merge and deduplicate the documents
// based on the users ids.
let mut json_buffer = Vec::new();
let mut obkv_buffer = Vec::new();
let mut record = csv::StringRecord::new();
while csv.read_record(&mut record)? {
obkv_buffer.clear();
let mut writer = obkv::KvWriter::new(&mut obkv_buffer);
// We retrieve the field id based on the CSV header position
// and zip it with the record value.
for (key, field) in fields_ids.iter().copied().zip(&record) {
// We serialize the attribute values as JSON strings.
json_buffer.clear();
serde_json::to_writer(&mut json_buffer, &field)?;
writer.insert(key, &json_buffer)?;
}
// We extract the user id and use it as the key for this document.
// TODO we must validate the user id (i.e. [a-zA-Z0-9\-_]).
let user_id = &record[user_id_pos];
sorter.insert(user_id, &obkv_buffer)?;
}
// Once we have sorted and deduplicated the documents we write them into a final file.
let file = tempfile::tempfile()?;
let mut writer = create_writer(self.chunk_compression_type, self.chunk_compression_level, file)?;
let mut new_users_ids_documents_ids_builder = fst::MapBuilder::memory();
let mut replaced_documents_ids = RoaringBitmap::new();
let mut new_documents_ids = RoaringBitmap::new();
// While we write into final file we get or generate the internal documents ids.
let mut documents_count = 0;
let mut iter = sorter.into_iter()?;
while let Some((user_id, update_obkv)) = iter.next()? {
let (docid, obkv) = match users_ids_documents_ids.get(user_id) {
Some(docid) => {
// If we find the user id in the current users ids documents ids map
// we use it and insert it in the list of replaced documents.
let docid = u32::try_from(docid).expect("valid document id");
replaced_documents_ids.insert(docid);
// Depending on the update indexing method we will merge
// the document update with the current document or not.
match self.index_documents_method {
IndexDocumentsMethod::ReplaceDocuments => (docid, update_obkv),
IndexDocumentsMethod::UpdateDocuments => {
let key = BEU32::new(docid);
let base_obkv = self.index.documents.get(&self.rtxn, &key)?
.context("document not found")?;
let update_obkv = obkv::KvReader::new(update_obkv);
merge_two_obkv(base_obkv, update_obkv, &mut obkv_buffer);
(docid, obkv_buffer.as_slice())
}
}
},
None => {
// If this user id is new we add it to the users ids documents ids map
// for new ids and into the list of new documents.
let new_docid = available_documents_ids.next()
.context("no more available documents ids")?;
new_users_ids_documents_ids_builder.insert(user_id, new_docid as u64)?;
new_documents_ids.insert(new_docid);
(new_docid, update_obkv)
},
};
// We insert the document under the documents ids map into the final file.
writer.insert(docid.to_be_bytes(), obkv)?;
documents_count += 1;
}
// Once we have written all the documents into the final file, we extract it
// from the writer and reset the seek to be able to read it again.
let mut documents_file = writer.into_inner()?;
documents_file.seek(SeekFrom::Start(0))?;
// We create the union between the existing users ids documents ids with the new ones.
let new_users_ids_documents_ids = new_users_ids_documents_ids_builder.into_map();
let union_ = fst::map::OpBuilder::new()
.add(&users_ids_documents_ids)
.add(&new_users_ids_documents_ids)
.r#union();
// We stream and merge the new users ids documents ids map with the existing one.
let mut users_ids_documents_ids_builder = fst::MapBuilder::memory();
let mut iter = union_.into_stream();
while let Some((user_id, vals)) = iter.next() {
assert_eq!(vals.len(), 1, "there must be exactly one document id");
users_ids_documents_ids_builder.insert(user_id, vals[0].value)?;
}
Ok(TransformOutput {
fields_ids_map,
users_ids_documents_ids: users_ids_documents_ids_builder.into_map(),
new_documents_ids,
replaced_documents_ids,
documents_count,
documents_file,
})
}
}
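When the update method is `UpdateDocuments`, the branch above relies on `merge_two_obkv` to overlay the update on top of the stored document. A small sketch of that behaviour (not part of the commit), reusing only the obkv calls already present in these files; the helper name, the field ids and the JSON string values are illustrative.

fn merge_two_obkv_sketch() -> anyhow::Result<()> {
    // The base document has two fields, the update only rewrites field 1.
    let mut base = Vec::new();
    let mut writer = obkv::KvWriter::new(&mut base);
    writer.insert(1u8, b"\"old title\"")?;
    writer.insert(2u8, b"\"kept overview\"")?;
    writer.finish()?;

    let mut update = Vec::new();
    let mut writer = obkv::KvWriter::new(&mut update);
    writer.insert(1u8, b"\"new title\"")?;
    writer.finish()?;

    let mut buffer = Vec::new();
    merge_two_obkv(obkv::KvReader::new(&base), obkv::KvReader::new(&update), &mut buffer);

    // Field 1 now holds the updated value while field 2 is preserved from the base.
    let merged = obkv::KvReader::new(&buffer);
    for (field_id, value) in merged.iter() {
        println!("{} => {}", field_id, std::str::from_utf8(value)?);
    }
    Ok(())
}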


@ -1,35 +1,37 @@
use std::borrow::Cow;
use std::convert::TryFrom;
use fst::{IntoStreamer, Streamer};
use grenad::CompressionType;
use itertools::Itertools;
use roaring::RoaringBitmap;
use crate::{Index, BEU32};
use crate::Index;
use super::clear_documents::ClearDocuments;
use super::delete_documents::DeleteDocuments;
use super::index_documents::IndexDocuments;
pub struct UpdateBuilder {
log_every_n: usize,
max_nb_chunks: Option<usize>,
max_memory: usize,
linked_hash_map_size: usize,
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: u64,
enable_chunk_fusing: bool,
indexing_jobs: Option<usize>,
pub(crate) log_every_n: Option<usize>,
pub(crate) max_nb_chunks: Option<usize>,
pub(crate) max_memory: Option<usize>,
pub(crate) linked_hash_map_size: Option<usize>,
pub(crate) chunk_compression_type: CompressionType,
pub(crate) chunk_compression_level: Option<u32>,
pub(crate) chunk_fusing_shrink_size: Option<u64>,
pub(crate) indexing_jobs: Option<usize>,
}
impl UpdateBuilder {
pub fn new() -> UpdateBuilder {
todo!()
UpdateBuilder {
log_every_n: None,
max_nb_chunks: None,
max_memory: None,
linked_hash_map_size: None,
chunk_compression_type: CompressionType::None,
chunk_compression_level: None,
chunk_fusing_shrink_size: None,
indexing_jobs: None,
}
}
pub fn log_every_n(&mut self, log_every_n: usize) -> &mut Self {
self.log_every_n = log_every_n;
self.log_every_n = Some(log_every_n);
self
}
@ -39,12 +41,12 @@ impl UpdateBuilder {
}
pub fn max_memory(&mut self, max_memory: usize) -> &mut Self {
self.max_memory = max_memory;
self.max_memory = Some(max_memory);
self
}
pub fn linked_hash_map_size(&mut self, linked_hash_map_size: usize) -> &mut Self {
self.linked_hash_map_size = linked_hash_map_size;
self.linked_hash_map_size = Some(linked_hash_map_size);
self
}
@ -59,12 +61,7 @@ impl UpdateBuilder {
}
pub fn chunk_fusing_shrink_size(&mut self, chunk_fusing_shrink_size: u64) -> &mut Self {
self.chunk_fusing_shrink_size = chunk_fusing_shrink_size;
self
}
pub fn enable_chunk_fusing(&mut self, enable_chunk_fusing: bool) -> &mut Self {
self.enable_chunk_fusing = enable_chunk_fusing;
self.chunk_fusing_shrink_size = Some(chunk_fusing_shrink_size);
self
}
@ -97,6 +94,33 @@ impl UpdateBuilder {
index: &'i Index,
) -> IndexDocuments<'t, 'u, 'i>
{
IndexDocuments::new(wtxn, index)
let mut builder = IndexDocuments::new(wtxn, index);
if let Some(log_every_n) = self.log_every_n {
builder.log_every_n(log_every_n);
}
if let Some(max_nb_chunks) = self.max_nb_chunks {
builder.max_nb_chunks(max_nb_chunks);
}
if let Some(max_memory) = self.max_memory {
builder.max_memory(max_memory);
}
if let Some(linked_hash_map_size) = self.linked_hash_map_size {
builder.linked_hash_map_size(linked_hash_map_size);
}
builder.chunk_compression_type(self.chunk_compression_type);
if let Some(chunk_compression_level) = self.chunk_compression_level {
builder.chunk_compression_level(chunk_compression_level);
}
if let Some(chunk_fusing_shrink_size) = self.chunk_fusing_shrink_size {
builder.chunk_fusing_shrink_size(chunk_fusing_shrink_size);
}
if let Some(indexing_jobs) = self.indexing_jobs {
builder.indexing_jobs(indexing_jobs);
}
builder
}
}
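As the commit title says, the HTTP routes now go through this builder, although the route changes themselves are not shown in this view. A hedged sketch of the intended call pattern, assuming an open write transaction and `Index`, and assuming the partially shown builder method at the end of the diff is named `index_documents`; the option values are illustrative. Every option set on the `UpdateBuilder` is forwarded to the `IndexDocuments` update it creates.

fn build_index_documents<'t, 'u, 'i>(
    wtxn: &'t mut heed::RwTxn<'u>,
    index: &'i Index,
) -> IndexDocuments<'t, 'u, 'i> {
    let mut update_builder = UpdateBuilder::new();
    // Illustrative values; a real route would read them from its configuration.
    update_builder.log_every_n(100_000);
    update_builder.max_memory(512 * 1024 * 1024);        // budget shared by the sorters
    update_builder.chunk_fusing_shrink_size(64 * 1024);  // enables chunk fusing

    // The builder copies all of the options above into the IndexDocuments update.
    let mut builder = update_builder.index_documents(wtxn, index);
    builder.index_documents_method(IndexDocumentsMethod::UpdateDocuments);
    builder
}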