Introduce the UpdateBuilder type along with some update operations
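
A rough usage sketch of the new builder follows; the `env` and `index` bindings and the
chosen setter values are illustrative only and are not part of this change:

// Illustrative sketch: `env` and `index` are assumed to come from the existing
// heed environment and Index setup in this crate; the setter values are arbitrary.
let mut builder = UpdateBuilder::new();
builder.max_memory(512 * 1024 * 1024); // in-memory budget for the sorters
builder.indexing_jobs(4); // number of indexing jobs to use
// Open a write transaction and build a documents deletion operation from the builder.
let mut wtxn = env.write_txn()?;
let mut deletion = builder.delete_documents(&mut wtxn, &index)?;
deletion.delete_user_id("e1af5c70"); // hypothetical user id
let deleted_count = deletion.execute()?;
wtxn.commit()?;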

Clément Renault
2020-10-25 18:32:01 +01:00
parent adacc7977d
commit b14cca2ad9
9 changed files with 382 additions and 39 deletions

src/update/mod.rs Normal file

@@ -0,0 +1,5 @@
mod update_builder;
mod update_store;
pub use self::update_builder::UpdateBuilder;
pub use self::update_store::UpdateStore;

src/update/update_builder.rs Normal file

@@ -0,0 +1,356 @@
use std::borrow::Cow;
use std::convert::TryFrom;
use fst::{IntoStreamer, Streamer};
use grenad::CompressionType;
use itertools::Itertools;
use roaring::RoaringBitmap;
use crate::{Index, BEU32};
pub struct UpdateBuilder {
log_every_n: usize,
max_nb_chunks: Option<usize>,
max_memory: usize,
linked_hash_map_size: usize,
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: u64,
enable_chunk_fusing: bool,
indexing_jobs: Option<usize>,
}
impl UpdateBuilder {
pub fn new() -> UpdateBuilder {
todo!()
}
pub fn log_every_n(&mut self, log_every_n: usize) -> &mut Self {
self.log_every_n = log_every_n;
self
}
pub fn max_nb_chunks(&mut self, max_nb_chunks: usize) -> &mut Self {
self.max_nb_chunks = Some(max_nb_chunks);
self
}
pub fn max_memory(&mut self, max_memory: usize) -> &mut Self {
self.max_memory = max_memory;
self
}
pub fn linked_hash_map_size(&mut self, linked_hash_map_size: usize) -> &mut Self {
self.linked_hash_map_size = linked_hash_map_size;
self
}
pub fn chunk_compression_type(&mut self, chunk_compression_type: CompressionType) -> &mut Self {
self.chunk_compression_type = chunk_compression_type;
self
}
pub fn chunk_compression_level(&mut self, chunk_compression_level: u32) -> &mut Self {
self.chunk_compression_level = Some(chunk_compression_level);
self
}
pub fn chunk_fusing_shrink_size(&mut self, chunk_fusing_shrink_size: u64) -> &mut Self {
self.chunk_fusing_shrink_size = chunk_fusing_shrink_size;
self
}
pub fn enable_chunk_fusing(&mut self, enable_chunk_fusing: bool) -> &mut Self {
self.enable_chunk_fusing = enable_chunk_fusing;
self
}
pub fn indexing_jobs(&mut self, indexing_jobs: usize) -> &mut Self {
self.indexing_jobs = Some(indexing_jobs);
self
}
pub fn clear_documents<'t, 'u, 'i>(
self,
wtxn: &'t mut heed::RwTxn<'u>,
index: &'i Index,
) -> ClearDocuments<'t, 'u, 'i>
{
ClearDocuments::new(wtxn, index)
}
pub fn delete_documents<'t, 'u, 'i>(
self,
wtxn: &'t mut heed::RwTxn<'u>,
index: &'i Index,
) -> anyhow::Result<DeleteDocuments<'t, 'u, 'i>>
{
DeleteDocuments::new(wtxn, index)
}
pub fn index_documents<'t, 'u, 'i>(
self,
wtxn: &'t mut heed::RwTxn<'u>,
index: &'i Index,
) -> IndexDocuments<'t, 'u, 'i>
{
IndexDocuments::new(wtxn, index)
}
}
pub struct ClearDocuments<'t, 'u, 'i> {
wtxn: &'t mut heed::RwTxn<'u>,
index: &'i Index,
}
impl<'t, 'u, 'i> ClearDocuments<'t, 'u, 'i> {
fn new(wtxn: &'t mut heed::RwTxn<'u>, index: &'i Index) -> ClearDocuments<'t, 'u, 'i> {
ClearDocuments { wtxn, index }
}
pub fn execute(self) -> anyhow::Result<usize> {
let Index {
main: _main,
word_docids,
docid_word_positions,
word_pair_proximity_docids,
documents,
} = self.index;
// We clear the word fst.
self.index.put_words_fst(self.wtxn, &fst::Set::default())?;
// We clear the users ids to documents ids mapping.
self.index.put_users_ids_documents_ids(self.wtxn, &fst::Map::default())?;
// We retrieve the documents ids.
let documents_ids = self.index.documents_ids(self.wtxn)?;
// We clear the internal documents ids.
self.index.put_documents_ids(self.wtxn, &RoaringBitmap::default())?;
// We clear the word docids.
word_docids.clear(self.wtxn)?;
// We clear the docid word positions.
docid_word_positions.clear(self.wtxn)?;
// We clear the word pair proximity docids.
word_pair_proximity_docids.clear(self.wtxn)?;
// We clear the documents themselves.
documents.clear(self.wtxn)?;
Ok(documents_ids.len() as usize)
}
}
pub struct DeleteDocuments<'t, 'u, 'i> {
wtxn: &'t mut heed::RwTxn<'u>,
index: &'i Index,
users_ids_documents_ids: fst::Map<Vec<u8>>,
documents_ids: RoaringBitmap,
}
impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
fn new(wtxn: &'t mut heed::RwTxn<'u>, index: &'i Index) -> anyhow::Result<DeleteDocuments<'t, 'u, 'i>> {
let users_ids_documents_ids = index
.users_ids_documents_ids(wtxn)?
.map_data(Cow::into_owned)?;
Ok(DeleteDocuments {
wtxn,
index,
users_ids_documents_ids,
documents_ids: RoaringBitmap::new(),
})
}
pub fn delete_document(&mut self, docid: u32) {
self.documents_ids.insert(docid);
}
pub fn delete_documents(&mut self, docids: &RoaringBitmap) {
self.documents_ids.union_with(docids);
}
pub fn delete_user_id(&mut self, user_id: &str) -> Option<u32> {
let docid = self.users_ids_documents_ids.get(user_id).map(|id| u32::try_from(id).unwrap())?;
self.delete_document(docid);
Some(docid)
}
pub fn execute(self) -> anyhow::Result<usize> {
// We retrieve the current documents ids from the database and only keep
// the deletions that target documents which actually exist in it.
let current_documents_ids = self.index.documents_ids(self.wtxn)?;
// We can and must stop removing documents in a database that is empty.
if current_documents_ids.is_empty() {
return Ok(0);
}
let mut documents_ids = self.documents_ids.clone();
documents_ids.intersect_with(&current_documents_ids);
// We write the documents ids that remain after this deletion back into the database.
let mut remaining_documents_ids = current_documents_ids;
remaining_documents_ids.difference_with(&documents_ids);
self.index.put_documents_ids(self.wtxn, &remaining_documents_ids)?;
let fields_ids_map = self.index.fields_ids_map(self.wtxn)?;
let id_field = fields_ids_map.id("id").expect(r#"the field "id" to be present"#);
let Index {
main: _main,
word_docids,
docid_word_positions,
word_pair_proximity_docids,
documents,
} = self.index;
// Retrieve the words and the users ids contained in the documents.
// TODO we should use a small-string type instead of a String here.
let mut words = Vec::new();
let mut users_ids = Vec::new();
for docid in &documents_ids {
// We create an iterator over the document to be able to both read its content
// and delete it. Acquiring a cursor to get and then delete is faster because
// we only traverse the LMDB B-tree once instead of twice.
let key = BEU32::new(docid);
let mut iter = documents.range_mut(self.wtxn, &(key..=key))?;
if let Some((_key, obkv)) = iter.next().transpose()? {
if let Some(content) = obkv.get(id_field) {
let user_id: String = serde_json::from_slice(content).unwrap();
users_ids.push(user_id);
}
iter.del_current()?;
}
drop(iter);
// We iterate through the word positions of the document id,
// retrieve the words and delete the positions.
let mut iter = docid_word_positions.prefix_iter_mut(self.wtxn, &(docid, ""))?;
while let Some(result) = iter.next() {
let ((_docid, word), _positions) = result?;
// This boolean will indicate if we must remove this word from the words FST.
words.push((String::from(word), false));
iter.del_current()?;
}
}
// We create the FST map of the users ids that we must delete.
users_ids.sort_unstable();
let users_ids_to_delete = fst::Set::from_iter(users_ids)?;
let users_ids_to_delete = fst::Map::from(users_ids_to_delete.into_fst());
let new_users_ids_documents_ids = {
// We acquire the current users ids documents ids map and create
// a difference operation between the current and to-delete users ids.
let users_ids_documents_ids = self.index.users_ids_documents_ids(self.wtxn)?;
let difference = users_ids_documents_ids.op().add(&users_ids_to_delete).difference();
// We stream the new users ids, which no longer contain the to-delete users ids.
let mut iter = difference.into_stream();
let mut new_users_ids_documents_ids_builder = fst::MapBuilder::memory();
while let Some((userid, docids)) = iter.next() {
new_users_ids_documents_ids_builder.insert(userid, docids[0].value)?;
}
// We create an FST map from the above builder.
new_users_ids_documents_ids_builder.into_map()
};
// We write the new users ids into the main database.
self.index.put_users_ids_documents_ids(self.wtxn, &new_users_ids_documents_ids)?;
// Maybe we can improve the words lookup performance below
// by sorting the words first, keeping the LMDB pages in cache.
words.sort_unstable();
// We iterate over the words and delete the documents ids
// from the word docids database.
for (word, must_remove) in &mut words {
// We create an iterator to be able to both read and delete or update the word docids.
// Acquiring a cursor to get and then delete or put is faster because we
// only traverse the LMDB B-tree once instead of twice.
let mut iter = word_docids.prefix_iter_mut(self.wtxn, &word)?;
if let Some((key, mut docids)) = iter.next().transpose()? {
if key == word {
docids.difference_with(&documents_ids);
if docids.is_empty() {
iter.del_current()?;
*must_remove = true;
} else {
iter.put_current(key, &docids)?;
}
}
}
}
// We construct an FST set that contains the words to delete from the words FST.
let words_to_delete = words.iter().filter_map(|(w, d)| if *d { Some(w) } else { None });
let words_to_delete = fst::Set::from_iter(words_to_delete)?;
let new_words_fst = {
// We retrieve the current words FST from the database.
let words_fst = self.index.words_fst(self.wtxn)?;
let difference = words_fst.op().add(&words_to_delete).difference();
// We stream the new words FST, which no longer contains the to-delete words.
let mut new_words_fst_builder = fst::SetBuilder::memory();
new_words_fst_builder.extend_stream(difference.into_stream())?;
// We create a words FST set from the above builder.
new_words_fst_builder.into_set()
};
// We write the new words FST into the main database.
self.index.put_words_fst(self.wtxn, &new_words_fst)?;
// We delete the documents ids that are under the pairs of words we found.
// TODO We can maybe improve this by using the `compute_words_pair_proximities`
// function instead of iterating over all the possible word pairs.
for ((w1, _), (w2, _)) in words.iter().cartesian_product(&words) {
let start = &(w1.as_str(), w2.as_str(), 0);
let end = &(w1.as_str(), w2.as_str(), 7);
let mut iter = word_pair_proximity_docids.range_mut(self.wtxn, &(start..=end))?;
while let Some(result) = iter.next() {
let ((w1, w2, prox), mut docids) = result?;
docids.difference_with(&documents_ids);
if docids.is_empty() {
iter.del_current()?;
} else {
iter.put_current(&(w1, w2, prox), &docids)?;
}
}
}
Ok(documents_ids.len() as usize)
}
}
pub enum IndexDocumentsMethod {
/// Replace the previous document with the new one,
/// removing all the already known attributes.
ReplaceDocuments,
/// Merge the previous version of the document with the new version,
/// replacing old attribute values with the new ones and adding the new attributes.
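/// For example (illustrative only): merging the previous version
/// `{ "id": 1, "title": "old", "tags": ["a"] }` with the new version
/// `{ "id": 1, "title": "new" }` keeps `tags` and replaces `title`,
/// ending up with `{ "id": 1, "title": "new", "tags": ["a"] }`.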
UpdateDocuments,
}
pub struct IndexDocuments<'t, 'u, 'i> {
wtxn: &'t mut heed::RwTxn<'u>,
index: &'i Index,
update_method: IndexDocumentsMethod,
}
impl<'t, 'u, 'i> IndexDocuments<'t, 'u, 'i> {
fn new(wtxn: &'t mut heed::RwTxn<'u>, index: &'i Index) -> IndexDocuments<'t, 'u, 'i> {
IndexDocuments { wtxn, index, update_method: IndexDocumentsMethod::ReplaceDocuments }
}
pub fn index_documents_method(&mut self, method: IndexDocumentsMethod) -> &mut Self {
self.update_method = method;
self
}
pub fn execute(self) -> anyhow::Result<()> {
todo!()
}
}

src/update/update_store.rs Normal file

@@ -0,0 +1,256 @@
use std::path::Path;
use std::sync::Arc;
use crossbeam_channel::Sender;
use heed::types::{OwnedType, DecodeIgnore, SerdeJson, ByteSlice};
use heed::{EnvOpenOptions, Env, Database};
use serde::{Serialize, Deserialize};
use crate::BEU64;
#[derive(Clone)]
pub struct UpdateStore<M, N> {
env: Env,
pending_meta: Database<OwnedType<BEU64>, SerdeJson<M>>,
pending: Database<OwnedType<BEU64>, ByteSlice>,
processed_meta: Database<OwnedType<BEU64>, SerdeJson<N>>,
notification_sender: Sender<()>,
}
impl<M: 'static, N: 'static> UpdateStore<M, N> {
pub fn open<P, F>(
mut options: EnvOpenOptions,
path: P,
mut update_function: F,
) -> heed::Result<Arc<UpdateStore<M, N>>>
where
P: AsRef<Path>,
F: FnMut(u64, M, &[u8]) -> heed::Result<N> + Send + 'static,
M: for<'a> Deserialize<'a>,
N: Serialize,
{
options.max_dbs(3);
let env = options.open(path)?;
let pending_meta = env.create_database(Some("pending-meta"))?;
let pending = env.create_database(Some("pending"))?;
let processed_meta = env.create_database(Some("processed-meta"))?;
let (notification_sender, notification_receiver) = crossbeam_channel::bounded(1);
let update_store = Arc::new(UpdateStore {
env,
pending,
pending_meta,
processed_meta,
notification_sender,
});
let update_store_cloned = update_store.clone();
std::thread::spawn(move || {
// Block and wait for something to process.
for () in notification_receiver {
loop {
match update_store_cloned.process_pending_update(&mut update_function) {
Ok(Some(_)) => (),
Ok(None) => break,
Err(e) => eprintln!("error while processing update: {}", e),
}
}
}
});
Ok(update_store)
}
/// Returns the next available update id to use to store a new update.
fn new_update_id(&self, txn: &heed::RoTxn) -> heed::Result<u64> {
let last_pending = self.pending_meta
.as_polymorph()
.last::<_, OwnedType<BEU64>, DecodeIgnore>(txn)?
.map(|(k, _)| k.get());
if let Some(last_id) = last_pending {
return Ok(last_id + 1);
}
let last_processed = self.processed_meta
.as_polymorph()
.last::<_, OwnedType<BEU64>, DecodeIgnore>(txn)?
.map(|(k, _)| k.get());
match last_processed {
Some(last_id) => Ok(last_id + 1),
None => Ok(0),
}
}
/// Registers the update content in the pending store and the meta
/// into the pending-meta store. Returns the new unique update id.
pub fn register_update(&self, meta: &M, content: &[u8]) -> heed::Result<u64>
where M: Serialize,
{
let mut wtxn = self.env.write_txn()?;
// We ask the update store to give us a new update id; this is safe because
// we open a write txn before asking for the id and registering the update,
// so any other update registration is forced to wait for this write txn
// to be committed or aborted before it can get its own id.
let update_id = self.new_update_id(&wtxn)?;
let update_key = BEU64::new(update_id);
self.pending_meta.put(&mut wtxn, &update_key, meta)?;
self.pending.put(&mut wtxn, &update_key, content)?;
wtxn.commit()?;
if let Err(e) = self.notification_sender.try_send(()) {
assert!(!e.is_disconnected(), "update notification channel is disconnected");
}
Ok(update_id)
}
/// Executes the user provided function on the next pending update (the one with the lowest id).
/// This is asynchronous in the sense that it lets the user process the update with a read-only
/// txn and only writes the resulting meta to the processed-meta store *after* it has been processed.
fn process_pending_update<F>(&self, mut f: F) -> heed::Result<Option<(u64, N)>>
where
F: FnMut(u64, M, &[u8]) -> heed::Result<N>,
M: for<'a> Deserialize<'a>,
N: Serialize,
{
// Create a read transaction to be able to retrieve the pending update in order.
let rtxn = self.env.read_txn()?;
let first_meta = self.pending_meta.first(&rtxn)?;
// If there is a pending update we process it, keeping only
// a reader open while processing it, not a writer.
match first_meta {
Some((first_id, first_meta)) => {
let first_content = self.pending
.get(&rtxn, &first_id)?
.expect("associated update content");
// Process the pending update using the provided user function.
let new_meta = (f)(first_id.get(), first_meta, first_content)?;
drop(rtxn);
// Once the pending update has been successfully processed,
// we must remove its content from the pending stores,
// write the *new* meta to the processed-meta store and commit.
let mut wtxn = self.env.write_txn()?;
self.pending_meta.delete(&mut wtxn, &first_id)?;
self.pending.delete(&mut wtxn, &first_id)?;
self.processed_meta.put(&mut wtxn, &first_id, &new_meta)?;
wtxn.commit()?;
Ok(Some((first_id.get(), new_meta)))
},
None => Ok(None)
}
}
/// Executes the user defined function with both meta-store iterators; the first
/// iterator is the *processed* meta one and the second is the *pending* meta one.
pub fn iter_metas<F, T>(&self, mut f: F) -> heed::Result<T>
where
M: for<'a> Deserialize<'a>,
N: for<'a> Deserialize<'a>,
F: for<'a> FnMut(
heed::RoIter<'a, OwnedType<BEU64>, SerdeJson<N>>,
heed::RoIter<'a, OwnedType<BEU64>, SerdeJson<M>>,
) -> heed::Result<T>,
{
let rtxn = self.env.read_txn()?;
// We get both the pending and processed meta iterators.
let processed_iter = self.processed_meta.iter(&rtxn)?;
let pending_iter = self.pending_meta.iter(&rtxn)?;
// We execute the user defined function with both iterators.
(f)(processed_iter, pending_iter)
}
/// Returns the meta associated with the update or `None` if the update doesn't exist.
pub fn meta(&self, update_id: u64) -> heed::Result<Option<UpdateStatusMeta<M, N>>>
where
M: for<'a> Deserialize<'a>,
N: for<'a> Deserialize<'a>,
{
let rtxn = self.env.read_txn()?;
let key = BEU64::new(update_id);
if let Some(meta) = self.pending_meta.get(&rtxn, &key)? {
return Ok(Some(UpdateStatusMeta::Pending(meta)));
}
match self.processed_meta.get(&rtxn, &key)? {
Some(meta) => Ok(Some(UpdateStatusMeta::Processed(meta))),
None => Ok(None),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum UpdateStatusMeta<M, N> {
Pending(M),
Processed(N),
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
use std::time::{Duration, Instant};
#[test]
fn simple() {
let dir = tempfile::tempdir().unwrap();
let options = EnvOpenOptions::new();
let update_store = UpdateStore::open(options, dir, |_id, meta: String, _content| {
Ok(meta + " processed")
}).unwrap();
let meta = String::from("kiki");
let update_id = update_store.register_update(&meta, &[]).unwrap();
thread::sleep(Duration::from_millis(100));
let meta = update_store.meta(update_id).unwrap().unwrap();
assert_eq!(meta, UpdateStatusMeta::Processed(format!("kiki processed")));
}
#[test]
fn long_running_update() {
let dir = tempfile::tempdir().unwrap();
let options = EnvOpenOptions::new();
let update_store = UpdateStore::open(options, dir, |_id, meta: String, _content| {
thread::sleep(Duration::from_millis(400));
Ok(meta + " processed")
}).unwrap();
let before_register = Instant::now();
let meta = String::from("kiki");
let update_id_kiki = update_store.register_update(&meta, &[]).unwrap();
assert!(before_register.elapsed() < Duration::from_millis(200));
let meta = String::from("coco");
let update_id_coco = update_store.register_update(&meta, &[]).unwrap();
assert!(before_register.elapsed() < Duration::from_millis(200));
let meta = String::from("cucu");
let update_id_cucu = update_store.register_update(&meta, &[]).unwrap();
assert!(before_register.elapsed() < Duration::from_millis(200));
thread::sleep(Duration::from_millis(400 * 3 + 100));
let meta = update_store.meta(update_id_kiki).unwrap().unwrap();
assert_eq!(meta, UpdateStatusMeta::Processed(format!("kiki processed")));
let meta = update_store.meta(update_id_coco).unwrap().unwrap();
assert_eq!(meta, UpdateStatusMeta::Processed(format!("coco processed")));
let meta = update_store.meta(update_id_cucu).unwrap().unwrap();
assert_eq!(meta, UpdateStatusMeta::Processed(format!("cucu processed")));
}
}