Change the project to become a workspace with milli as a default-member

Clément Renault
2021-02-12 16:15:09 +01:00
parent d450b971f9
commit e8639517da
56 changed files with 1053 additions and 2617 deletions


@@ -0,0 +1,105 @@
use std::borrow::Cow;
use anyhow::{bail, ensure, Context};
use bstr::ByteSlice as _;
use fst::IntoStreamer;
use roaring::RoaringBitmap;
use crate::heed_codec::CboRoaringBitmapCodec;
const WORDS_FST_KEY: &[u8] = crate::index::WORDS_FST_KEY.as_bytes();
const FIELDS_IDS_MAP_KEY: &[u8] = crate::index::FIELDS_IDS_MAP_KEY.as_bytes();
const DOCUMENTS_IDS_KEY: &[u8] = crate::index::DOCUMENTS_IDS_KEY.as_bytes();
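/// Merges values stored under the main database keys: words FSTs are unioned,
/// fields ids maps must all be identical, and documents ids bitmaps are unioned.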
pub fn main_merge(key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
match key {
WORDS_FST_KEY => {
let fsts: Vec<_> = values.iter().map(|v| fst::Set::new(v).unwrap()).collect();
// Union of the FSTs
let mut op = fst::set::OpBuilder::new();
fsts.iter().for_each(|fst| op.push(fst.into_stream()));
let op = op.r#union();
let mut build = fst::SetBuilder::memory();
build.extend_stream(op.into_stream()).unwrap();
Ok(build.into_inner().unwrap())
},
FIELDS_IDS_MAP_KEY => {
ensure!(values.windows(2).all(|vs| vs[0] == vs[1]), "fields ids map doesn't match");
Ok(values[0].to_vec())
},
DOCUMENTS_IDS_KEY => roaring_bitmap_merge(values),
otherwise => bail!("unexpected main database key to merge: {:?}", otherwise),
}
}
pub fn word_docids_merge(_key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
roaring_bitmap_merge(values)
}
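/// Entries of this database are keyed by document id and word, so they must never
/// collide: being asked to merge them is an error.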
pub fn docid_word_positions_merge(key: &[u8], _values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
bail!("merging docid word positions is an error ({:?})", key.as_bstr())
}
pub fn field_id_docid_facet_values_merge(_key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
let first = values.first().context("no value to merge")?;
ensure!(values.iter().all(|v| v == first), "invalid field id docid facet value merging");
Ok(first.to_vec())
}
pub fn words_pairs_proximities_docids_merge(_key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
cbo_roaring_bitmap_merge(values)
}
pub fn facet_field_value_docids_merge(_key: &[u8], values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
cbo_roaring_bitmap_merge(values)
}
pub fn documents_merge(key: &[u8], _values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
bail!("merging documents is an error ({:?})", key.as_bstr())
}
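/// Merges the fields of two obkv records into `buffer`, the fields of `update`
/// winning over the ones of `base` when both define the same field id.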
pub fn merge_two_obkvs(base: obkv::KvReader, update: obkv::KvReader, buffer: &mut Vec<u8>) {
use itertools::merge_join_by;
use itertools::EitherOrBoth::{Both, Left, Right};
buffer.clear();
let mut writer = obkv::KvWriter::new(buffer);
for eob in merge_join_by(base.iter(), update.iter(), |(b, _), (u, _)| b.cmp(u)) {
match eob {
Both(_, (k, v)) | Left((k, v)) | Right((k, v)) => writer.insert(k, v).unwrap(),
}
}
writer.finish().unwrap();
}
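/// Unions the given serialized RoaringBitmaps and returns the result re-serialized.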
fn roaring_bitmap_merge(values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
let (head, tail) = values.split_first().unwrap();
let mut head = RoaringBitmap::deserialize_from(&head[..])?;
for value in tail {
let bitmap = RoaringBitmap::deserialize_from(&value[..])?;
head.union_with(&bitmap);
}
let mut vec = Vec::with_capacity(head.serialized_size());
head.serialize_into(&mut vec)?;
Ok(vec)
}
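/// Same as `roaring_bitmap_merge` but for bitmaps encoded with the `CboRoaringBitmapCodec`.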
fn cbo_roaring_bitmap_merge(values: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
let (head, tail) = values.split_first().unwrap();
let mut head = CboRoaringBitmapCodec::deserialize_from(&head[..])?;
for value in tail {
let bitmap = CboRoaringBitmapCodec::deserialize_from(&value[..])?;
head.union_with(&bitmap);
}
let mut vec = Vec::new();
CboRoaringBitmapCodec::serialize_into(&head, &mut vec)?;
Ok(vec)
}

File diff suppressed because it is too large.


@@ -0,0 +1,690 @@
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::convert::{TryFrom, TryInto};
use std::fs::File;
use std::iter::FromIterator;
use std::time::Instant;
use std::{cmp, iter};
use anyhow::{bail, Context};
use bstr::ByteSlice as _;
use fst::Set;
use grenad::{Reader, FileFuse, Writer, Sorter, CompressionType};
use heed::BytesEncode;
use linked_hash_map::LinkedHashMap;
use log::{debug, info};
use meilisearch_tokenizer::{Analyzer, AnalyzerConfig};
use ordered_float::OrderedFloat;
use roaring::RoaringBitmap;
use serde_json::Value;
use tempfile::tempfile;
use crate::facet::FacetType;
use crate::heed_codec::facet::{FacetValueStringCodec, FacetLevelValueF64Codec, FacetLevelValueI64Codec};
use crate::heed_codec::facet::{FieldDocIdFacetStringCodec, FieldDocIdFacetF64Codec, FieldDocIdFacetI64Codec};
use crate::heed_codec::{BoRoaringBitmapCodec, CboRoaringBitmapCodec};
use crate::update::UpdateIndexingStep;
use crate::{json_to_string, SmallVec8, SmallVec32, SmallString32, Position, DocumentId, FieldId};
use super::{MergeFn, create_writer, create_sorter, writer_into_reader};
use super::merge_function::{
main_merge, word_docids_merge, words_pairs_proximities_docids_merge,
facet_field_value_docids_merge, field_id_docid_facet_values_merge,
};
const LMDB_MAX_KEY_LENGTH: usize = 511;
const ONE_MEGABYTE: usize = 1024 * 1024;
const MAX_POSITION: usize = 1000;
const WORDS_FST_KEY: &[u8] = crate::index::WORDS_FST_KEY.as_bytes();
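/// The grenad readers produced by a `Store` once indexing is finished, one per database to fill.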
pub struct Readers {
pub main: Reader<FileFuse>,
pub word_docids: Reader<FileFuse>,
pub docid_word_positions: Reader<FileFuse>,
pub words_pairs_proximities_docids: Reader<FileFuse>,
pub facet_field_value_docids: Reader<FileFuse>,
pub field_id_docid_facet_values: Reader<FileFuse>,
pub documents: Reader<FileFuse>,
}
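/// A per-thread store that accumulates word, facet and document data into LRU caches,
/// grenad sorters and writers before being turned into `Readers` by `finish`.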
pub struct Store<'s, A> {
// Indexing parameters
searchable_fields: HashSet<FieldId>,
faceted_fields: HashMap<FieldId, FacetType>,
// Caches
word_docids: LinkedHashMap<SmallVec32<u8>, RoaringBitmap>,
word_docids_limit: usize,
words_pairs_proximities_docids: LinkedHashMap<(SmallVec32<u8>, SmallVec32<u8>, u8), RoaringBitmap>,
words_pairs_proximities_docids_limit: usize,
facet_field_value_docids: LinkedHashMap<(u8, FacetValue), RoaringBitmap>,
facet_field_value_docids_limit: usize,
// MTBL parameters
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: Option<u64>,
// MTBL sorters
main_sorter: Sorter<MergeFn>,
word_docids_sorter: Sorter<MergeFn>,
words_pairs_proximities_docids_sorter: Sorter<MergeFn>,
facet_field_value_docids_sorter: Sorter<MergeFn>,
field_id_docid_facet_values_sorter: Sorter<MergeFn>,
// MTBL writers
docid_word_positions_writer: Writer<File>,
documents_writer: Writer<File>,
// tokenizer
analyzer: Analyzer<'s, A>,
}
impl<'s, A: AsRef<[u8]>> Store<'s, A> {
pub fn new(
searchable_fields: HashSet<FieldId>,
faceted_fields: HashMap<FieldId, FacetType>,
linked_hash_map_size: Option<usize>,
max_nb_chunks: Option<usize>,
max_memory: Option<usize>,
chunk_compression_type: CompressionType,
chunk_compression_level: Option<u32>,
chunk_fusing_shrink_size: Option<u64>,
stop_words: &'s Set<A>,
) -> anyhow::Result<Self>
{
// We divide the max memory by the number of memory-bounded sorters the Store has.
let max_memory = max_memory.map(|mm| cmp::max(ONE_MEGABYTE, mm / 4));
let linked_hash_map_size = linked_hash_map_size.unwrap_or(500);
let main_sorter = create_sorter(
main_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
max_memory,
);
let word_docids_sorter = create_sorter(
word_docids_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
max_memory,
);
let words_pairs_proximities_docids_sorter = create_sorter(
words_pairs_proximities_docids_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
max_memory,
);
let facet_field_value_docids_sorter = create_sorter(
facet_field_value_docids_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
max_memory,
);
let field_id_docid_facet_values_sorter = create_sorter(
field_id_docid_facet_values_merge,
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
max_nb_chunks,
Some(1024 * 1024 * 1024), // 1 GiB
);
let documents_writer = tempfile().and_then(|f| {
create_writer(chunk_compression_type, chunk_compression_level, f)
})?;
let docid_word_positions_writer = tempfile().and_then(|f| {
create_writer(chunk_compression_type, chunk_compression_level, f)
})?;
let analyzer = Analyzer::new(AnalyzerConfig::default_with_stopwords(stop_words));
Ok(Store {
// Indexing parameters.
searchable_fields,
faceted_fields,
// Caches
word_docids: LinkedHashMap::with_capacity(linked_hash_map_size),
word_docids_limit: linked_hash_map_size,
words_pairs_proximities_docids: LinkedHashMap::with_capacity(linked_hash_map_size),
words_pairs_proximities_docids_limit: linked_hash_map_size,
facet_field_value_docids: LinkedHashMap::with_capacity(linked_hash_map_size),
facet_field_value_docids_limit: linked_hash_map_size,
// MTBL parameters
chunk_compression_type,
chunk_compression_level,
chunk_fusing_shrink_size,
// MTBL sorters
main_sorter,
word_docids_sorter,
words_pairs_proximities_docids_sorter,
facet_field_value_docids_sorter,
field_id_docid_facet_values_sorter,
// MTBL writers
docid_word_positions_writer,
documents_writer,
// tokenizer
analyzer,
})
}
// Save the document id under the word we have seen it in.
fn insert_word_docid(&mut self, word: &str, id: DocumentId) -> anyhow::Result<()> {
// if get_refresh finds the element it is assured to be at the end of the linked hash map.
match self.word_docids.get_refresh(word.as_bytes()) {
Some(old) => { old.insert(id); },
None => {
let word_vec = SmallVec32::from(word.as_bytes());
// A newly inserted element is appended at the end of the linked hash map.
self.word_docids.insert(word_vec, RoaringBitmap::from_iter(Some(id)));
// If the word docids cache just reached its capacity we must remove
// one element, this way the next insertion won't grow the capacity.
if self.word_docids.len() == self.word_docids_limit {
// Removing the front element is equivalent to removing the LRU element.
let lru = self.word_docids.pop_front();
Self::write_word_docids(&mut self.word_docids_sorter, lru)?;
}
}
}
Ok(())
}
// Save the document id under the facet field id and value we have seen it in.
fn insert_facet_values_docid(
&mut self,
field_id: FieldId,
field_value: FacetValue,
id: DocumentId,
) -> anyhow::Result<()>
{
Self::write_field_id_docid_facet_value(&mut self.field_id_docid_facet_values_sorter, field_id, id, &field_value)?;
let key = (field_id, field_value);
// if get_refresh finds the element it is assured to be at the end of the linked hash map.
match self.facet_field_value_docids.get_refresh(&key) {
Some(old) => { old.insert(id); },
None => {
// A newly inserted element is appended at the end of the linked hash map.
self.facet_field_value_docids.insert(key, RoaringBitmap::from_iter(Some(id)));
// If the facet field value docids cache just reached its capacity we must remove
// one element, this way the next insertion won't grow the capacity.
if self.facet_field_value_docids.len() == self.facet_field_value_docids_limit {
// Removing the front element is equivalent to removing the LRU element.
Self::write_facet_field_value_docids(
&mut self.facet_field_value_docids_sorter,
self.facet_field_value_docids.pop_front(),
)?;
}
}
}
Ok(())
}
// Save the document id under the word pair proximities it contains.
fn insert_words_pairs_proximities_docids<'a>(
&mut self,
words_pairs_proximities: impl IntoIterator<Item=((&'a str, &'a str), u8)>,
id: DocumentId,
) -> anyhow::Result<()>
{
for ((w1, w2), prox) in words_pairs_proximities {
let w1 = SmallVec32::from(w1.as_bytes());
let w2 = SmallVec32::from(w2.as_bytes());
let key = (w1, w2, prox);
// if get_refresh finds the element it is assured
// to be at the end of the linked hash map.
match self.words_pairs_proximities_docids.get_refresh(&key) {
Some(old) => { old.insert(id); },
None => {
// A newly inserted element is appended at the end of the linked hash map.
let ids = RoaringBitmap::from_iter(Some(id));
self.words_pairs_proximities_docids.insert(key, ids);
}
}
}
// If the linked hashmap is over capacity we must remove the overflowing elements.
let len = self.words_pairs_proximities_docids.len();
let overflow = len.checked_sub(self.words_pairs_proximities_docids_limit);
if let Some(overflow) = overflow {
let mut lrus = Vec::with_capacity(overflow);
// Removing front elements is equivalent to removing the LRUs.
let iter = iter::from_fn(|| self.words_pairs_proximities_docids.pop_front());
iter.take(overflow).for_each(|x| lrus.push(x));
Self::write_words_pairs_proximities(&mut self.words_pairs_proximities_docids_sorter, lrus)?;
}
Ok(())
}
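// Writes everything extracted from a single document: its word pair proximities,
// its word docids and facet values, the record itself and its word positions.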
fn write_document(
&mut self,
document_id: DocumentId,
words_positions: &mut HashMap<String, SmallVec32<Position>>,
facet_values: &mut HashMap<FieldId, SmallVec8<FacetValue>>,
record: &[u8],
) -> anyhow::Result<()>
{
// We compute the list of words pairs proximities (self-join) and write it directly to disk.
let words_pair_proximities = compute_words_pair_proximities(&words_positions);
self.insert_words_pairs_proximities_docids(words_pair_proximities, document_id)?;
// We store the document as an obkv record and write its word positions before
// draining the words, so the positions are still available when serialized.
self.documents_writer.insert(document_id.to_be_bytes(), record)?;
Self::write_docid_word_positions(&mut self.docid_word_positions_writer, document_id, words_positions)?;
// We store document_id associated with all the words the record contains,
// draining the map so it can be reused for the next document.
for (word, _) in words_positions.drain() {
self.insert_word_docid(&word, document_id)?;
}
// We store document_id associated with all the field id and values.
for (field, values) in facet_values.drain() {
for value in values {
self.insert_facet_values_docid(field, value, document_id)?;
}
}
Ok(())
}
fn write_words_pairs_proximities(
sorter: &mut Sorter<MergeFn>,
iter: impl IntoIterator<Item=((SmallVec32<u8>, SmallVec32<u8>, u8), RoaringBitmap)>,
) -> anyhow::Result<()>
{
let mut key = Vec::new();
let mut buffer = Vec::new();
for ((w1, w2, min_prox), docids) in iter {
key.clear();
key.extend_from_slice(w1.as_bytes());
key.push(0);
key.extend_from_slice(w2.as_bytes());
// Storing the minimum proximity found between those words
key.push(min_prox);
// We serialize the document ids into a buffer
buffer.clear();
buffer.reserve(CboRoaringBitmapCodec::serialized_size(&docids));
CboRoaringBitmapCodec::serialize_into(&docids, &mut buffer)?;
// that we write under the generated key into MTBL
if lmdb_key_valid_size(&key) {
sorter.insert(&key, &buffer)?;
}
}
Ok(())
}
fn write_docid_word_positions(
writer: &mut Writer<File>,
id: DocumentId,
words_positions: &HashMap<String, SmallVec32<Position>>,
) -> anyhow::Result<()>
{
// We prefix the words by the document id.
let mut key = id.to_be_bytes().to_vec();
let base_size = key.len();
// We order the words lexicographically, this way we avoid having to use a sorter.
let words_positions = BTreeMap::from_iter(words_positions);
for (word, positions) in words_positions {
key.truncate(base_size);
key.extend_from_slice(word.as_bytes());
// We serialize the positions into a buffer.
let positions = RoaringBitmap::from_iter(positions.iter().cloned());
let bytes = BoRoaringBitmapCodec::bytes_encode(&positions)
.with_context(|| "could not serialize positions")?;
// that we write under the generated key into MTBL
if lmdb_key_valid_size(&key) {
writer.insert(&key, &bytes)?;
}
}
Ok(())
}
fn write_facet_field_value_docids<I>(
sorter: &mut Sorter<MergeFn>,
iter: I,
) -> anyhow::Result<()>
where I: IntoIterator<Item=((FieldId, FacetValue), RoaringBitmap)>
{
use FacetValue::*;
for ((field_id, value), docids) in iter {
let result = match value {
String(s) => FacetValueStringCodec::bytes_encode(&(field_id, &s)).map(Cow::into_owned),
Float(f) => FacetLevelValueF64Codec::bytes_encode(&(field_id, 0, *f, *f)).map(Cow::into_owned),
Integer(i) => FacetLevelValueI64Codec::bytes_encode(&(field_id, 0, i, i)).map(Cow::into_owned),
};
let key = result.context("could not serialize facet key")?;
let bytes = CboRoaringBitmapCodec::bytes_encode(&docids)
.context("could not serialize docids")?;
if lmdb_key_valid_size(&key) {
sorter.insert(&key, &bytes)?;
}
}
Ok(())
}
fn write_field_id_docid_facet_value(
sorter: &mut Sorter<MergeFn>,
field_id: FieldId,
document_id: DocumentId,
value: &FacetValue,
) -> anyhow::Result<()>
{
use FacetValue::*;
let result = match value {
String(s) => FieldDocIdFacetStringCodec::bytes_encode(&(field_id, document_id, s)).map(Cow::into_owned),
Float(f) => FieldDocIdFacetF64Codec::bytes_encode(&(field_id, document_id, **f)).map(Cow::into_owned),
Integer(i) => FieldDocIdFacetI64Codec::bytes_encode(&(field_id, document_id, *i)).map(Cow::into_owned),
};
let key = result.context("could not serialize facet key")?;
if lmdb_key_valid_size(&key) {
sorter.insert(&key, &[])?;
}
Ok(())
}
fn write_word_docids<I>(sorter: &mut Sorter<MergeFn>, iter: I) -> anyhow::Result<()>
where I: IntoIterator<Item=(SmallVec32<u8>, RoaringBitmap)>
{
let mut key = Vec::new();
let mut buffer = Vec::new();
for (word, ids) in iter {
key.clear();
key.extend_from_slice(&word);
// We serialize the document ids into a buffer
buffer.clear();
let ids = RoaringBitmap::from_iter(ids);
buffer.reserve(ids.serialized_size());
ids.serialize_into(&mut buffer)?;
// that we write under the generated key into MTBL
if lmdb_key_valid_size(&key) {
sorter.insert(&key, &buffer)?;
}
}
Ok(())
}
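// Consumes the given grenad reader of documents, indexing only the documents
// assigned to this thread (count % num_threads == thread_index), and returns the readers.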
pub fn index<F>(
mut self,
mut documents: grenad::Reader<&[u8]>,
documents_count: usize,
thread_index: usize,
num_threads: usize,
log_every_n: Option<usize>,
mut progress_callback: F,
) -> anyhow::Result<Readers>
where F: FnMut(UpdateIndexingStep),
{
debug!("{:?}: Indexing in a Store...", thread_index);
let mut before = Instant::now();
let mut words_positions = HashMap::new();
let mut facet_values = HashMap::new();
let mut count: usize = 0;
while let Some((key, value)) = documents.next()? {
let document_id = key.try_into().map(u32::from_be_bytes).unwrap();
let document = obkv::KvReader::new(value);
// We skip documents that must not be indexed by this thread.
if count % num_threads == thread_index {
// This is a log routine that we do every `log_every_n` documents.
if thread_index == 0 && log_every_n.map_or(false, |len| count % len == 0) {
info!("We have seen {} documents so far ({:.02?}).", format_count(count), before.elapsed());
progress_callback(UpdateIndexingStep::IndexDocuments {
documents_seen: count,
total_documents: documents_count,
});
before = Instant::now();
}
for (attr, content) in document.iter() {
if self.faceted_fields.contains_key(&attr) || self.searchable_fields.contains(&attr) {
let value = serde_json::from_slice(content)?;
if let Some(ftype) = self.faceted_fields.get(&attr) {
let mut values = parse_facet_value(*ftype, &value).with_context(|| {
format!("extracting facets from the value {}", value)
})?;
facet_values.entry(attr).or_insert_with(SmallVec8::new).extend(values.drain(..));
}
if self.searchable_fields.contains(&attr) {
let content = match json_to_string(&value) {
Some(content) => content,
None => continue,
};
let analyzed = self.analyzer.analyze(&content);
let tokens = analyzed
.tokens()
.filter(|t| t.is_word())
.map(|t| t.text().to_string());
for (pos, word) in tokens.enumerate().take(MAX_POSITION) {
let position = (attr as usize * MAX_POSITION + pos) as u32;
words_positions.entry(word).or_insert_with(SmallVec32::new).push(position);
}
}
}
}
// We write the document in the documents store.
self.write_document(document_id, &mut words_positions, &mut facet_values, value)?;
}
// We increment the documents counter used to dispatch documents between threads.
count += 1;
}
progress_callback(UpdateIndexingStep::IndexDocuments {
documents_seen: count,
total_documents: documents_count,
});
let readers = self.finish()?;
debug!("{:?}: Store created!", thread_index);
Ok(readers)
}
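// Flushes the remaining in-memory caches into the sorters, builds the words FST
// and dumps every sorter into its writer before turning them into readers.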
fn finish(mut self) -> anyhow::Result<Readers> {
let comp_type = self.chunk_compression_type;
let comp_level = self.chunk_compression_level;
let shrink_size = self.chunk_fusing_shrink_size;
Self::write_word_docids(&mut self.word_docids_sorter, self.word_docids)?;
Self::write_words_pairs_proximities(
&mut self.words_pairs_proximities_docids_sorter,
self.words_pairs_proximities_docids,
)?;
Self::write_facet_field_value_docids(
&mut self.facet_field_value_docids_sorter,
self.facet_field_value_docids,
)?;
let mut word_docids_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
let mut builder = fst::SetBuilder::memory();
let mut iter = self.word_docids_sorter.into_iter()?;
while let Some((word, val)) = iter.next()? {
// The words in this sorter are lexicographically ordered,
// we use the keys to construct the words fst.
builder.insert(word)?;
word_docids_wtr.insert(word, val)?;
}
let fst = builder.into_set();
self.main_sorter.insert(WORDS_FST_KEY, fst.as_fst().as_bytes())?;
let mut main_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
self.main_sorter.write_into(&mut main_wtr)?;
let mut words_pairs_proximities_docids_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
self.words_pairs_proximities_docids_sorter.write_into(&mut words_pairs_proximities_docids_wtr)?;
let mut facet_field_value_docids_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
self.facet_field_value_docids_sorter.write_into(&mut facet_field_value_docids_wtr)?;
let mut field_id_docid_facet_values_wtr = tempfile().and_then(|f| create_writer(comp_type, comp_level, f))?;
self.field_id_docid_facet_values_sorter.write_into(&mut field_id_docid_facet_values_wtr)?;
let main = writer_into_reader(main_wtr, shrink_size)?;
let word_docids = writer_into_reader(word_docids_wtr, shrink_size)?;
let words_pairs_proximities_docids = writer_into_reader(words_pairs_proximities_docids_wtr, shrink_size)?;
let facet_field_value_docids = writer_into_reader(facet_field_value_docids_wtr, shrink_size)?;
let field_id_docid_facet_values = writer_into_reader(field_id_docid_facet_values_wtr, shrink_size)?;
let docid_word_positions = writer_into_reader(self.docid_word_positions_writer, shrink_size)?;
let documents = writer_into_reader(self.documents_writer, shrink_size)?;
Ok(Readers {
main,
word_docids,
docid_word_positions,
words_pairs_proximities_docids,
facet_field_value_docids,
field_id_docid_facet_values,
documents,
})
}
}
/// Outputs a list of all pairs of words with the shortest proximity between 1 and 7 inclusive.
///
/// This list is used by the engine to calculate the documents containing words that are
/// close to each other.
fn compute_words_pair_proximities(
word_positions: &HashMap<String, SmallVec32<Position>>,
) -> HashMap<(&str, &str), u8>
{
use itertools::Itertools;
let mut words_pair_proximities = HashMap::new();
for ((w1, ps1), (w2, ps2)) in word_positions.iter().cartesian_product(word_positions) {
let mut min_prox = None;
for (ps1, ps2) in ps1.iter().cartesian_product(ps2) {
let prox = crate::proximity::positions_proximity(*ps1, *ps2);
let prox = u8::try_from(prox).unwrap();
// We don't care about a word that appears at the
// same position as, or too far from, the other.
if prox >= 1 && prox <= 7 && min_prox.map_or(true, |mp| prox < mp) {
min_prox = Some(prox)
}
}
if let Some(min_prox) = min_prox {
words_pair_proximities.insert((w1.as_str(), w2.as_str()), min_prox);
}
}
words_pair_proximities
}
fn format_count(n: usize) -> String {
human_format::Formatter::new().with_decimals(1).with_separator("").format(n as f64)
}
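/// Returns whether the given key can be stored in LMDB, i.e. it is non-empty
/// and not longer than the maximum LMDB key length.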
fn lmdb_key_valid_size(key: &[u8]) -> bool {
!key.is_empty() && key.len() <= LMDB_MAX_KEY_LENGTH
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
enum FacetValue {
String(SmallString32),
Float(OrderedFloat<f64>),
Integer(i64),
}
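/// Parses a JSON value into the facet values of the expected `FacetType`,
/// recursing one level into arrays.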
fn parse_facet_value(ftype: FacetType, value: &Value) -> anyhow::Result<SmallVec8<FacetValue>> {
use FacetValue::*;
fn inner_parse_facet_value(
ftype: FacetType,
value: &Value,
can_recurse: bool,
output: &mut SmallVec8<FacetValue>,
) -> anyhow::Result<()>
{
match value {
Value::Null => Ok(()),
Value::Bool(b) => {
output.push(Integer(*b as i64));
Ok(())
},
Value::Number(number) => match ftype {
FacetType::String => {
let string = SmallString32::from(number.to_string());
output.push(String(string));
Ok(())
},
FacetType::Float => match number.as_f64() {
Some(float) => {
output.push(Float(OrderedFloat(float)));
Ok(())
},
None => bail!("invalid facet type, expecting {} found integer", ftype),
},
FacetType::Integer => match number.as_i64() {
Some(integer) => {
output.push(Integer(integer));
Ok(())
},
None => if number.is_f64() {
bail!("invalid facet type, expecting {} found float", ftype)
} else {
bail!("invalid facet type, expecting {} found out-of-bound integer (64bit)", ftype)
},
},
},
Value::String(string) => {
let string = string.trim().to_lowercase();
if string.is_empty() { return Ok(()) }
match ftype {
FacetType::String => {
let string = SmallString32::from(string);
output.push(String(string));
Ok(())
},
FacetType::Float => match string.parse() {
Ok(float) => {
output.push(Float(OrderedFloat(float)));
Ok(())
},
Err(_err) => bail!("invalid facet type, expecting {} found string", ftype),
},
FacetType::Integer => match string.parse() {
Ok(integer) => {
output.push(Integer(integer));
Ok(())
},
Err(_err) => bail!("invalid facet type, expecting {} found string", ftype),
},
}
},
Value::Array(values) => if can_recurse {
values.iter().map(|v| inner_parse_facet_value(ftype, v, false, output)).collect()
} else {
bail!("invalid facet type, expecting {} found sub-array ()", ftype)
},
Value::Object(_) => bail!("invalid facet type, expecting {} found object", ftype),
}
}
let mut facet_values = SmallVec8::new();
inner_parse_facet_value(ftype, value, true, &mut facet_values)?;
Ok(facet_values)
}

View File

@@ -0,0 +1,641 @@
use std::borrow::Cow;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom};
use std::iter::Peekable;
use std::time::Instant;
use anyhow::{anyhow, Context};
use grenad::CompressionType;
use log::info;
use roaring::RoaringBitmap;
use serde_json::{Map, Value};
use crate::{Index, BEU32, MergeFn, FieldsIdsMap, ExternalDocumentsIds, FieldId};
use crate::update::{AvailableDocumentsIds, UpdateIndexingStep};
use super::merge_function::merge_two_obkvs;
use super::{create_writer, create_sorter, IndexDocumentsMethod};
const DEFAULT_PRIMARY_KEY_NAME: &str = "id";
pub struct TransformOutput {
pub primary_key: String,
pub fields_ids_map: FieldsIdsMap,
pub external_documents_ids: ExternalDocumentsIds<'static>,
pub new_documents_ids: RoaringBitmap,
pub replaced_documents_ids: RoaringBitmap,
pub documents_count: usize,
pub documents_file: File,
}
/// Extract the external ids, deduplicate and compute the new internal documents ids
/// and fields ids, writing all the documents under their internal ids into a final file.
///
/// Outputs the new `FieldsIdsMap`, the new `ExternalDocumentsIds` map, the new documents ids,
/// the replaced documents ids, the number of documents in this update and the file
/// containing all those documents.
pub struct Transform<'t, 'i> {
pub rtxn: &'t heed::RoTxn<'i>,
pub index: &'i Index,
pub log_every_n: Option<usize>,
pub chunk_compression_type: CompressionType,
pub chunk_compression_level: Option<u32>,
pub chunk_fusing_shrink_size: Option<u64>,
pub max_nb_chunks: Option<usize>,
pub max_memory: Option<usize>,
pub index_documents_method: IndexDocumentsMethod,
pub autogenerate_docids: bool,
}
impl Transform<'_, '_> {
pub fn output_from_json<R, F>(self, reader: R, progress_callback: F) -> anyhow::Result<TransformOutput>
where
R: Read,
F: Fn(UpdateIndexingStep) + Sync,
{
self.output_from_generic_json(reader, false, progress_callback)
}
pub fn output_from_json_stream<R, F>(self, reader: R, progress_callback: F) -> anyhow::Result<TransformOutput>
where
R: Read,
F: Fn(UpdateIndexingStep) + Sync,
{
self.output_from_generic_json(reader, true, progress_callback)
}
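// Reads the documents either as a single JSON array or as a stream of JSON objects,
// resolves the primary key, and feeds every document as an obkv into a sorter keyed
// by its external id before handing it to `output_from_sorter`.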
fn output_from_generic_json<R, F>(
self,
reader: R,
is_stream: bool,
progress_callback: F,
) -> anyhow::Result<TransformOutput>
where
R: Read,
F: Fn(UpdateIndexingStep) + Sync,
{
let mut fields_ids_map = self.index.fields_ids_map(self.rtxn)?;
let external_documents_ids = self.index.external_documents_ids(self.rtxn).unwrap();
// Deserialize the whole batch of documents in memory.
let mut documents: Peekable<Box<dyn Iterator<Item=serde_json::Result<Map<String, Value>>>>> = if is_stream {
let iter = serde_json::Deserializer::from_reader(reader).into_iter();
let iter = Box::new(iter) as Box<dyn Iterator<Item=_>>;
iter.peekable()
} else {
let vec: Vec<_> = serde_json::from_reader(reader)?;
let iter = vec.into_iter().map(Ok);
let iter = Box::new(iter) as Box<dyn Iterator<Item=_>>;
iter.peekable()
};
// We extract the primary key from the first document in
// the batch if it hasn't already been defined in the index
let first = documents.peek().and_then(|r| r.as_ref().ok());
let alternative_name = first.and_then(|doc| doc.keys().find(|k| k.contains(DEFAULT_PRIMARY_KEY_NAME)).cloned());
let (primary_key_id, primary_key) = compute_primary_key_pair(
self.index.primary_key(self.rtxn)?,
&mut fields_ids_map,
alternative_name,
self.autogenerate_docids
)?;
if documents.peek().is_none() {
return Ok(TransformOutput {
primary_key,
fields_ids_map,
external_documents_ids: ExternalDocumentsIds::default(),
new_documents_ids: RoaringBitmap::new(),
replaced_documents_ids: RoaringBitmap::new(),
documents_count: 0,
documents_file: tempfile::tempfile()?,
});
}
// We must choose the appropriate merge function for when two or more documents
// with the same user id must be merged or fully replaced in the same batch.
let merge_function = match self.index_documents_method {
IndexDocumentsMethod::ReplaceDocuments => keep_latest_obkv,
IndexDocumentsMethod::UpdateDocuments => merge_obkvs,
};
// We initialize the sorter with the user indexing settings.
let mut sorter = create_sorter(
merge_function,
self.chunk_compression_type,
self.chunk_compression_level,
self.chunk_fusing_shrink_size,
self.max_nb_chunks,
self.max_memory,
);
let mut json_buffer = Vec::new();
let mut obkv_buffer = Vec::new();
let mut uuid_buffer = [0; uuid::adapter::Hyphenated::LENGTH];
let mut documents_count = 0;
for result in documents {
let document = result?;
if self.log_every_n.map_or(false, |len| documents_count % len == 0) {
progress_callback(UpdateIndexingStep::TransformFromUserIntoGenericFormat {
documents_seen: documents_count,
});
}
obkv_buffer.clear();
let mut writer = obkv::KvWriter::new(&mut obkv_buffer);
// We prepare the fields ids map with the documents keys.
for (key, _value) in &document {
fields_ids_map.insert(&key).context("field id limit reached")?;
}
// We retrieve the user id from the document based on the primary key name,
// if the document id isn't present we generate a uuid.
let external_id = match document.get(&primary_key) {
Some(value) => match value {
Value::String(string) => Cow::Borrowed(string.as_str()),
Value::Number(number) => Cow::Owned(number.to_string()),
_ => return Err(anyhow!("documents ids must be either strings or numbers")),
},
None => {
if !self.autogenerate_docids {
return Err(anyhow!("missing primary key"));
}
let uuid = uuid::Uuid::new_v4().to_hyphenated().encode_lower(&mut uuid_buffer);
Cow::Borrowed(uuid)
},
};
// We iterate in the fields ids ordered.
for (field_id, name) in fields_ids_map.iter() {
json_buffer.clear();
// We try to extract the value from the document and if we don't find anything
// and this should be the document id we return the one we generated.
if let Some(value) = document.get(name) {
// We serialize the attribute values.
serde_json::to_writer(&mut json_buffer, value)?;
writer.insert(field_id, &json_buffer)?;
}
else if field_id == primary_key_id {
// We validate the document id [a-zA-Z0-9\-_].
let external_id = match validate_document_id(&external_id) {
Some(valid) => valid,
None => return Err(anyhow!("invalid document id: {:?}", external_id)),
};
// We serialize the document id.
serde_json::to_writer(&mut json_buffer, &external_id)?;
writer.insert(field_id, &json_buffer)?;
}
}
// We use the extracted/generated user id as the key for this document.
sorter.insert(external_id.as_bytes(), &obkv_buffer)?;
documents_count += 1;
}
progress_callback(UpdateIndexingStep::TransformFromUserIntoGenericFormat {
documents_seen: documents_count,
});
// Now that we have a valid sorter that contains the user id and the obkv we
// give it to the last transforming function which returns the TransformOutput.
self.output_from_sorter(
sorter,
primary_key,
fields_ids_map,
documents_count,
external_documents_ids,
progress_callback,
)
}
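// Same as the JSON path but for CSV: the headers define the fields ids, the primary
// key column is looked up (or generated), and each record is turned into an obkv
// keyed by its external id before being handed to `output_from_sorter`.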
pub fn output_from_csv<R, F>(self, reader: R, progress_callback: F) -> anyhow::Result<TransformOutput>
where
R: Read,
F: Fn(UpdateIndexingStep) + Sync,
{
let mut fields_ids_map = self.index.fields_ids_map(self.rtxn)?;
let external_documents_ids = self.index.external_documents_ids(self.rtxn).unwrap();
let mut csv = csv::Reader::from_reader(reader);
let headers = csv.headers()?;
let mut fields_ids = Vec::new();
// Generate the new fields ids based on the current fields ids and this CSV headers.
for (i, header) in headers.iter().enumerate() {
let id = fields_ids_map.insert(header).context("field id limit reached")?;
fields_ids.push((id, i));
}
// Extract the position of the primary key in the current headers, None if not found.
let primary_key_pos = match self.index.primary_key(self.rtxn)? {
Some(primary_key) => {
// The primary key is known so we must find the position in the CSV headers.
headers.iter().position(|h| h == primary_key)
},
None => headers.iter().position(|h| h.contains("id")),
};
// Returns the primary key field id in the fields ids map, creating an "id" field
// in case it is not present in the current headers.
let alternative_name = primary_key_pos.map(|pos| headers[pos].to_string());
let (primary_key_id, _) = compute_primary_key_pair(
self.index.primary_key(self.rtxn)?,
&mut fields_ids_map,
alternative_name,
self.autogenerate_docids
)?;
// The primary key field is not present in the header, so we need to create it.
if primary_key_pos.is_none() {
fields_ids.push((primary_key_id, usize::max_value()));
}
// We sort the fields ids by the fields ids map id, this way we are sure to iterate over
// the records fields in the fields ids map order and correctly generate the obkv.
fields_ids.sort_unstable_by_key(|(field_id, _)| *field_id);
// We initialize the sorter with the user indexing settings.
let mut sorter = create_sorter(
keep_latest_obkv,
self.chunk_compression_type,
self.chunk_compression_level,
self.chunk_fusing_shrink_size,
self.max_nb_chunks,
self.max_memory,
);
// We write into the sorter to merge and deduplicate the documents
// based on the external ids.
let mut json_buffer = Vec::new();
let mut obkv_buffer = Vec::new();
let mut uuid_buffer = [0; uuid::adapter::Hyphenated::LENGTH];
let mut documents_count = 0;
let mut record = csv::StringRecord::new();
while csv.read_record(&mut record)? {
obkv_buffer.clear();
let mut writer = obkv::KvWriter::new(&mut obkv_buffer);
if self.log_every_n.map_or(false, |len| documents_count % len == 0) {
progress_callback(UpdateIndexingStep::TransformFromUserIntoGenericFormat {
documents_seen: documents_count,
});
}
// We extract the user id if we know where it is or generate an UUID V4 otherwise.
let external_id = match primary_key_pos {
Some(pos) => {
let external_id = &record[pos];
// We validate the document id [a-zA-Z0-9\-_].
match validate_document_id(&external_id) {
Some(valid) => valid,
None => return Err(anyhow!("invalid document id: {:?}", external_id)),
}
},
None => uuid::Uuid::new_v4().to_hyphenated().encode_lower(&mut uuid_buffer),
};
// When the primary_key_field_id is found in the fields ids list
// we return the generated document id instead of the record field.
let iter = fields_ids.iter()
.map(|(fi, i)| {
let field = if *fi == primary_key_id { external_id } else { &record[*i] };
(fi, field)
});
// We retrieve the field id based on the fields ids map fields ids order.
for (field_id, field) in iter {
// We serialize the attribute values as JSON strings.
json_buffer.clear();
serde_json::to_writer(&mut json_buffer, &field)?;
writer.insert(*field_id, &json_buffer)?;
}
// We use the extracted/generated user id as the key for this document.
sorter.insert(external_id, &obkv_buffer)?;
documents_count += 1;
}
progress_callback(UpdateIndexingStep::TransformFromUserIntoGenericFormat {
documents_seen: documents_count,
});
// Now that we have a valid sorter that contains the user id and the obkv we
// give it to the last transforming function which returns the TransformOutput.
let primary_key_name = fields_ids_map
.name(primary_key_id)
.map(String::from)
.expect("Primary key must be present in fields id map");
self.output_from_sorter(
sorter,
primary_key_name,
fields_ids_map,
documents_count,
external_documents_ids,
progress_callback,
)
}
/// Generate the `TransformOutput` based on the given sorter that can be generated from any
/// format like CSV, JSON or JSON stream. This sorter must contain a key that is the document
/// id for the user side and the value must be an obkv where keys are valid fields ids.
fn output_from_sorter<F>(
self,
sorter: grenad::Sorter<MergeFn>,
primary_key: String,
fields_ids_map: FieldsIdsMap,
approximate_number_of_documents: usize,
mut external_documents_ids: ExternalDocumentsIds<'_>,
progress_callback: F,
) -> anyhow::Result<TransformOutput>
where
F: Fn(UpdateIndexingStep) + Sync,
{
let documents_ids = self.index.documents_ids(self.rtxn)?;
let mut available_documents_ids = AvailableDocumentsIds::from_documents_ids(&documents_ids);
// Once we have sorted and deduplicated the documents we write them into a final file.
let mut final_sorter = create_sorter(
|_docid, _obkvs| Err(anyhow!("cannot merge two documents")),
self.chunk_compression_type,
self.chunk_compression_level,
self.chunk_fusing_shrink_size,
self.max_nb_chunks,
self.max_memory,
);
let mut new_external_documents_ids_builder = fst::MapBuilder::memory();
let mut replaced_documents_ids = RoaringBitmap::new();
let mut new_documents_ids = RoaringBitmap::new();
let mut obkv_buffer = Vec::new();
// While we write into final file we get or generate the internal documents ids.
let mut documents_count = 0;
let mut iter = sorter.into_iter()?;
while let Some((external_id, update_obkv)) = iter.next()? {
if self.log_every_n.map_or(false, |len| documents_count % len == 0) {
progress_callback(UpdateIndexingStep::ComputeIdsAndMergeDocuments {
documents_seen: documents_count,
total_documents: approximate_number_of_documents,
});
}
let (docid, obkv) = match external_documents_ids.get(external_id) {
Some(docid) => {
// If we find the user id in the current external documents ids map
// we use it and insert it in the list of replaced documents.
replaced_documents_ids.insert(docid);
// Depending on the update indexing method we will merge
// the document update with the current document or not.
match self.index_documents_method {
IndexDocumentsMethod::ReplaceDocuments => (docid, update_obkv),
IndexDocumentsMethod::UpdateDocuments => {
let key = BEU32::new(docid);
let base_obkv = self.index.documents.get(&self.rtxn, &key)?
.context("document not found")?;
let update_obkv = obkv::KvReader::new(update_obkv);
merge_two_obkvs(base_obkv, update_obkv, &mut obkv_buffer);
(docid, obkv_buffer.as_slice())
}
}
},
None => {
// If this user id is new we add it to the external documents ids map
// for new ids and into the list of new documents.
let new_docid = available_documents_ids.next()
.context("no more available documents ids")?;
new_external_documents_ids_builder.insert(external_id, new_docid as u64)?;
new_documents_ids.insert(new_docid);
(new_docid, update_obkv)
},
};
// We insert the document under the documents ids map into the final file.
final_sorter.insert(docid.to_be_bytes(), obkv)?;
documents_count += 1;
}
progress_callback(UpdateIndexingStep::ComputeIdsAndMergeDocuments {
documents_seen: documents_count,
total_documents: documents_count,
});
// We create a final writer to write the new documents in order from the sorter.
let file = tempfile::tempfile()?;
let mut writer = create_writer(self.chunk_compression_type, self.chunk_compression_level, file)?;
// Once we have written all the documents into the final sorter, we write the documents
// into this writer, extract the file and reset the seek to be able to read it again.
final_sorter.write_into(&mut writer)?;
let mut documents_file = writer.into_inner()?;
documents_file.seek(SeekFrom::Start(0))?;
let before_docids_merging = Instant::now();
// We merge the new external ids with existing external documents ids.
let new_external_documents_ids = new_external_documents_ids_builder.into_map();
external_documents_ids.insert_ids(&new_external_documents_ids)?;
info!("Documents external merging took {:.02?}", before_docids_merging.elapsed());
Ok(TransformOutput {
primary_key,
fields_ids_map,
external_documents_ids: external_documents_ids.into_static(),
new_documents_ids,
replaced_documents_ids,
documents_count,
documents_file,
})
}
/// Returns a `TransformOutput` with a file that contains the documents of the index
/// with the attributes reordered accordingly to the `FieldsIdsMap` given as argument.
// TODO this can be done in parallel by using the rayon `ThreadPool`.
pub fn remap_index_documents(
self,
primary_key: String,
old_fields_ids_map: FieldsIdsMap,
new_fields_ids_map: FieldsIdsMap,
) -> anyhow::Result<TransformOutput>
{
let external_documents_ids = self.index.external_documents_ids(self.rtxn)?;
let documents_ids = self.index.documents_ids(self.rtxn)?;
let documents_count = documents_ids.len() as usize;
// We create a final writer to write the new documents in order from the sorter.
let file = tempfile::tempfile()?;
let mut writer = create_writer(self.chunk_compression_type, self.chunk_compression_level, file)?;
let mut obkv_buffer = Vec::new();
for result in self.index.documents.iter(self.rtxn)? {
let (docid, obkv) = result?;
let docid = docid.get();
obkv_buffer.clear();
let mut obkv_writer = obkv::KvWriter::new(&mut obkv_buffer);
// We iterate over the new `FieldsIdsMap` ids in order and construct the new obkv.
for (id, name) in new_fields_ids_map.iter() {
if let Some(val) = old_fields_ids_map.id(name).and_then(|id| obkv.get(id)) {
obkv_writer.insert(id, val)?;
}
}
let buffer = obkv_writer.into_inner()?;
writer.insert(docid.to_be_bytes(), buffer)?;
}
// Once we have written all the documents, we extract
// the file and reset the seek to be able to read it again.
let mut documents_file = writer.into_inner()?;
documents_file.seek(SeekFrom::Start(0))?;
Ok(TransformOutput {
primary_key,
fields_ids_map: new_fields_ids_map,
external_documents_ids: external_documents_ids.into_static(),
new_documents_ids: documents_ids,
replaced_documents_ids: RoaringBitmap::default(),
documents_count,
documents_file,
})
}
}
/// Given an optional primary key and an optional alternative name, returns the (field_id, attr_name)
/// for the primary key according to the following rules:
/// - if primary_key is `Some`, returns the id and the name, else
/// - if alternative_name is Some, adds alternative to the fields_ids_map, and returns the pair, else
/// - if autogenerate_docids is true, insert the default id value in the field ids map ("id") and
/// returns the pair, else
/// - returns an error.
fn compute_primary_key_pair(
primary_key: Option<&str>,
fields_ids_map: &mut FieldsIdsMap,
alternative_name: Option<String>,
autogenerate_docids: bool,
) -> anyhow::Result<(FieldId, String)> {
match primary_key {
Some(primary_key) => {
let id = fields_ids_map.id(primary_key).expect("primary key must be present in the fields id map");
Ok((id, primary_key.to_string()))
}
None => {
let name = match alternative_name {
Some(key) => key,
None => {
if !autogenerate_docids {
// If there is no primary key in the current document batch, we must
// return an error and not automatically generate any document id.
anyhow::bail!("missing primary key")
}
DEFAULT_PRIMARY_KEY_NAME.to_string()
},
};
let id = fields_ids_map.insert(&name).context("field id limit reached")?;
Ok((id, name))
},
}
}
/// Only the last value associated with an id is kept.
fn keep_latest_obkv(_key: &[u8], obkvs: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
obkvs.last().context("no last value").map(|last| last.clone().into_owned())
}
/// Merge all the obkvs in the order we see them.
fn merge_obkvs(_key: &[u8], obkvs: &[Cow<[u8]>]) -> anyhow::Result<Vec<u8>> {
let mut iter = obkvs.iter();
let first = iter.next().map(|b| b.clone().into_owned()).context("no first value")?;
Ok(iter.fold(first, |acc, current| {
let first = obkv::KvReader::new(&acc);
let second = obkv::KvReader::new(current);
let mut buffer = Vec::new();
merge_two_obkvs(first, second, &mut buffer);
buffer
}))
}
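/// Trims the given document id and returns it if it is non-empty and only
/// contains ASCII alphanumerics, hyphens and underscores.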
fn validate_document_id(document_id: &str) -> Option<&str> {
let document_id = document_id.trim();
Some(document_id).filter(|id| {
!id.is_empty() && id.chars().all(|c| {
matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_')
})
})
}
#[cfg(test)]
mod test {
use super::*;
mod compute_primary_key {
use super::compute_primary_key_pair;
use super::FieldsIdsMap;
#[test]
#[should_panic]
fn should_panic_primary_key_not_in_map() {
let mut fields_map = FieldsIdsMap::new();
let _result = compute_primary_key_pair(
Some("toto"),
&mut fields_map,
None,
false);
}
#[test]
fn should_return_primary_key_if_is_some() {
let mut fields_map = FieldsIdsMap::new();
fields_map.insert("toto").unwrap();
let result = compute_primary_key_pair(
Some("toto"),
&mut fields_map,
Some("tata".to_string()),
false);
assert_eq!(result.unwrap(), (0u8, "toto".to_string()));
assert_eq!(fields_map.len(), 1);
}
#[test]
fn should_return_alternative_if_primary_is_none() {
let mut fields_map = FieldsIdsMap::new();
let result = compute_primary_key_pair(
None,
&mut fields_map,
Some("tata".to_string()),
false);
assert_eq!(result.unwrap(), (0u8, "tata".to_string()));
assert_eq!(fields_map.len(), 1);
}
#[test]
fn should_return_default_if_both_are_none() {
let mut fields_map = FieldsIdsMap::new();
let result = compute_primary_key_pair(
None,
&mut fields_map,
None,
true);
assert_eq!(result.unwrap(), (0u8, "id".to_string()));
assert_eq!(fields_map.len(), 1);
}
#[test]
fn should_return_err_if_both_are_none_and_recompute_is_false(){
let mut fields_map = FieldsIdsMap::new();
let result = compute_primary_key_pair(
None,
&mut fields_map,
None,
false);
assert!(result.is_err());
assert_eq!(fields_map.len(), 0);
}
}
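// A small additional test for `validate_document_id`, added for illustration:
// the trimming behaviour and the accepted [a-zA-Z0-9-_] character set follow
// the function defined above.
mod validate_document_id {
use super::validate_document_id;
#[test]
fn should_trim_and_only_accept_alphanumerics_hyphens_and_underscores() {
assert_eq!(validate_document_id("  abc-123_DEF  "), Some("abc-123_DEF"));
assert_eq!(validate_document_id("   "), None);
assert_eq!(validate_document_id("hello world"), None);
}
}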
}