Mirror of https://github.com/meilisearch/meilisearch.git
Fixing piles of clippy errors.
Most of these are calls to `clone` on structs that implement `Copy`. Many others take `&` or `&mut` borrows of `self` when the surrounding function already holds an immutable or mutable borrow, so the extra borrow isn't needed. I tried to stay away from behavioral changes or places where I'd have to name fresh variables.
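The second pattern described above, an extra `&` or `&mut` on something that is already a borrow, is clippy's `needless_borrow` lint. A minimal sketch of it on a hypothetical type (the Copy/`clone` pattern is illustrated next to the hunks below where it actually appears):

struct Counter {
    n: u32,
}

impl Counter {
    fn bump(&mut self) -> u32 {
        // `self` is already `&mut Counter`; writing `(&mut self).n += 1`
        // would add a needless re-borrow of the kind this commit removes.
        self.n += 1;
        self.n
    }

    fn report(&self) -> String {
        // Likewise, `format!("{}", &self.n)` would borrow a `u32` that the
        // formatting machinery already takes by reference.
        format!("count = {}", self.n)
    }
}

fn main() {
    let mut c = Counter { n: 0 };
    c.bump();
    assert_eq!(c.report(), "count = 1");
}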
@@ -40,7 +40,7 @@ pub fn extract_fid_word_count_docids<R: io::Read + io::Seek>(
     let mut cursor = docid_word_positions.into_cursor()?;
     while let Some((key, value)) = cursor.move_on_next()? {
         let (document_id_bytes, _word_bytes) = try_split_array_at(key)
-            .ok_or_else(|| SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
+            .ok_or(SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
         let document_id = u32::from_be_bytes(document_id_bytes);
 
         let curr_document_id = *current_document_id.get_or_insert(document_id);
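The `.ok_or_else(..)` to `.ok_or(..)` rewrite in this hunk (repeated across the extractors below) corresponds to clippy's `unnecessary_lazy_evaluations` lint: the closure does nothing but build a plain enum variant, so there is no work worth deferring to the error path. A self-contained sketch of the trade-off, with hypothetical names:

#[derive(Debug)]
enum DecodeError {
    // Mirrors the shape of `SerializationError::Decoding { db_name: .. }`.
    Decoding { db_name: Option<&'static str> },
}

fn split_id(key: &[u8]) -> Result<&[u8], DecodeError> {
    // Cheap error value: building the variant allocates nothing, so the
    // eager `ok_or` form is fine and the closure was pure noise.
    key.get(..4).ok_or(DecodeError::Decoding { db_name: Some("docid-word-positions") })
}

fn expensive_message(key: &[u8]) -> Result<&[u8], String> {
    // Costly error value: `format!` allocates, so `ok_or_else` stays,
    // paying for the message only when the key really is too short.
    key.get(..4).ok_or_else(|| format!("key too short: {} bytes", key.len()))
}

fn main() {
    assert!(split_id(&[1, 2, 3, 4, 5]).is_ok());
    assert!(expensive_message(&[1]).is_err());
}

The mirror-image lint, `or_fun_call`, pushes the other way when the argument is expensive to construct.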
@@ -60,5 +60,5 @@ pub fn extract_geo_points<R: io::Read + io::Seek>(
         }
     }
 
-    Ok(writer_into_reader(writer)?)
+    writer_into_reader(writer)
 }
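The `Ok(writer_into_reader(writer)?)` to `writer_into_reader(writer)` change is clippy's `needless_question_mark` lint: `?` unwraps the `Result` only for `Ok(..)` to wrap it straight back up. When the error types already line up, the inner call's `Result` can be returned as-is. A standalone sketch, names hypothetical:

use std::io::{self, Cursor, Read};

fn read_all(mut reader: impl Read) -> io::Result<Vec<u8>> {
    let mut buf = Vec::new();
    reader.read_to_end(&mut buf)?;
    Ok(buf)
}

// Before: `Ok(read_all(reader)?)` unwrapped the Result with `?` only to
// rewrap it. After: forward the call's Result directly.
fn load(reader: impl Read) -> io::Result<Vec<u8>> {
    read_all(reader)
}

fn main() -> io::Result<()> {
    let data = load(Cursor::new(b"hello".to_vec()))?;
    assert_eq!(data, b"hello");
    Ok(())
}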
@@ -51,7 +51,7 @@ pub fn extract_word_docids<R: io::Read + io::Seek>(
     let mut cursor = docid_word_positions.into_cursor()?;
     while let Some((key, positions)) = cursor.move_on_next()? {
         let (document_id_bytes, word_bytes) = try_split_array_at(key)
-            .ok_or_else(|| SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
+            .ok_or(SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
         let document_id = u32::from_be_bytes(document_id_bytes);
 
         let bitmap = RoaringBitmap::from_iter(Some(document_id));
@@ -39,7 +39,7 @@ pub fn extract_word_pair_proximity_docids<R: io::Read + io::Seek>(
     let mut cursor = docid_word_positions.into_cursor()?;
     while let Some((key, value)) = cursor.move_on_next()? {
         let (document_id_bytes, word_bytes) = try_split_array_at(key)
-            .ok_or_else(|| SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
+            .ok_or(SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
         let document_id = u32::from_be_bytes(document_id_bytes);
         let word = str::from_utf8(word_bytes)?;
 
@@ -81,7 +81,7 @@ pub fn extract_word_pair_proximity_docids<R: io::Read + io::Seek>(
 ///
 /// This list is used by the engine to calculate the documents containing words that are
 /// close to each other.
-fn document_word_positions_into_sorter<'b>(
+fn document_word_positions_into_sorter(
     document_id: DocumentId,
     mut word_positions_heap: BinaryHeap<PeekedWordPosition<vec::IntoIter<u32>>>,
     word_pair_proximity_docids_sorter: &mut grenad::Sorter<MergeFn>,
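Dropping the `<'b>` on `document_word_positions_into_sorter` is clippy's `extra_unused_lifetimes` lint: the lifetime parameter was declared but never tied to any argument or return type, so it only added noise. A toy illustration (not repository code):

// Flagged shape: `'a` is declared but nothing uses it.
// fn sum_unused<'a>(values: &[u32]) -> u32 { values.iter().sum() }

// Fixed: no lifetime parameter; the slice's lifetime is elided.
fn sum(values: &[u32]) -> u32 {
    values.iter().sum()
}

// A named lifetime earns its keep only when it links inputs to outputs,
// e.g. when returning a borrow that must live as long as the argument.
fn first<'a>(values: &'a [u32]) -> Option<&'a u32> {
    values.first()
}

fn main() {
    let v = [1, 2, 3];
    assert_eq!(sum(&v), 6);
    assert_eq!(first(&v), Some(&1));
}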
@@ -33,7 +33,7 @@ pub fn extract_word_position_docids<R: io::Read + io::Seek>(
     let mut cursor = docid_word_positions.into_cursor()?;
     while let Some((key, value)) = cursor.move_on_next()? {
         let (document_id_bytes, word_bytes) = try_split_array_at(key)
-            .ok_or_else(|| SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
+            .ok_or(SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
         let document_id = DocumentId::from_be_bytes(document_id_bytes);
 
         for position in read_u32_ne_bytes(value) {
@@ -96,7 +96,7 @@ pub(crate) fn data_from_obkv_documents(
 
     spawn_extraction_task::<_, _, Vec<grenad::Reader<File>>>(
         docid_word_positions_chunks.clone(),
-        indexer.clone(),
+        indexer,
         lmdb_writer_sx.clone(),
         extract_word_pair_proximity_docids,
         merge_cbo_roaring_bitmaps,
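The recurring `indexer.clone()` to `indexer` edits in this function are the clone-on-Copy case from the commit message (`clippy::clone_on_copy`): judging by the diff, the indexer parameter type implements `Copy`, so every by-value pass already duplicates it bitwise and the caller's value stays usable for the later calls. A sketch under that assumption, with a hypothetical `IndexerParams`:

#[derive(Debug, Clone, Copy)]
struct IndexerParams {
    chunk_compression_level: Option<u32>,
    max_memory: Option<usize>,
}

fn extract(task: &str, params: IndexerParams) {
    // `params` arrives as a bitwise copy; the caller keeps its own.
    println!("{task}: max_memory = {:?}", params.max_memory);
}

fn main() {
    let params = IndexerParams { chunk_compression_level: None, max_memory: Some(512 << 20) };
    // No `.clone()` between calls: `Copy` makes each pass an implicit copy.
    extract("word_pair_proximity", params);
    extract("fid_word_count", params);
    extract("word_position", params); // `params` is still live here too
}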
@@ -106,7 +106,7 @@ pub(crate) fn data_from_obkv_documents(
 
     spawn_extraction_task::<_, _, Vec<grenad::Reader<File>>>(
         docid_word_positions_chunks.clone(),
-        indexer.clone(),
+        indexer,
         lmdb_writer_sx.clone(),
         extract_fid_word_count_docids,
         merge_cbo_roaring_bitmaps,
@@ -116,7 +116,7 @@ pub(crate) fn data_from_obkv_documents(
 
     spawn_extraction_task::<_, _, Vec<(grenad::Reader<File>, grenad::Reader<File>)>>(
         docid_word_positions_chunks.clone(),
-        indexer.clone(),
+        indexer,
         lmdb_writer_sx.clone(),
         move |doc_word_pos, indexer| extract_word_docids(doc_word_pos, indexer, &exact_attributes),
         merge_roaring_bitmaps,
@@ -128,8 +128,8 @@ pub(crate) fn data_from_obkv_documents(
     );
 
     spawn_extraction_task::<_, _, Vec<grenad::Reader<File>>>(
-        docid_word_positions_chunks.clone(),
-        indexer.clone(),
+        docid_word_positions_chunks,
+        indexer,
         lmdb_writer_sx.clone(),
         extract_word_position_docids,
         merge_cbo_roaring_bitmaps,
@@ -138,8 +138,8 @@ pub(crate) fn data_from_obkv_documents(
     );
 
     spawn_extraction_task::<_, _, Vec<grenad::Reader<File>>>(
-        docid_fid_facet_strings_chunks.clone(),
-        indexer.clone(),
+        docid_fid_facet_strings_chunks,
+        indexer,
         lmdb_writer_sx.clone(),
         extract_facet_string_docids,
         keep_first_prefix_value_merge_roaring_bitmaps,
@@ -148,8 +148,8 @@ pub(crate) fn data_from_obkv_documents(
     );
 
     spawn_extraction_task::<_, _, Vec<grenad::Reader<File>>>(
-        docid_fid_facet_numbers_chunks.clone(),
-        indexer.clone(),
+        docid_fid_facet_numbers_chunks,
+        indexer,
         lmdb_writer_sx.clone(),
         extract_facet_number_docids,
         merge_cbo_roaring_bitmaps,
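Note the asymmetry across these call sites: `docid_word_positions_chunks` keeps its `.clone()` for every consumer except the final one, which now takes ownership outright. The chunk collections are evidently not `Copy`, so each additional consumer needs a real clone, but the last use can simply move. A sketch of that convention with hypothetical types:

fn consume(name: &str, chunks: Vec<String>) {
    println!("{name}: {} chunk(s)", chunks.len());
}

fn main() {
    let chunks = vec!["chunk-a".to_string(), "chunk-b".to_string()];
    // Every consumer but the last gets a clone...
    consume("word_docids", chunks.clone());
    consume("fid_word_count_docids", chunks.clone());
    // ...and the final one takes ownership, saving a whole extra copy.
    // (clippy's `redundant_clone` catches a `.clone()` left on a last use.)
    consume("word_position_docids", chunks);
}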
@@ -183,12 +183,12 @@ fn spawn_extraction_task<FE, FS, M>(
 {
     rayon::spawn(move || {
         let chunks: Result<M> =
-            chunks.into_par_iter().map(|chunk| extract_fn(chunk, indexer.clone())).collect();
+            chunks.into_par_iter().map(|chunk| extract_fn(chunk, indexer)).collect();
         rayon::spawn(move || match chunks {
             Ok(chunks) => {
                 debug!("merge {} database", name);
                 let reader = chunks.merge(merge_fn, &indexer);
-                let _ = lmdb_writer_sx.send(reader.map(|r| serialize_fn(r)));
+                let _ = lmdb_writer_sx.send(reader.map(serialize_fn));
             }
             Err(e) => {
                 let _ = lmdb_writer_sx.send(Err(e));
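Two fixes land in this hunk: the now-by-value `indexer` goes into `extract_fn` without a `clone`, and `.map(|r| serialize_fn(r))` collapses to `.map(serialize_fn)`, clippy's `redundant_closure` lint: a closure that only forwards its argument can be replaced by the function itself. A minimal sketch:

fn double(x: i32) -> i32 {
    x * 2
}

fn main() {
    let reader: Result<i32, String> = Ok(21);
    // Flagged form: `reader.map(|r| double(r))`; the closure adds nothing.
    let doubled = reader.map(double);
    assert_eq!(doubled, Ok(42));
}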
@@ -255,7 +255,7 @@ fn send_and_extract_flattened_documents_data(
         || {
             let (documents_ids, docid_word_positions_chunk) = extract_docid_word_positions(
                 flattened_documents_chunk.clone(),
-                indexer.clone(),
+                indexer,
                 searchable_fields,
                 stop_words.as_ref(),
                 max_positions_per_attributes,
@@ -279,7 +279,7 @@ fn send_and_extract_flattened_documents_data(
                 fid_facet_exists_docids_chunk,
             ) = extract_fid_docid_facet_values(
                 flattened_documents_chunk.clone(),
-                indexer.clone(),
+                indexer,
                 faceted_fields,
             )?;
 