Compare commits


2 Commits

Author          SHA1         Message                    Date
Louis Dureuil   79111230ee   display fids at the end    2025-01-08 17:24:17 +01:00
Louis Dureuil   d6957a8e5d   ignore all string facets   2025-01-08 16:55:31 +01:00
39 changed files with 247 additions and 808 deletions

Cargo.lock (generated), 34 changed lines
View File

@@ -496,7 +496,7 @@ source = "git+https://github.com/meilisearch/bbqueue#cbb87cc707b5af415ef203bdaf2
[[package]]
name = "benchmarks"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"anyhow",
"bumpalo",
@@ -689,7 +689,7 @@ dependencies = [
[[package]]
name = "build-info"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"anyhow",
"time",
@@ -1664,7 +1664,7 @@ dependencies = [
[[package]]
name = "dump"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"anyhow",
"big_s",
@@ -1876,7 +1876,7 @@ checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4"
[[package]]
name = "file-store"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"tempfile",
"thiserror",
@@ -1898,7 +1898,7 @@ dependencies = [
[[package]]
name = "filter-parser"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"insta",
"nom",
@@ -1918,7 +1918,7 @@ dependencies = [
[[package]]
name = "flatten-serde-json"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"criterion",
"serde_json",
@@ -2057,7 +2057,7 @@ dependencies = [
[[package]]
name = "fuzzers"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"arbitrary",
"bumpalo",
@@ -2624,7 +2624,7 @@ checksum = "206ca75c9c03ba3d4ace2460e57b189f39f43de612c2f85836e65c929701bb2d"
[[package]]
name = "index-scheduler"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"anyhow",
"arroy 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2822,7 +2822,7 @@ dependencies = [
[[package]]
name = "json-depth-checker"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"criterion",
"serde_json",
@@ -3441,7 +3441,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "meili-snap"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"insta",
"md5",
@@ -3450,7 +3450,7 @@ dependencies = [
[[package]]
name = "meilisearch"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"actix-cors",
"actix-http",
@@ -3540,7 +3540,7 @@ dependencies = [
[[package]]
name = "meilisearch-auth"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"base64 0.22.1",
"enum-iterator",
@@ -3559,7 +3559,7 @@ dependencies = [
[[package]]
name = "meilisearch-types"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"actix-web",
"anyhow",
@@ -3592,7 +3592,7 @@ dependencies = [
[[package]]
name = "meilitool"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"anyhow",
"arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?tag=DO-NOT-DELETE-upgrade-v04-to-v05)",
@@ -3627,7 +3627,7 @@ dependencies = [
[[package]]
name = "milli"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"allocator-api2",
"arroy 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4083,7 +4083,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "permissive-json-pointer"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"big_s",
"serde_json",
@@ -6486,7 +6486,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "1.12.7"
version = "1.12.1"
dependencies = [
"anyhow",
"build-info",

View File

@@ -22,7 +22,7 @@ members = [
]
[workspace.package]
version = "1.12.7"
version = "1.12.1"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",

View File

@@ -29,7 +29,7 @@ use bumpalo::Bump;
use dump::IndexMetadata;
use meilisearch_types::batches::BatchId;
use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader, PrimaryKey};
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::update::new::indexer::{self, UpdateByFunction};
@@ -819,13 +819,6 @@ impl IndexScheduler {
t.started_at = Some(started_at);
t.finished_at = Some(finished_at);
}
// Patch the task to remove the batch uid, because as of v1.12.5 batches are not persisted.
// This prevents referencing *future* batches not actually associated with the task.
//
// See <https://github.com/meilisearch/meilisearch/issues/5247> for details.
t.batch_uid = None;
let mut dump_content_file = dump_tasks.push_task(&t.into())?;
// 2.1. Dump the `content_file` associated with the task if there is one and the task is not finished yet.
@@ -836,20 +829,21 @@ impl IndexScheduler {
if status == Status::Enqueued {
let content_file = self.file_store.get_update(content_file)?;
for document in
serde_json::de::Deserializer::from_reader(content_file).into_iter()
{
let document = document
.map_err(|e| {
Error::from_milli(
milli::InternalError::SerdeJson(e).into(),
None,
)
})
.unwrap();
dump_content_file.push_document(&document)?;
}
let reader = DocumentsBatchReader::from_reader(content_file)
.map_err(|e| Error::from_milli(e.into(), None))?;
let (mut cursor, documents_batch_index) =
reader.into_cursor_and_fields_index();
while let Some(doc) = cursor
.next_document()
.map_err(|e| Error::from_milli(e.into(), None))?
{
dump_content_file.push_document(
&obkv_to_object(doc, &documents_batch_index)
.map_err(|e| Error::from_milli(e, None))?,
)?;
}
dump_content_file.flush()?;
}
}
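
One side of this hunk streams the update file as newline-delimited JSON through serde_json's streaming deserializer. A minimal, self-contained sketch of that read loop, with the dump writer replaced by a println! placeholder and the file path supplied by the caller:

use std::error::Error;
use std::fs::File;
use std::io::BufReader;

fn dump_documents(path: &str) -> Result<(), Box<dyn Error>> {
    let reader = BufReader::new(File::open(path)?);
    // Each item yielded by the stream deserializer is one top-level JSON document.
    for document in serde_json::Deserializer::from_reader(reader).into_iter::<serde_json::Value>() {
        let document = document?;
        // The real code pushes into the dump writer instead of printing:
        // dump_content_file.push_document(&document)?;
        println!("{document}");
    }
    Ok(())
}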

View File

@@ -1,7 +1,5 @@
use std::collections::BTreeMap;
use std::env::VarError;
use std::path::Path;
use std::str::FromStr;
use std::time::Duration;
use meilisearch_types::heed::{EnvClosingEvent, EnvFlags, EnvOpenOptions};
@@ -304,15 +302,7 @@ fn create_or_open_index(
) -> Result<Index> {
let mut options = EnvOpenOptions::new();
options.map_size(clamp_to_page_size(map_size));
let max_readers = match std::env::var("MEILI_EXPERIMENTAL_INDEX_MAX_READERS") {
Ok(value) => u32::from_str(&value).unwrap(),
Err(VarError::NotPresent) => 1024,
Err(VarError::NotUnicode(value)) => panic!(
"Invalid unicode for the `MEILI_EXPERIMENTAL_INDEX_MAX_READERS` env var: {value:?}"
),
};
options.max_readers(max_readers);
options.max_readers(1024);
if enable_mdb_writemap {
unsafe { options.flags(EnvFlags::WRITE_MAP) };
}
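
This hunk drops the MEILI_EXPERIMENTAL_INDEX_MAX_READERS override in favour of a hardcoded 1024. As a sketch of the override pattern only (not the project's actual error handling), the variable can be read with a default and a recoverable error instead of an unwrap/panic:

use std::env::VarError;

fn max_readers() -> Result<u32, String> {
    match std::env::var("MEILI_EXPERIMENTAL_INDEX_MAX_READERS") {
        // Present: parse it, surfacing a readable error on bad input.
        Ok(value) => value
            .parse::<u32>()
            .map_err(|e| format!("invalid MEILI_EXPERIMENTAL_INDEX_MAX_READERS: {e}")),
        // Absent: fall back to the default used above.
        Err(VarError::NotPresent) => Ok(1024),
        Err(VarError::NotUnicode(value)) => {
            Err(format!("non-unicode MEILI_EXPERIMENTAL_INDEX_MAX_READERS: {value:?}"))
        }
    }
}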

View File

@@ -55,6 +55,7 @@ use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFea
use meilisearch_types::heed::byteorder::BE;
use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str, I128};
use meilisearch_types::heed::{self, Database, Env, PutFlags, RoTxn, RwTxn};
use meilisearch_types::milli::documents::DocumentsBatchBuilder;
use meilisearch_types::milli::index::IndexEmbeddingConfig;
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::vector::{Embedder, EmbedderOptions, EmbeddingConfigs};
@@ -2016,21 +2017,14 @@ impl<'a> Dump<'a> {
task: TaskDump,
content_file: Option<Box<UpdateFile>>,
) -> Result<Task> {
let task_has_no_docs = matches!(task.kind, KindDump::DocumentImport { documents_count, .. } if documents_count == 0);
let content_uuid = match content_file {
Some(content_file) if task.status == Status::Enqueued => {
let (uuid, file) = self.index_scheduler.create_update_file(false)?;
let mut writer = io::BufWriter::new(file);
let (uuid, mut file) = self.index_scheduler.create_update_file(false)?;
let mut builder = DocumentsBatchBuilder::new(&mut file);
for doc in content_file {
let doc = doc?;
serde_json::to_writer(&mut writer, &doc)
.map_err(|e| {
Error::from_milli(milli::InternalError::SerdeJson(e).into(), None)
})
.unwrap();
builder.append_json_object(&doc?)?;
}
let file = writer.into_inner().map_err(|e| e.into_error())?;
builder.into_inner()?;
file.persist()?;
Some(uuid)
@@ -2038,12 +2032,6 @@ impl<'a> Dump<'a> {
// If the task isn't `Enqueued` then just generate a recognisable `Uuid`
// in case we try to open it later.
_ if task.status != Status::Enqueued => Some(Uuid::nil()),
None if task.status == Status::Enqueued && task_has_no_docs => {
let (uuid, file) = self.index_scheduler.create_update_file(false)?;
file.persist()?;
Some(uuid)
}
_ => None,
};

View File

@@ -1337,7 +1337,7 @@ impl<'a> HitMaker<'a> {
ExplicitVectors { embeddings: Some(vector.into()), regenerate: !user_provided };
vectors.insert(
name,
serde_json::to_value(embeddings).map_err(InternalError::SerdeJson).unwrap(),
serde_json::to_value(embeddings).map_err(InternalError::SerdeJson)?,
);
}
document.insert("_vectors".into(), vectors.into());
@@ -1717,7 +1717,7 @@ fn make_document(
// recreate the original json
for (key, value) in obkv.iter() {
let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson).unwrap();
let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson)?;
let key = field_ids_map.name(key).expect("Missing field name").to_string();
document.insert(key, value);

View File

@@ -1746,57 +1746,3 @@ async fn change_attributes_settings() {
)
.await;
}
/// Modifying facets with different casing should work correctly
#[actix_rt::test]
async fn change_facet_casing() {
let server = Server::new().await;
let index = server.index("test");
let (response, code) = index
.update_settings(json!({
"filterableAttributes": ["dog"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
let (response, _code) = index
.add_documents(
json!([
{
"id": 1,
"dog": "Bouvier Bernois"
}
]),
None,
)
.await;
index.wait_task(response.uid()).await;
let (response, _code) = index
.add_documents(
json!([
{
"id": 1,
"dog": "bouvier bernois"
}
]),
None,
)
.await;
index.wait_task(response.uid()).await;
index
.search(json!({ "facets": ["dog"] }), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["facetDistribution"]), @r###"
{
"dog": {
"bouvier bernois": 1
}
}
"###);
})
.await;
}

View File

@@ -14,11 +14,11 @@ arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/ar
clap = { version = "4.5.9", features = ["derive"] }
dump = { path = "../dump" }
file-store = { path = "../file-store" }
indexmap = { version = "2.7.0", features = ["serde"] }
indexmap = {version = "2.7.0", features = ["serde"]}
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
serde = { version = "1.0.209", features = ["derive"] }
serde_json = { version = "1.0.133", features = ["preserve_order"] }
serde_json = {version = "1.0.133", features = ["preserve_order"]}
tempfile = "3.14.0"
time = { version = "0.3.36", features = ["formatting", "parsing", "alloc"] }
uuid = { version = "1.10.0", features = ["v4"], default-features = false }

View File

@@ -88,7 +88,7 @@ fn main() -> anyhow::Result<()> {
match command {
Command::ClearTaskQueue => clear_task_queue(db_path),
Command::ExportADump { dump_dir, skip_enqueued_tasks } => {
export_a_dump(db_path, dump_dir, skip_enqueued_tasks, detected_version)
export_a_dump(db_path, dump_dir, skip_enqueued_tasks)
}
Command::OfflineUpgrade { target_version } => {
let target_version = parse_version(&target_version).context("While parsing `--target-version`. Make sure `--target-version` is in the format MAJOR.MINOR.PATCH")?;
@@ -187,7 +187,6 @@ fn export_a_dump(
db_path: PathBuf,
dump_dir: PathBuf,
skip_enqueued_tasks: bool,
detected_version: (String, String, String),
) -> Result<(), anyhow::Error> {
let started_at = OffsetDateTime::now_utc();
@@ -239,6 +238,9 @@ fn export_a_dump(
if skip_enqueued_tasks {
eprintln!("Skip dumping the enqueued tasks...");
} else {
eprintln!("Dumping the enqueued tasks...");
// 3. dump the tasks
let mut dump_tasks = dump.create_tasks_queue()?;
let mut count = 0;
for ret in all_tasks.iter(&rtxn)? {
@@ -252,39 +254,18 @@ fn export_a_dump(
if status == Status::Enqueued {
let content_file = file_store.get_update(content_file_uuid)?;
if (
detected_version.0.as_str(),
detected_version.1.as_str(),
detected_version.2.as_str(),
) < ("1", "12", "0")
{
eprintln!("Dumping the enqueued tasks reading them in obkv format...");
let reader =
DocumentsBatchReader::from_reader(content_file).with_context(|| {
format!("While reading content file {:?}", content_file_uuid)
})?;
let (mut cursor, documents_batch_index) =
reader.into_cursor_and_fields_index();
while let Some(doc) = cursor.next_document().with_context(|| {
format!("While iterating on content file {:?}", content_file_uuid)
})? {
dump_content_file
.push_document(&obkv_to_object(doc, &documents_batch_index)?)?;
}
} else {
eprintln!(
"Dumping the enqueued tasks reading them in JSON stream format..."
);
for document in
serde_json::de::Deserializer::from_reader(content_file).into_iter()
{
let document = document.with_context(|| {
format!("While reading content file {:?}", content_file_uuid)
})?;
dump_content_file.push_document(&document)?;
}
}
let reader =
DocumentsBatchReader::from_reader(content_file).with_context(|| {
format!("While reading content file {:?}", content_file_uuid)
})?;
let (mut cursor, documents_batch_index) = reader.into_cursor_and_fields_index();
while let Some(doc) = cursor.next_document().with_context(|| {
format!("While iterating on content file {:?}", content_file_uuid)
})? {
dump_content_file
.push_document(&obkv_to_object(doc, &documents_batch_index)?)?;
}
dump_content_file.flush()?;
count += 1;
}
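
One subtlety in the version gate dropped in this hunk: `detected_version` is compared as a tuple of strings, and string ordering puts "9" after "12". A small sketch of a numeric comparison, assuming the components are plain decimal strings; the helper is illustrative and not part of meilitool:

fn version_lt(a: (&str, &str, &str), b: (u32, u32, u32)) -> Option<bool> {
    // Parse each component before comparing, so 9 < 12 holds numerically.
    let a = (a.0.parse::<u32>().ok()?, a.1.parse::<u32>().ok()?, a.2.parse::<u32>().ok()?);
    Some(a < b)
}

fn main() {
    assert_eq!(version_lt(("1", "9", "0"), (1, 12, 0)), Some(true));
    // The string-tuple comparison disagrees:
    assert!(("1", "9", "0") > ("1", "12", "0"));
}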

View File

@@ -8,7 +8,7 @@ use std::path::{Path, PathBuf};
use anyhow::{bail, Context};
use meilisearch_types::versioning::create_version_file;
use v1_10::v1_9_to_v1_10;
use v1_12::{v1_11_to_v1_12, v1_12_to_v1_12_3};
use v1_12::v1_11_to_v1_12;
use crate::upgrade::v1_11::v1_10_to_v1_11;
@@ -20,48 +20,12 @@ pub struct OfflineUpgrade {
impl OfflineUpgrade {
pub fn upgrade(self) -> anyhow::Result<()> {
// Adding a version?
//
// 1. Update the LAST_SUPPORTED_UPGRADE_FROM_VERSION and LAST_SUPPORTED_UPGRADE_TO_VERSION.
// 2. Add new version to the upgrade list if necessary
// 3. Use `no_upgrade` as index for versions that are compatible.
if self.current_version == self.target_version {
println!("Database is already at the target version. Exiting.");
return Ok(());
}
if self.current_version > self.target_version {
bail!(
"Cannot downgrade from {}.{}.{} to {}.{}.{}. Downgrade not supported",
self.current_version.0,
self.current_version.1,
self.current_version.2,
self.target_version.0,
self.target_version.1,
self.target_version.2
);
}
const FIRST_SUPPORTED_UPGRADE_FROM_VERSION: &str = "1.9.0";
const LAST_SUPPORTED_UPGRADE_FROM_VERSION: &str = "1.12.7";
const FIRST_SUPPORTED_UPGRADE_TO_VERSION: &str = "1.10.0";
const LAST_SUPPORTED_UPGRADE_TO_VERSION: &str = "1.12.7";
let upgrade_list = [
(
v1_9_to_v1_10 as fn(&Path, &str, &str, &str) -> Result<(), anyhow::Error>,
"1",
"10",
"0",
),
(v1_9_to_v1_10 as fn(&Path) -> Result<(), anyhow::Error>, "1", "10", "0"),
(v1_10_to_v1_11, "1", "11", "0"),
(v1_11_to_v1_12, "1", "12", "0"),
(v1_12_to_v1_12_3, "1", "12", "3"),
];
let no_upgrade: usize = upgrade_list.len();
let (current_major, current_minor, current_patch) = &self.current_version;
let start_at = match (
@@ -72,12 +36,8 @@ impl OfflineUpgrade {
("1", "9", _) => 0,
("1", "10", _) => 1,
("1", "11", _) => 2,
("1", "12", "0" | "1" | "2") => 3,
("1", "12", "3" | "4" | "5" | "6" | "7") => no_upgrade,
_ => {
bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from versions in range [{}-{}]",
FIRST_SUPPORTED_UPGRADE_FROM_VERSION,
LAST_SUPPORTED_UPGRADE_FROM_VERSION);
bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from v1.9 and v1.10")
}
};
@@ -86,32 +46,21 @@ impl OfflineUpgrade {
let ends_at = match (target_major.as_str(), target_minor.as_str(), target_patch.as_str()) {
("1", "10", _) => 0,
("1", "11", _) => 1,
("1", "12", "0" | "1" | "2") => 2,
("1", "12", "3" | "4" | "5" | "6" | "7") => 3,
("1", "12", _) => 2,
(major, _, _) if major.starts_with('v') => {
bail!("Target version must not starts with a `v`. Instead of writing `v1.9.0` write `1.9.0` for example.")
}
_ => {
bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to versions in range [{}-{}]",
FIRST_SUPPORTED_UPGRADE_TO_VERSION,
LAST_SUPPORTED_UPGRADE_TO_VERSION);
bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to v1.10 and v1.11")
}
};
println!("Starting the upgrade from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}");
if start_at == no_upgrade {
println!("No upgrade operation to perform, writing VERSION file");
create_version_file(&self.db_path, target_major, target_minor, target_patch)
.context("while writing VERSION file after the upgrade")?;
println!("Success");
return Ok(());
}
#[allow(clippy::needless_range_loop)]
for index in start_at..=ends_at {
let (func, major, minor, patch) = upgrade_list[index];
(func)(&self.db_path, current_major, current_minor, current_patch)?;
(func)(&self.db_path)?;
println!("Done");
// We're writing the version file just in case an issue arises _while_ upgrading.
// We don't want the DB to fail in an unknown state.
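
For context on the dispatch being simplified above: the upgrade path is a table of migration functions plus the version each step upgrades to, and `start_at`/`ends_at` select a contiguous slice of that table. A reduced, self-contained sketch of the pattern, with placeholder migrations and illustrative version strings (it uses anyhow like the surrounding code):

use std::path::Path;

type Migration = fn(&Path) -> anyhow::Result<()>;

fn placeholder_migration(_db_path: &Path) -> anyhow::Result<()> {
    Ok(())
}

fn run_upgrades(db_path: &Path, start_at: usize, ends_at: usize) -> anyhow::Result<()> {
    // (function, version it upgrades to); entries here are placeholders.
    let upgrade_list: &[(Migration, &str)] = &[
        (placeholder_migration as Migration, "1.10.0"),
        (placeholder_migration as Migration, "1.11.0"),
        (placeholder_migration as Migration, "1.12.0"),
    ];
    for (func, to_version) in &upgrade_list[start_at..=ends_at] {
        func(db_path)?;
        println!("upgraded to {to_version}");
        // The real code also rewrites the VERSION file after each step.
    }
    Ok(())
}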

View File

@@ -151,12 +151,7 @@ fn date_round_trip(
Ok(())
}
pub fn v1_9_to_v1_10(
db_path: &Path,
_origin_major: &str,
_origin_minor: &str,
_origin_patch: &str,
) -> anyhow::Result<()> {
pub fn v1_9_to_v1_10(db_path: &Path) -> anyhow::Result<()> {
println!("Upgrading from v1.9.0 to v1.10.0");
// 2 changes here

View File

@@ -14,12 +14,7 @@ use meilisearch_types::milli::index::db_name;
use crate::uuid_codec::UuidCodec;
use crate::{try_opening_database, try_opening_poly_database};
pub fn v1_10_to_v1_11(
db_path: &Path,
_origin_major: &str,
_origin_minor: &str,
_origin_patch: &str,
) -> anyhow::Result<()> {
pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> {
println!("Upgrading from v1.10.0 to v1.11.0");
let index_scheduler_path = db_path.join("tasks");

View File

@@ -1,34 +1,17 @@
//! The breaking changes that happened between the v1.11 and the v1.12 are:
//! - The new indexer changed the update files format from OBKV to ndjson. https://github.com/meilisearch/meilisearch/pull/4900
use std::borrow::Cow;
use std::io::BufWriter;
use std::path::Path;
use std::sync::atomic::AtomicBool;
use anyhow::Context;
use file_store::FileStore;
use indexmap::IndexMap;
use meilisearch_types::milli::documents::DocumentsBatchReader;
use meilisearch_types::milli::heed::types::{SerdeJson, Str};
use meilisearch_types::milli::heed::{Database, EnvOpenOptions, RoTxn, RwTxn};
use meilisearch_types::milli::progress::Step;
use meilisearch_types::milli::{FieldDistribution, Index};
use serde::Serialize;
use serde_json::value::RawValue;
use tempfile::NamedTempFile;
use time::OffsetDateTime;
use uuid::Uuid;
use crate::try_opening_database;
use crate::uuid_codec::UuidCodec;
pub fn v1_11_to_v1_12(
db_path: &Path,
_origin_major: &str,
_origin_minor: &str,
_origin_patch: &str,
) -> anyhow::Result<()> {
pub fn v1_11_to_v1_12(db_path: &Path) -> anyhow::Result<()> {
println!("Upgrading from v1.11.0 to v1.12.0");
convert_update_files(db_path)?;
@@ -36,23 +19,6 @@ pub fn v1_11_to_v1_12(
Ok(())
}
pub fn v1_12_to_v1_12_3(
db_path: &Path,
origin_major: &str,
origin_minor: &str,
origin_patch: &str,
) -> anyhow::Result<()> {
println!("Upgrading from v1.12.{{0, 1, 2}} to v1.12.3");
if origin_minor == "12" {
rebuild_field_distribution(db_path)?;
} else {
println!("Not rebuilding field distribution as it wasn't corrupted coming from v{origin_major}.{origin_minor}.{origin_patch}");
}
Ok(())
}
/// Convert the update files from OBKV to ndjson format.
///
/// 1) List all the update files using the file store.
@@ -111,188 +77,3 @@ fn convert_update_files(db_path: &Path) -> anyhow::Result<()> {
Ok(())
}
/// Rebuild field distribution as it was wrongly computed in v1.12.x if x < 3
fn rebuild_field_distribution(db_path: &Path) -> anyhow::Result<()> {
let index_scheduler_path = db_path.join("tasks");
let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) }
.with_context(|| format!("While trying to open {:?}", index_scheduler_path.display()))?;
let mut sched_wtxn = env.write_txn()?;
let index_mapping: Database<Str, UuidCodec> =
try_opening_database(&env, &sched_wtxn, "index-mapping")?;
let stats_db: Database<UuidCodec, SerdeJson<IndexStats>> =
try_opening_database(&env, &sched_wtxn, "index-stats").with_context(|| {
format!("While trying to open {:?}", index_scheduler_path.display())
})?;
let index_count =
index_mapping.len(&sched_wtxn).context("while reading the number of indexes")?;
// FIXME: not ideal, we have to pre-populate all indexes to prevent double borrow of sched_wtxn
// 1. immutably for the iteration
// 2. mutably for updating index stats
let indexes: Vec<_> = index_mapping
.iter(&sched_wtxn)?
.map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid)))
.collect();
let progress = meilisearch_types::milli::progress::Progress::default();
let finished = AtomicBool::new(false);
std::thread::scope(|scope| {
let display_progress = std::thread::Builder::new()
.name("display_progress".into())
.spawn_scoped(scope, || {
while !finished.load(std::sync::atomic::Ordering::Relaxed) {
std::thread::sleep(std::time::Duration::from_secs(5));
let view = progress.as_progress_view();
let Ok(view) = serde_json::to_string(&view) else {
continue;
};
println!("{view}");
}
})
.unwrap();
for (index_index, result) in indexes.into_iter().enumerate() {
let (uid, uuid) = result?;
progress.update_progress(VariableNameStep::new(
&uid,
index_index as u32,
index_count as u32,
));
let index_path = db_path.join("indexes").join(uuid.to_string());
println!(
"[{}/{index_count}]Updating index `{uid}` at `{}`",
index_index + 1,
index_path.display()
);
println!("\t- Rebuilding field distribution");
let index = meilisearch_types::milli::Index::new(EnvOpenOptions::new(), &index_path)
.with_context(|| {
format!("while opening index {uid} at '{}'", index_path.display())
})?;
let mut index_txn = index.write_txn()?;
meilisearch_types::milli::update::new::reindex::field_distribution(
&index,
&mut index_txn,
&progress,
)
.context("while rebuilding field distribution")?;
let stats = IndexStats::new(&index, &index_txn)
.with_context(|| format!("computing stats for index `{uid}`"))?;
store_stats_of(stats_db, uuid, &mut sched_wtxn, &uid, &stats)?;
index_txn.commit().context("while committing the write txn for the updated index")?;
}
sched_wtxn.commit().context("while committing the write txn for the index-scheduler")?;
finished.store(true, std::sync::atomic::Ordering::Relaxed);
if let Err(panic) = display_progress.join() {
let msg = match panic.downcast_ref::<&'static str>() {
Some(s) => *s,
None => match panic.downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
eprintln!("WARN: the display thread panicked with {msg}");
}
println!("Upgrading database succeeded");
Ok(())
})
}
pub struct VariableNameStep {
name: String,
current: u32,
total: u32,
}
impl VariableNameStep {
pub fn new(name: impl Into<String>, current: u32, total: u32) -> Self {
Self { name: name.into(), current, total }
}
}
impl Step for VariableNameStep {
fn name(&self) -> Cow<'static, str> {
self.name.clone().into()
}
fn current(&self) -> u32 {
self.current
}
fn total(&self) -> u32 {
self.total
}
}
pub fn store_stats_of(
stats_db: Database<UuidCodec, SerdeJson<IndexStats>>,
index_uuid: Uuid,
sched_wtxn: &mut RwTxn,
index_uid: &str,
stats: &IndexStats,
) -> anyhow::Result<()> {
stats_db
.put(sched_wtxn, &index_uuid, stats)
.with_context(|| format!("storing stats for index `{index_uid}`"))?;
Ok(())
}
/// The statistics that can be computed from an `Index` object.
#[derive(Serialize, Debug)]
pub struct IndexStats {
/// Number of documents in the index.
pub number_of_documents: u64,
/// Size taken up by the index' DB, in bytes.
///
/// This includes the size taken by both the used and free pages of the DB, and as the free pages
/// are not returned to the disk after a deletion, this number is typically larger than
/// `used_database_size` that only includes the size of the used pages.
pub database_size: u64,
/// Size taken by the used pages of the index' DB, in bytes.
///
/// As the DB backend does not return to the disk the pages that are not currently used by the DB,
/// this value is typically smaller than `database_size`.
pub used_database_size: u64,
/// Association of every field name with the number of times it occurs in the documents.
pub field_distribution: FieldDistribution,
/// Creation date of the index.
#[serde(with = "time::serde::rfc3339")]
pub created_at: OffsetDateTime,
/// Date of the last update of the index.
#[serde(with = "time::serde::rfc3339")]
pub updated_at: OffsetDateTime,
}
impl IndexStats {
/// Compute the stats of an index
///
/// # Parameters
///
/// - rtxn: a RO transaction for the index, obtained from `Index::read_txn()`.
pub fn new(index: &Index, rtxn: &RoTxn) -> meilisearch_types::milli::Result<Self> {
Ok(IndexStats {
number_of_documents: index.number_of_documents(rtxn)?,
database_size: index.on_disk_size()?,
used_database_size: index.used_size()?,
field_distribution: index.field_distribution(rtxn)?,
created_at: index.created_at(rtxn)?,
updated_at: index.updated_at(rtxn)?,
})
}
}
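
The removed rebuild above also contains a small progress-reporting pattern: a scoped thread prints a status line every few seconds until an AtomicBool flips. A std-only reduction of that pattern, with the per-index work stubbed out and a plain counter standing in for the Progress type:

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::time::Duration;

fn run_with_progress(total: usize) {
    let finished = AtomicBool::new(false);
    let done = AtomicUsize::new(0);
    std::thread::scope(|scope| {
        let printer = std::thread::Builder::new()
            .name("display_progress".into())
            .spawn_scoped(scope, || {
                while !finished.load(Ordering::Relaxed) {
                    std::thread::sleep(Duration::from_secs(5));
                    println!("{}/{total} indexes processed", done.load(Ordering::Relaxed));
                }
            })
            .unwrap();
        for _ in 0..total {
            // ... per-index work goes here ...
            done.fetch_add(1, Ordering::Relaxed);
        }
        finished.store(true, Ordering::Relaxed);
        // Report, rather than propagate, a panic from the display thread.
        if printer.join().is_err() {
            eprintln!("WARN: the display thread panicked");
        }
    });
}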

View File

@@ -33,7 +33,7 @@ pub fn obkv_to_object(obkv: &KvReader<FieldId>, index: &DocumentsBatchIndex) ->
let field_name = index
.name(field_id)
.ok_or(FieldIdMapMissingEntry::FieldId { field_id, process: "obkv_to_object" })?;
let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson).unwrap();
let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson)?;
Ok((field_name.to_string(), value))
})
.collect()
@@ -84,8 +84,7 @@ impl DocumentsBatchIndex {
let key =
self.0.get_by_left(&k).ok_or(crate::error::InternalError::DatabaseClosing)?.clone();
let value = serde_json::from_slice::<serde_json::Value>(v)
.map_err(crate::error::InternalError::SerdeJson)
.unwrap();
.map_err(crate::error::InternalError::SerdeJson)?;
map.insert(key, value);
}

View File

@@ -92,8 +92,7 @@ impl<'a> PrimaryKey<'a> {
PrimaryKey::Flat { name: _, field_id } => match document.get(*field_id) {
Some(document_id_bytes) => {
let document_id = serde_json::from_slice(document_id_bytes)
.map_err(InternalError::SerdeJson)
.unwrap();
.map_err(InternalError::SerdeJson)?;
match validate_document_id_value(document_id) {
Ok(document_id) => Ok(Ok(document_id)),
Err(user_error) => {
@@ -109,8 +108,7 @@ impl<'a> PrimaryKey<'a> {
if let Some(field_id) = fields.id(first_level_name) {
if let Some(value_bytes) = document.get(field_id) {
let object = serde_json::from_slice(value_bytes)
.map_err(InternalError::SerdeJson)
.unwrap();
.map_err(InternalError::SerdeJson)?;
fetch_matching_values(object, right, &mut matching_documents_ids);
if matching_documents_ids.len() >= 2 {
@@ -153,12 +151,11 @@ impl<'a> PrimaryKey<'a> {
};
let document_id: &RawValue =
serde_json::from_slice(document_id).map_err(InternalError::SerdeJson).unwrap();
serde_json::from_slice(document_id).map_err(InternalError::SerdeJson)?;
let document_id = document_id
.deserialize_any(crate::update::new::indexer::de::DocumentIdVisitor(indexer))
.map_err(InternalError::SerdeJson)
.unwrap();
.map_err(InternalError::SerdeJson)?;
let external_document_id = match document_id {
Ok(document_id) => Ok(document_id),
@@ -176,7 +173,7 @@ impl<'a> PrimaryKey<'a> {
let Some(value) = document.get(fid) else { continue };
let value: &RawValue =
serde_json::from_slice(value).map_err(InternalError::SerdeJson).unwrap();
serde_json::from_slice(value).map_err(InternalError::SerdeJson)?;
match match_component(first_level, right, value, indexer, &mut docid) {
ControlFlow::Continue(()) => continue,
ControlFlow::Break(Ok(_)) => {
@@ -186,7 +183,7 @@ impl<'a> PrimaryKey<'a> {
.into())
}
ControlFlow::Break(Err(err)) => {
panic!("{err}");
return Err(InternalError::SerdeJson(err).into())
}
}
}

View File

@@ -228,8 +228,7 @@ pub fn obkv_to_json(
field_id: id,
process: "obkv_to_json",
})?;
let value =
serde_json::from_slice(value).map_err(error::InternalError::SerdeJson).unwrap();
let value = serde_json::from_slice(value).map_err(error::InternalError::SerdeJson)?;
Ok((name.to_owned(), value))
})
.collect()

View File

@@ -219,19 +219,12 @@ impl<'a> FacetDistribution<'a> {
let facet_key = StrRefCodec::bytes_decode(facet_key).unwrap();
let key: (FieldId, _, &str) = (field_id, any_docid, facet_key);
let optional_original_string =
self.index.field_id_docid_facet_strings.get(self.rtxn, &key)?;
let original_string = match optional_original_string {
Some(original_string) => original_string.to_owned(),
None => {
tracing::error!(
"Missing original facet string. Using the normalized facet {} instead",
facet_key
);
facet_key.to_string()
}
};
let original_string = self
.index
.field_id_docid_facet_strings
.get(self.rtxn, &key)?
.unwrap()
.to_owned();
distribution.insert(original_string, nbr_docids);
if distribution.len() == self.max_values_per_facet {

View File

@@ -1,4 +1,4 @@
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use rayon::{ThreadPool, ThreadPoolBuilder};
@@ -9,8 +9,6 @@ use thiserror::Error;
#[derive(Debug)]
pub struct ThreadPoolNoAbort {
thread_pool: ThreadPool,
/// The number of active operations.
active_operations: AtomicUsize,
/// Set to true if the thread pool caught a panic.
pool_catched_panic: Arc<AtomicBool>,
}
@@ -21,9 +19,7 @@ impl ThreadPoolNoAbort {
OP: FnOnce() -> R + Send,
R: Send,
{
self.active_operations.fetch_add(1, Ordering::Relaxed);
let output = self.thread_pool.install(op);
self.active_operations.fetch_sub(1, Ordering::Relaxed);
// While resetting the pool panic catcher we return an error if we caught one.
if self.pool_catched_panic.swap(false, Ordering::SeqCst) {
Err(PanicCatched)
@@ -35,11 +31,6 @@ impl ThreadPoolNoAbort {
pub fn current_num_threads(&self) -> usize {
self.thread_pool.current_num_threads()
}
/// The number of active operations.
pub fn active_operations(&self) -> usize {
self.active_operations.load(Ordering::Relaxed)
}
}
#[derive(Error, Debug)]
@@ -73,10 +64,6 @@ impl ThreadPoolNoAbortBuilder {
let catched_panic = pool_catched_panic.clone();
move |_result| catched_panic.store(true, Ordering::SeqCst)
});
Ok(ThreadPoolNoAbort {
thread_pool: self.0.build()?,
active_operations: AtomicUsize::new(0),
pool_catched_panic,
})
Ok(ThreadPoolNoAbort { thread_pool: self.0.build()?, pool_catched_panic })
}
}
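
The fields removed above implemented simple saturation accounting: an AtomicUsize is bumped around each install call so callers can ask how many operations are currently running. A minimal, pool-agnostic sketch of just that bookkeeping (the thread pool itself is left out):

use std::sync::atomic::{AtomicUsize, Ordering};

struct OpCounter {
    active_operations: AtomicUsize,
}

impl OpCounter {
    fn install<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        self.active_operations.fetch_add(1, Ordering::Relaxed);
        let output = op();
        self.active_operations.fetch_sub(1, Ordering::Relaxed);
        output
    }

    /// The number of operations currently inside `install`.
    fn active_operations(&self) -> usize {
        self.active_operations.load(Ordering::Relaxed)
    }
}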

View File

@@ -123,8 +123,7 @@ pub fn enrich_documents_batch<R: Read + Seek>(
}
}
let document_id =
serde_json::to_vec(&document_id).map_err(InternalError::SerdeJson).unwrap();
let document_id = serde_json::to_vec(&document_id).map_err(InternalError::SerdeJson)?;
external_ids.insert(count.to_be_bytes(), document_id)?;
count += 1;
@@ -238,7 +237,7 @@ pub fn validate_geo_from_json(id: &DocumentId, bytes: &[u8]) -> Result<StdResult
let debug_id = || {
serde_json::from_slice(id.value().as_bytes()).unwrap_or_else(|_| Value::from(id.debug()))
};
match serde_json::from_slice(bytes).map_err(InternalError::SerdeJson).unwrap() {
match serde_json::from_slice(bytes).map_err(InternalError::SerdeJson)? {
Value::Object(mut object) => match (object.remove("lat"), object.remove("lng")) {
(Some(lat), Some(lng)) => {
match (extract_finite_float_from_value(lat), extract_finite_float_from_value(lng)) {

View File

@@ -206,7 +206,7 @@ fn tokens_from_document<'a>(
if let Some(field_bytes) = KvReaderDelAdd::from_slice(field_bytes).get(del_add) {
// parse json.
let value =
serde_json::from_slice(field_bytes).map_err(InternalError::SerdeJson).unwrap();
serde_json::from_slice(field_bytes).map_err(InternalError::SerdeJson)?;
// prepare writing destination.
buffers.obkv_positions_buffer.clear();

View File

@@ -3334,44 +3334,6 @@ mod tests {
rtxn.commit().unwrap();
}
#[test]
fn incremental_update_without_changing_facet_distribution() {
let index = TempIndex::new();
index
.add_documents(documents!([
{"id": 0, "some_field": "aaa", "other_field": "aaa" },
{"id": 1, "some_field": "bbb", "other_field": "bbb" },
]))
.unwrap();
{
let rtxn = index.read_txn().unwrap();
// count field distribution
let results = index.field_distribution(&rtxn).unwrap();
assert_eq!(Some(&2), results.get("id"));
assert_eq!(Some(&2), results.get("some_field"));
assert_eq!(Some(&2), results.get("other_field"));
}
let mut index = index;
index.index_documents_config.update_method = IndexDocumentsMethod::UpdateDocuments;
index
.add_documents(documents!([
{"id": 0, "other_field": "bbb" },
{"id": 1, "some_field": "ccc" },
]))
.unwrap();
{
let rtxn = index.read_txn().unwrap();
// count field distribution
let results = index.field_distribution(&rtxn).unwrap();
assert_eq!(Some(&2), results.get("id"));
assert_eq!(Some(&2), results.get("some_field"));
assert_eq!(Some(&2), results.get("other_field"));
}
}
#[test]
fn delete_words_exact_attributes() {
let index = TempIndex::new();

View File

@@ -27,12 +27,6 @@ use crate::update::new::KvReaderFieldId;
use crate::vector::Embedding;
use crate::{CboRoaringBitmapCodec, DocumentId, Error, Index, InternalError};
/// Note that the FrameProducer requires up to 9 bytes to
/// encode the length, the max grant has been computed accordingly.
///
/// <https://docs.rs/bbqueue/latest/bbqueue/framed/index.html#frame-header>
const MAX_FRAME_HEADER_SIZE: usize = 9;
/// Creates a tuple of senders/receiver to be used by
/// the extractors and the writer loop.
///
@@ -59,9 +53,8 @@ pub fn extractor_writer_bbqueue(
bbbuffers.resize_with(current_num_threads, || BBBuffer::new(bbbuffer_capacity));
let capacity = bbbuffers.first().unwrap().capacity();
// 1. Due to fragmentation in the bbbuffer, we can only accept up to half the capacity in a single message.
// 2. Read the documentation for `MAX_FRAME_HEADER_SIZE` for more information about why it is here.
let max_grant = capacity.saturating_div(2).checked_sub(MAX_FRAME_HEADER_SIZE).unwrap();
// Read the field description to understand this
let capacity = capacity.checked_sub(9).unwrap();
let producers = ThreadLocal::with_capacity(bbbuffers.len());
let consumers = rayon::broadcast(|bi| {
@@ -72,7 +65,7 @@ pub fn extractor_writer_bbqueue(
});
let (sender, receiver) = flume::bounded(channel_capacity);
let sender = ExtractorBbqueueSender { sender, producers, max_grant };
let sender = ExtractorBbqueueSender { sender, producers, capacity };
let receiver = WriterBbqueueReceiver {
receiver,
look_at_consumer: (0..consumers.len()).cycle(),
@@ -88,10 +81,13 @@ pub struct ExtractorBbqueueSender<'a> {
/// A memory buffer, one by thread, is used to serialize
/// the entries directly in this shared, lock-free space.
producers: ThreadLocal<FullySend<RefCell<FrameProducer<'a>>>>,
/// The maximum frame grant that a producer can reserve.
/// It will never be able to store more than that as the
/// buffer cannot split data into two parts.
max_grant: usize,
/// The capacity of this frame producer, will never be able to store more than that.
///
/// Note that the FrameProducer requires up to 9 bytes to encode the length,
/// the capacity has been shrunk accordingly.
///
/// <https://docs.rs/bbqueue/latest/bbqueue/framed/index.html#frame-header>
capacity: usize,
}
pub struct WriterBbqueueReceiver<'a> {
@@ -447,14 +443,14 @@ impl<'b> ExtractorBbqueueSender<'b> {
}
fn delete_vector(&self, docid: DocumentId) -> crate::Result<()> {
let max_grant = self.max_grant;
let capacity = self.capacity;
let refcell = self.producers.get().unwrap();
let mut producer = refcell.0.borrow_mut_or_yield();
let payload_header = EntryHeader::ArroyDeleteVector(ArroyDeleteVector { docid });
let total_length = EntryHeader::total_delete_vector_size();
if total_length > max_grant {
panic!("The entry is larger ({total_length} bytes) than the BBQueue max grant ({max_grant} bytes)");
if total_length > capacity {
panic!("The entry is larger ({total_length} bytes) than the BBQueue capacity ({capacity} bytes)");
}
// Spin loop to have a frame the size we requested.
@@ -472,7 +468,7 @@ impl<'b> ExtractorBbqueueSender<'b> {
embedder_id: u8,
embeddings: &[Vec<f32>],
) -> crate::Result<()> {
let max_grant = self.max_grant;
let capacity = self.capacity;
let refcell = self.producers.get().unwrap();
let mut producer = refcell.0.borrow_mut_or_yield();
@@ -483,7 +479,7 @@ impl<'b> ExtractorBbqueueSender<'b> {
let arroy_set_vector = ArroySetVectors { docid, embedder_id, _padding: [0; 3] };
let payload_header = EntryHeader::ArroySetVectors(arroy_set_vector);
let total_length = EntryHeader::total_set_vectors_size(embeddings.len(), dimensions);
if total_length > max_grant {
if total_length > capacity {
let mut value_file = tempfile::tempfile().map(BufWriter::new)?;
for embedding in embeddings {
let mut embedding_bytes = bytemuck::cast_slice(embedding);
@@ -544,14 +540,14 @@ impl<'b> ExtractorBbqueueSender<'b> {
where
F: FnOnce(&mut [u8], &mut [u8]) -> crate::Result<()>,
{
let max_grant = self.max_grant;
let capacity = self.capacity;
let refcell = self.producers.get().unwrap();
let mut producer = refcell.0.borrow_mut_or_yield();
let operation = DbOperation { database, key_length: Some(key_length) };
let payload_header = EntryHeader::DbOperation(operation);
let total_length = EntryHeader::total_key_value_size(key_length, value_length);
if total_length > max_grant {
if total_length > capacity {
let mut key_buffer = vec![0; key_length.get() as usize].into_boxed_slice();
let value_file = tempfile::tempfile()?;
value_file.set_len(value_length.try_into().unwrap())?;
@@ -605,7 +601,7 @@ impl<'b> ExtractorBbqueueSender<'b> {
where
F: FnOnce(&mut [u8]) -> crate::Result<()>,
{
let max_grant = self.max_grant;
let capacity = self.capacity;
let refcell = self.producers.get().unwrap();
let mut producer = refcell.0.borrow_mut_or_yield();
@@ -614,8 +610,8 @@ impl<'b> ExtractorBbqueueSender<'b> {
let operation = DbOperation { database, key_length: None };
let payload_header = EntryHeader::DbOperation(operation);
let total_length = EntryHeader::total_key_size(key_length);
if total_length > max_grant {
panic!("The entry is larger ({total_length} bytes) than the BBQueue max grant ({max_grant} bytes)");
if total_length > capacity {
panic!("The entry is larger ({total_length} bytes) than the BBQueue capacity ({capacity} bytes)");
}
// Spin loop to have a frame the size we requested.
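
The two variants above differ only in how much of a buffer a single frame may claim: one caps a frame at half the capacity minus the 9-byte bbqueue frame header, the other at the full capacity minus the header. Purely illustrative arithmetic, with an assumed per-thread buffer size:

const MAX_FRAME_HEADER_SIZE: usize = 9;

fn main() {
    // Assumed buffer size; not a value taken from the diff.
    let capacity: usize = 64 * 1024 * 1024;

    // Half the buffer, minus the frame header (fragmentation-aware bound).
    let max_grant = capacity / 2 - MAX_FRAME_HEADER_SIZE;
    // Whole buffer, minus the frame header.
    let shrunk_capacity = capacity - MAX_FRAME_HEADER_SIZE;

    println!("max_grant = {max_grant} bytes, shrunk capacity = {shrunk_capacity} bytes");
}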

View File

@@ -86,7 +86,7 @@ impl<'t, Mapper: FieldIdMapper> Document<'t> for DocumentFromDb<'t, Mapper> {
let res = (|| {
let value =
serde_json::from_slice(value).map_err(crate::InternalError::SerdeJson).unwrap();
serde_json::from_slice(value).map_err(crate::InternalError::SerdeJson)?;
Ok((name, value))
})();
@@ -139,7 +139,7 @@ impl<'t, Mapper: FieldIdMapper> DocumentFromDb<'t, Mapper> {
return Ok(None);
};
let Some(value) = self.content.get(fid) else { return Ok(None) };
Ok(Some(serde_json::from_slice(value).map_err(InternalError::SerdeJson).unwrap()))
Ok(Some(serde_json::from_slice(value).map_err(InternalError::SerdeJson)?))
}
}

View File

@@ -89,8 +89,7 @@ impl<'a, 'b, 'extractor> Extractor<'extractor> for DocumentsExtractor<'a, 'b> {
.or_default();
*entry -= 1;
}
let content =
update.merged(&context.rtxn, context.index, &context.db_fields_ids_map)?;
let content = update.updated();
let geo_iter =
content.geo_field().transpose().map(|res| res.map(|rv| ("_geo", rv)));
for res in content.iter_top_level_fields().chain(geo_iter) {

View File

@@ -283,60 +283,42 @@ impl FacetedDocidsExtractor {
}
struct DelAddFacetValue<'doc> {
strings: HashMap<
(FieldId, &'doc str),
Option<BVec<'doc, u8>>,
hashbrown::DefaultHashBuilder,
&'doc Bump,
>,
strings: HashMap<(FieldId, BVec<'doc, u8>), DelAdd, hashbrown::DefaultHashBuilder, &'doc Bump>,
f64s: HashMap<(FieldId, BVec<'doc, u8>), DelAdd, hashbrown::DefaultHashBuilder, &'doc Bump>,
doc_alloc: &'doc Bump,
}
impl<'doc> DelAddFacetValue<'doc> {
fn new(doc_alloc: &'doc Bump) -> Self {
Self { strings: HashMap::new_in(doc_alloc), f64s: HashMap::new_in(doc_alloc), doc_alloc }
Self { strings: HashMap::new_in(doc_alloc), f64s: HashMap::new_in(doc_alloc) }
}
fn insert_add(&mut self, fid: FieldId, value: BVec<'doc, u8>, kind: FacetKind) {
match kind {
FacetKind::Number => {
let key = (fid, value);
if let Some(DelAdd::Deletion) = self.f64s.get(&key) {
self.f64s.remove(&key);
} else {
self.f64s.insert(key, DelAdd::Addition);
}
}
FacetKind::String => {
if let Ok(s) = std::str::from_utf8(&value) {
let normalized = crate::normalize_facet(s);
let truncated = self.doc_alloc.alloc_str(truncate_str(&normalized));
self.strings.insert((fid, truncated), Some(value));
}
}
_ => (),
let cache = match kind {
FacetKind::String => &mut self.strings,
FacetKind::Number => &mut self.f64s,
_ => return,
};
let key = (fid, value);
if let Some(DelAdd::Deletion) = cache.get(&key) {
cache.remove(&key);
} else {
cache.insert(key, DelAdd::Addition);
}
}
fn insert_del(&mut self, fid: FieldId, value: BVec<'doc, u8>, kind: FacetKind) {
match kind {
FacetKind::Number => {
let key = (fid, value);
if let Some(DelAdd::Addition) = self.f64s.get(&key) {
self.f64s.remove(&key);
} else {
self.f64s.insert(key, DelAdd::Deletion);
}
}
FacetKind::String => {
if let Ok(s) = std::str::from_utf8(&value) {
let normalized = crate::normalize_facet(s);
let truncated = self.doc_alloc.alloc_str(truncate_str(&normalized));
self.strings.insert((fid, truncated), None);
}
}
_ => (),
let cache = match kind {
FacetKind::String => &mut self.strings,
FacetKind::Number => &mut self.f64s,
_ => return,
};
let key = (fid, value);
if let Some(DelAdd::Addition) = cache.get(&key) {
cache.remove(&key);
} else {
cache.insert(key, DelAdd::Deletion);
}
}
@@ -347,14 +329,18 @@ impl<'doc> DelAddFacetValue<'doc> {
doc_alloc: &Bump,
) -> crate::Result<()> {
let mut buffer = bumpalo::collections::Vec::new_in(doc_alloc);
for ((fid, truncated), value) in self.strings {
buffer.clear();
buffer.extend_from_slice(&fid.to_be_bytes());
buffer.extend_from_slice(&docid.to_be_bytes());
buffer.extend_from_slice(truncated.as_bytes());
match &value {
Some(value) => sender.write_facet_string(&buffer, value)?,
None => sender.delete_facet_string(&buffer)?,
for ((fid, value), deladd) in self.strings {
if let Ok(s) = std::str::from_utf8(&value) {
buffer.clear();
buffer.extend_from_slice(&fid.to_be_bytes());
buffer.extend_from_slice(&docid.to_be_bytes());
let normalized = crate::normalize_facet(s);
let truncated = truncate_str(&normalized);
buffer.extend_from_slice(truncated.as_bytes());
match deladd {
DelAdd::Deletion => sender.delete_facet_string(&buffer)?,
DelAdd::Addition => sender.write_facet_string(&buffer, &value)?,
}
}
}
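
The bookkeeping rewritten above relies on one invariant: a key marked Deletion that later receives an Addition (or the reverse) nets out to nothing, so the entry is dropped instead of queuing both operations. A standalone sketch of that cancellation, with u16 standing in for FieldId and plain Vec<u8> values:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum DelAdd {
    Deletion,
    Addition,
}

fn insert_add(cache: &mut HashMap<(u16, Vec<u8>), DelAdd>, fid: u16, value: Vec<u8>) {
    let key = (fid, value);
    if let Some(DelAdd::Deletion) = cache.get(&key) {
        cache.remove(&key); // deletion + addition cancel out: the value is unchanged
    } else {
        cache.insert(key, DelAdd::Addition);
    }
}

fn insert_del(cache: &mut HashMap<(u16, Vec<u8>), DelAdd>, fid: u16, value: Vec<u8>) {
    let key = (fid, value);
    if let Some(DelAdd::Addition) = cache.get(&key) {
        cache.remove(&key); // addition + deletion cancel out as well
    } else {
        cache.insert(key, DelAdd::Deletion);
    }
}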

View File

@@ -27,7 +27,7 @@ pub fn extract_document_facets<'doc>(
let selection = perm_json_p::select_field(field_name, Some(attributes_to_extract), &[]);
if selection != perm_json_p::Selection::Skip {
// parse json.
match serde_json::value::to_value(value).map_err(InternalError::SerdeJson).unwrap() {
match serde_json::value::to_value(value).map_err(InternalError::SerdeJson)? {
Value::Object(object) => {
perm_json_p::seek_leaf_values_in_object(
&object,

View File

@@ -256,16 +256,15 @@ pub fn extract_geo_coordinates(
external_id: &str,
raw_value: &RawValue,
) -> Result<Option<[f64; 2]>> {
let mut geo =
match serde_json::from_str(raw_value.get()).map_err(InternalError::SerdeJson).unwrap() {
Value::Null => return Ok(None),
Value::Object(map) => map,
value => {
return Err(
GeoError::NotAnObject { document_id: Value::from(external_id), value }.into()
)
}
};
let mut geo = match serde_json::from_str(raw_value.get()).map_err(InternalError::SerdeJson)? {
Value::Null => return Ok(None),
Value::Object(map) => map,
value => {
return Err(
GeoError::NotAnObject { document_id: Value::from(external_id), value }.into()
)
}
};
let [lat, lng] = match (geo.remove("lat"), geo.remove("lng")) {
(Some(lat), Some(lng)) => {

View File

@@ -94,7 +94,7 @@ impl<'a> DocumentTokenizer<'a> {
};
// parse json.
match serde_json::to_value(value).map_err(InternalError::SerdeJson).unwrap() {
match serde_json::to_value(value).map_err(InternalError::SerdeJson)? {
Value::Object(object) => seek_leaf_values_in_object(
&object,
None,

View File

@@ -158,7 +158,7 @@ fn extract_addition_payload_changes<'r, 'pl: 'r>(
let mut previous_offset = 0;
let mut iter = Deserializer::from_slice(payload).into_iter::<&RawValue>();
while let Some(doc) = iter.next().transpose().map_err(InternalError::SerdeJson).unwrap() {
while let Some(doc) = iter.next().transpose().map_err(InternalError::SerdeJson)? {
*bytes = previous_offset as u64;
// Only guess the primary key if it is the first document

View File

@@ -4,7 +4,6 @@ use std::sync::{OnceLock, RwLock};
use std::thread::{self, Builder};
use big_s::S;
use bstr::ByteSlice as _;
use bumparaw_collections::RawMap;
use document_changes::{extract, DocumentChanges, IndexingContext};
pub use document_deletion::DocumentDeletion;
@@ -93,32 +92,24 @@ where
..grenad_parameters
};
// 5% of the allocated memory for the extractors, or min 100MiB
// 5% of the allocated memory for the bbqueues, or min 50MiB
//
// Minimum capacity for bbqueues
let minimum_total_bbbuffer_capacity = 50 * 1024 * 1024 * pool.current_num_threads(); // 50 MiB
let minimum_total_extractors_capacity = minimum_total_bbbuffer_capacity * 2;
// We compute and remove the allocated BBQueues buffers capacity from the indexing memory.
let minimum_capacity = 50 * 1024 * 1024 * pool.current_num_threads(); // 50 MiB
let (grenad_parameters, total_bbbuffer_capacity) = grenad_parameters.max_memory.map_or(
(
GrenadParameters {
max_memory: Some(minimum_total_extractors_capacity),
..grenad_parameters
},
minimum_total_bbbuffer_capacity,
), // 100 MiB by thread by default
(grenad_parameters, 2 * minimum_capacity), // 100 MiB by thread by default
|max_memory| {
let total_bbbuffer_capacity = max_memory.max(minimum_total_bbbuffer_capacity);
// 2% of the indexing memory
let total_bbbuffer_capacity = (max_memory / 100 / 2).max(minimum_capacity);
let new_grenad_parameters = GrenadParameters {
max_memory: Some(max_memory.max(minimum_total_extractors_capacity)),
max_memory: Some(
max_memory.saturating_sub(total_bbbuffer_capacity).max(100 * 1024 * 1024),
),
..grenad_parameters
};
(new_grenad_parameters, total_bbbuffer_capacity)
},
);
let (extractor_sender, writer_receiver) = pool
let (extractor_sender, mut writer_receiver) = pool
.install(|| extractor_writer_bbqueue(&mut bbbuffers, total_bbbuffer_capacity, 1000))
.unwrap();
@@ -433,7 +424,6 @@ where
let mut arroy_writers = arroy_writers?;
{
let mut writer_receiver = writer_receiver;
let span = tracing::trace_span!(target: "indexing::write_db", "all");
let _entered = span.enter();
@@ -546,6 +536,9 @@ where
drop(fields_ids_map_store);
let new_fields_ids_map = new_fields_ids_map.into_inner().unwrap();
for (fid, name, metadata) in new_fields_ids_map.iter() {
tracing::debug!("{fid}:{name},{metadata:?}");
}
index.put_fields_ids_map(wtxn, new_fields_ids_map.as_fields_ids_map())?;
if let Some(new_primary_key) = new_primary_key {
@@ -593,12 +586,7 @@ fn write_from_bbqueue(
}
(key, None) => match database.delete(wtxn, key) {
Ok(false) => {
tracing::error!(
database_name,
key_bytes = ?key,
formatted_key = ?key.as_bstr(),
"Attempt to delete an unknown key"
);
unreachable!("We tried to delete an unknown key: {key:?}")
}
Ok(_) => (),
Err(error) => {
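
To make the memory budget above concrete, the two formulas from this hunk are transcribed side by side and evaluated on assumed inputs (16 threads, 4 GiB of indexing memory); the concrete numbers are only for illustration:

fn mib(bytes: u64) -> u64 {
    bytes / (1024 * 1024)
}

fn main() {
    let threads: u64 = 16; // assumed
    let max_memory: u64 = 4 * 1024 * 1024 * 1024; // 4 GiB, assumed
    let min_bbbuffers = 50 * 1024 * 1024 * threads; // 50 MiB per thread

    // Variant that keeps max_memory for the extractors:
    let bbbuffers_a = max_memory.max(min_bbbuffers);
    let extractors_a = max_memory.max(2 * min_bbbuffers);

    // Variant that carves the buffers out of max_memory, floored at 100 MiB:
    let bbbuffers_b = (max_memory / 100 / 2).max(min_bbbuffers);
    let extractors_b = max_memory.saturating_sub(bbbuffers_b).max(100 * 1024 * 1024);

    println!("A: bbqueues = {} MiB, extractors = {} MiB", mib(bbbuffers_a), mib(extractors_a));
    println!("B: bbqueues = {} MiB, extractors = {} MiB", mib(bbbuffers_b), mib(extractors_b));
}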

View File

@@ -78,8 +78,7 @@ where
let external_document_id = external_document_id.to_de();
let document = RawMap::from_raw_value_and_hasher(document, FxBuildHasher, doc_alloc)
.map_err(InternalError::SerdeJson)
.unwrap();
.map_err(InternalError::SerdeJson)?;
let insertion = Insertion::create(docid, external_document_id, Versions::single(document));
Ok(Some(DocumentChange::Insertion(insertion)))

View File

@@ -58,9 +58,9 @@ impl UpdateByFunction {
let ast = engine.compile(code).map_err(UserError::DocumentEditionCompilationError)?;
let context = match context {
Some(context) => Some(
serde_json::from_value(context.into()).map_err(InternalError::SerdeJson).unwrap(),
),
Some(context) => {
Some(serde_json::from_value(context.into()).map_err(InternalError::SerdeJson)?)
}
None => None,
};
@@ -137,11 +137,9 @@ impl<'index> DocumentChanges<'index> for UpdateByFunctionChanges<'index> {
Some(new_rhai_document) => {
let mut buffer = bumpalo::collections::Vec::new_in(doc_alloc);
serde_json::to_writer(&mut buffer, &new_rhai_document)
.map_err(InternalError::SerdeJson)
.unwrap();
.map_err(InternalError::SerdeJson)?;
let raw_new_doc = serde_json::from_slice(buffer.into_bump_slice())
.map_err(InternalError::SerdeJson)
.unwrap();
.map_err(InternalError::SerdeJson)?;
// Note: This condition is not perfect. Sometimes it detects changes
// like with floating-point numbers and considers updating
@@ -168,8 +166,7 @@ impl<'index> DocumentChanges<'index> for UpdateByFunctionChanges<'index> {
FxBuildHasher,
doc_alloc,
)
.map_err(InternalError::SerdeJson)
.unwrap();
.map_err(InternalError::SerdeJson)?;
Ok(Some(DocumentChange::Update(Update::create(
docid,
@@ -203,7 +200,7 @@ fn obkv_to_rhaimap(obkv: &KvReaderFieldId, fields_ids_map: &FieldsIdsMap) -> Res
field_id: id,
process: "all_obkv_to_rhaimap",
})?;
let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson).unwrap();
let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson)?;
Ok((name.into(), value))
})
.collect();

View File

@@ -269,7 +269,8 @@ impl FacetFieldIdsDelta {
pub fn consume_facet_string_delta(
&mut self,
) -> impl Iterator<Item = (FieldId, FacetFieldIdDelta)> + '_ {
self.modified_facet_string_ids.drain()
None.into_iter()
// self.modified_facet_string_ids.drain()
}
pub fn consume_facet_number_delta(

View File

@@ -16,7 +16,6 @@ pub mod indexer;
mod merger;
mod parallel_iterator_ext;
mod ref_cell_ext;
pub mod reindex;
pub(crate) mod steps;
pub(crate) mod thread_local;
pub mod vector_document;

View File

@@ -1,38 +0,0 @@
use heed::RwTxn;
use super::document::{Document, DocumentFromDb};
use crate::progress::{self, AtomicSubStep, Progress};
use crate::{FieldDistribution, Index, Result};
pub fn field_distribution(index: &Index, wtxn: &mut RwTxn<'_>, progress: &Progress) -> Result<()> {
let mut distribution = FieldDistribution::new();
let document_count = index.number_of_documents(wtxn)?;
let field_id_map = index.fields_ids_map(wtxn)?;
let (update_document_count, sub_step) =
AtomicSubStep::<progress::Document>::new(document_count as u32);
progress.update_progress(sub_step);
let docids = index.documents_ids(wtxn)?;
for docid in docids {
update_document_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let Some(document) = DocumentFromDb::new(docid, wtxn, index, &field_id_map)? else {
continue;
};
let geo_iter = document.geo_field().transpose().map(|res| res.map(|rv| ("_geo", rv)));
for res in document.iter_top_level_fields().chain(geo_iter) {
let (field_name, _) = res?;
if let Some(count) = distribution.get_mut(field_name) {
*count += 1;
} else {
distribution.insert(field_name.to_owned(), 1);
}
}
}
index.put_field_distribution(wtxn, &distribution)?;
Ok(())
}

View File

@@ -105,8 +105,7 @@ impl<'t> VectorDocumentFromDb<'t> {
let vectors_field = match vectors {
Some(vectors) => Some(
RawMap::from_raw_value_and_hasher(vectors, FxBuildHasher, doc_alloc)
.map_err(InternalError::SerdeJson)
.unwrap(),
.map_err(InternalError::SerdeJson)?,
),
None => None,
};

View File

@@ -5,7 +5,7 @@ use rayon::slice::ParallelSlice as _;
use super::error::{EmbedError, EmbedErrorKind, NewEmbedderError, NewEmbedderErrorKind};
use super::rest::{Embedder as RestEmbedder, EmbedderOptions as RestEmbedderOptions};
use super::{DistributionShift, REQUEST_PARALLELISM};
use super::DistributionShift;
use crate::error::FaultSource;
use crate::vector::Embedding;
use crate::ThreadPoolNoAbort;
@@ -98,18 +98,14 @@ impl Embedder {
text_chunks: Vec<Vec<String>>,
threads: &ThreadPoolNoAbort,
) -> Result<Vec<Vec<Embedding>>, EmbedError> {
if threads.active_operations() >= REQUEST_PARALLELISM {
text_chunks.into_iter().map(move |chunk| self.embed(&chunk, None)).collect()
} else {
threads
.install(move || {
text_chunks.into_par_iter().map(move |chunk| self.embed(&chunk, None)).collect()
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
threads
.install(move || {
text_chunks.into_par_iter().map(move |chunk| self.embed(&chunk, None)).collect()
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
pub(crate) fn embed_chunks_ref(
@@ -117,30 +113,20 @@ impl Embedder {
texts: &[&str],
threads: &ThreadPoolNoAbort,
) -> Result<Vec<Vec<f32>>, EmbedError> {
if threads.active_operations() >= REQUEST_PARALLELISM {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed(chunk, None))
.collect();
threads
.install(move || {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.par_chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed(chunk, None))
.collect();
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
} else {
threads
.install(move || {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.par_chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed(chunk, None))
.collect();
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
pub fn chunk_count_hint(&self) -> usize {
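
The gate on one side of these hunks (and of the two sections that follow) checks whether the pool already has REQUEST_PARALLELISM operations in flight and, if so, embeds on the current thread instead of queuing more parallel work. A simplified sketch of that branch; the pool type, the per-chunk work, and the constant's value are placeholders here:

use std::sync::atomic::{AtomicUsize, Ordering};

use rayon::prelude::*;

const REQUEST_PARALLELISM: usize = 40; // assumed value

fn embed_all(
    chunks: Vec<Vec<String>>,
    pool: &rayon::ThreadPool,
    active_operations: &AtomicUsize,
) -> Vec<usize> {
    if active_operations.load(Ordering::Relaxed) >= REQUEST_PARALLELISM {
        // Sequential path: the pool is already saturated with requests.
        chunks.iter().map(|chunk| chunk.len()).collect()
    } else {
        // Parallel path: let the pool fan the chunks out.
        pool.install(|| chunks.par_iter().map(|chunk| chunk.len()).collect())
    }
}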

View File

@@ -6,7 +6,7 @@ use rayon::slice::ParallelSlice as _;
use super::error::{EmbedError, NewEmbedderError};
use super::rest::{Embedder as RestEmbedder, EmbedderOptions as RestEmbedderOptions};
use super::{DistributionShift, REQUEST_PARALLELISM};
use super::DistributionShift;
use crate::error::FaultSource;
use crate::vector::error::EmbedErrorKind;
use crate::vector::Embedding;
@@ -255,18 +255,14 @@ impl Embedder {
text_chunks: Vec<Vec<String>>,
threads: &ThreadPoolNoAbort,
) -> Result<Vec<Vec<Embedding>>, EmbedError> {
if threads.active_operations() >= REQUEST_PARALLELISM {
text_chunks.into_iter().map(move |chunk| self.embed(&chunk, None)).collect()
} else {
threads
.install(move || {
text_chunks.into_par_iter().map(move |chunk| self.embed(&chunk, None)).collect()
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
threads
.install(move || {
text_chunks.into_par_iter().map(move |chunk| self.embed(&chunk, None)).collect()
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
pub(crate) fn embed_chunks_ref(
@@ -274,29 +270,20 @@ impl Embedder {
texts: &[&str],
threads: &ThreadPoolNoAbort,
) -> Result<Vec<Vec<f32>>, EmbedError> {
if threads.active_operations() >= REQUEST_PARALLELISM {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed(chunk, None))
.collect();
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
} else {
threads
.install(move || {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.par_chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed(chunk, None))
.collect();
threads
.install(move || {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.par_chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed(chunk, None))
.collect();
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
pub fn chunk_count_hint(&self) -> usize {

View File

@@ -188,18 +188,14 @@ impl Embedder {
text_chunks: Vec<Vec<String>>,
threads: &ThreadPoolNoAbort,
) -> Result<Vec<Vec<Embedding>>, EmbedError> {
if threads.active_operations() >= REQUEST_PARALLELISM {
text_chunks.into_iter().map(move |chunk| self.embed(chunk, None)).collect()
} else {
threads
.install(move || {
text_chunks.into_par_iter().map(move |chunk| self.embed(chunk, None)).collect()
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
threads
.install(move || {
text_chunks.into_par_iter().map(move |chunk| self.embed(chunk, None)).collect()
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
pub(crate) fn embed_chunks_ref(
@@ -207,30 +203,20 @@ impl Embedder {
texts: &[&str],
threads: &ThreadPoolNoAbort,
) -> Result<Vec<Embedding>, EmbedError> {
if threads.active_operations() >= REQUEST_PARALLELISM {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed_ref(chunk, None))
.collect();
threads
.install(move || {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.par_chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed_ref(chunk, None))
.collect();
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
} else {
threads
.install(move || {
let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
.par_chunks(self.prompt_count_in_chunk_hint())
.map(move |chunk| self.embed_ref(chunk, None))
.collect();
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
let embeddings = embeddings?;
Ok(embeddings.into_iter().flatten().collect())
})
.map_err(|error| EmbedError {
kind: EmbedErrorKind::PanicInThreadPool(error),
fault: FaultSource::Bug,
})?
}
pub fn chunk_count_hint(&self) -> usize {