diff --git a/Cargo.lock b/Cargo.lock index 5f192b6d1..ff4981d11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1794,7 +1794,7 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "heed" version = "0.12.5" -source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8" +source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e" dependencies = [ "byteorder", "heed-traits", @@ -1811,12 +1811,12 @@ dependencies = [ [[package]] name = "heed-traits" version = "0.7.0" -source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8" +source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e" [[package]] name = "heed-types" version = "0.7.2" -source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8" +source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e" dependencies = [ "bincode", "heed-traits", diff --git a/config.toml b/config.toml index 71087f25f..e5b339ffa 100644 --- a/config.toml +++ b/config.toml @@ -126,3 +126,6 @@ ssl_tickets = false experimental_enable_metrics = false # Experimental metrics feature. For more information, see: # Enables the Prometheus metrics on the `GET /metrics` endpoint. 
+ +experimental_reduce_indexing_memory_usage = false +# Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652> diff --git a/index-scheduler/src/index_mapper/index_map.rs b/index-scheduler/src/index_mapper/index_map.rs index d140d4944..9bed4fe5d 100644 --- a/index-scheduler/src/index_mapper/index_map.rs +++ b/index-scheduler/src/index_mapper/index_map.rs @@ -5,6 +5,7 @@ use std::collections::BTreeMap; use std::path::Path; use std::time::Duration; +use meilisearch_types::heed::flags::Flags; use meilisearch_types::heed::{EnvClosingEvent, EnvOpenOptions}; use meilisearch_types::milli::Index; use time::OffsetDateTime; @@ -53,6 +54,7 @@ pub struct IndexMap { pub struct ClosingIndex { uuid: Uuid, closing_event: EnvClosingEvent, + enable_mdb_writemap: bool, map_size: usize, generation: usize, } @@ -68,6 +70,7 @@ impl ClosingIndex { pub fn wait_timeout(self, timeout: Duration) -> Option { self.closing_event.wait_timeout(timeout).then_some(ReopenableIndex { uuid: self.uuid, + enable_mdb_writemap: self.enable_mdb_writemap, map_size: self.map_size, generation: self.generation, }) @@ -76,6 +79,7 @@ impl ClosingIndex { pub struct ReopenableIndex { uuid: Uuid, + enable_mdb_writemap: bool, map_size: usize, generation: usize, } @@ -103,7 +107,7 @@ impl ReopenableIndex { return Ok(()); } map.unavailable.remove(&self.uuid); - map.create(&self.uuid, path, None, self.map_size)?; + map.create(&self.uuid, path, None, self.enable_mdb_writemap, self.map_size)?; } Ok(()) } @@ -170,16 +174,17 @@ impl IndexMap { uuid: &Uuid, path: &Path, date: Option<(OffsetDateTime, OffsetDateTime)>, + enable_mdb_writemap: bool, map_size: usize, ) -> Result { if !matches!(self.get_unavailable(uuid), Missing) { panic!("Attempt to open an index that was unavailable"); } - let index = create_or_open_index(path, date, map_size)?; + let index = create_or_open_index(path, date, enable_mdb_writemap, map_size)?; match self.available.insert(*uuid, index.clone()) { InsertionOutcome::InsertedNew =>
(), InsertionOutcome::Evicted(evicted_uuid, evicted_index) => { - self.close(evicted_uuid, evicted_index, 0); + self.close(evicted_uuid, evicted_index, enable_mdb_writemap, 0); } InsertionOutcome::Replaced(_) => { panic!("Attempt to open an index that was already opened") @@ -212,17 +217,30 @@ impl IndexMap { /// | Closing | Closing | /// | Available | Closing | /// - pub fn close_for_resize(&mut self, uuid: &Uuid, map_size_growth: usize) { + pub fn close_for_resize( + &mut self, + uuid: &Uuid, + enable_mdb_writemap: bool, + map_size_growth: usize, + ) { let Some(index) = self.available.remove(uuid) else { return; }; - self.close(*uuid, index, map_size_growth); + self.close(*uuid, index, enable_mdb_writemap, map_size_growth); } - fn close(&mut self, uuid: Uuid, index: Index, map_size_growth: usize) { + fn close( + &mut self, + uuid: Uuid, + index: Index, + enable_mdb_writemap: bool, + map_size_growth: usize, + ) { let map_size = index.map_size().unwrap_or(DEFAULT_MAP_SIZE) + map_size_growth; let closing_event = index.prepare_for_closing(); let generation = self.next_generation(); - self.unavailable - .insert(uuid, Some(ClosingIndex { uuid, closing_event, map_size, generation })); + self.unavailable.insert( + uuid, + Some(ClosingIndex { uuid, closing_event, enable_mdb_writemap, map_size, generation }), + ); } /// Attempts to delete and index. @@ -282,11 +300,15 @@ impl IndexMap { fn create_or_open_index( path: &Path, date: Option<(OffsetDateTime, OffsetDateTime)>, + enable_mdb_writemap: bool, map_size: usize, ) -> Result { let mut options = EnvOpenOptions::new(); options.map_size(clamp_to_page_size(map_size)); options.max_readers(1024); + if enable_mdb_writemap { + unsafe { options.flag(Flags::MdbWriteMap) }; + } if let Some((created, updated)) = date { Ok(Index::new_with_creation_dates(options, path, created, updated)?) 
diff --git a/index-scheduler/src/index_mapper/mod.rs b/index-scheduler/src/index_mapper/mod.rs index 2bf6f46ad..2903a824f 100644 --- a/index-scheduler/src/index_mapper/mod.rs +++ b/index-scheduler/src/index_mapper/mod.rs @@ -66,6 +66,8 @@ pub struct IndexMapper { index_base_map_size: usize, /// The quantity by which the map size of an index is incremented upon reopening, in bytes. index_growth_amount: usize, + /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not. + enable_mdb_writemap: bool, pub indexer_config: Arc, } @@ -123,15 +125,22 @@ impl IndexMapper { index_base_map_size: usize, index_growth_amount: usize, index_count: usize, + enable_mdb_writemap: bool, indexer_config: IndexerConfig, ) -> Result { + let mut wtxn = env.write_txn()?; + let index_mapping = env.create_database(&mut wtxn, Some(INDEX_MAPPING))?; + let index_stats = env.create_database(&mut wtxn, Some(INDEX_STATS))?; + wtxn.commit()?; + Ok(Self { index_map: Arc::new(RwLock::new(IndexMap::new(index_count))), - index_mapping: env.create_database(Some(INDEX_MAPPING))?, - index_stats: env.create_database(Some(INDEX_STATS))?, + index_mapping, + index_stats, base_path, index_base_map_size, index_growth_amount, + enable_mdb_writemap, indexer_config: Arc::new(indexer_config), }) } @@ -162,6 +171,7 @@ impl IndexMapper { &uuid, &index_path, date, + self.enable_mdb_writemap, self.index_base_map_size, )?; @@ -273,7 +283,11 @@ impl IndexMapper { .ok_or_else(|| Error::IndexNotFound(name.to_string()))?; // We remove the index from the in-memory index map. 
- self.index_map.write().unwrap().close_for_resize(&uuid, self.index_growth_amount); + self.index_map.write().unwrap().close_for_resize( + &uuid, + self.enable_mdb_writemap, + self.index_growth_amount, + ); Ok(()) } @@ -338,6 +352,7 @@ impl IndexMapper { &uuid, &index_path, None, + self.enable_mdb_writemap, self.index_base_map_size, )?; } diff --git a/index-scheduler/src/lib.rs b/index-scheduler/src/lib.rs index af20ba1ae..c5eaf2735 100644 --- a/index-scheduler/src/lib.rs +++ b/index-scheduler/src/lib.rs @@ -233,6 +233,8 @@ pub struct IndexSchedulerOptions { pub task_db_size: usize, /// The size, in bytes, with which a meilisearch index is opened the first time of each meilisearch index. pub index_base_map_size: usize, + /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not. + pub enable_mdb_writemap: bool, /// The size, in bytes, by which the map size of an index is increased when it resized due to being full. pub index_growth_amount: usize, /// The number of indexes that can be concurrently opened in memory. 
@@ -374,6 +376,11 @@ impl IndexScheduler { std::fs::create_dir_all(&options.indexes_path)?; std::fs::create_dir_all(&options.dumps_path)?; + if cfg!(windows) && options.enable_mdb_writemap { + // programmer error if this happens: in normal use passing the option on Windows is an error in main + panic!("Windows doesn't support the MDB_WRITEMAP LMDB option"); + } + let task_db_size = clamp_to_page_size(options.task_db_size); let budget = if options.indexer_config.skip_index_budget { IndexBudget { @@ -396,25 +403,37 @@ impl IndexScheduler { .open(options.tasks_path)?; let file_store = FileStore::new(&options.update_file_path)?; + let mut wtxn = env.write_txn()?; + let all_tasks = env.create_database(&mut wtxn, Some(db_name::ALL_TASKS))?; + let status = env.create_database(&mut wtxn, Some(db_name::STATUS))?; + let kind = env.create_database(&mut wtxn, Some(db_name::KIND))?; + let index_tasks = env.create_database(&mut wtxn, Some(db_name::INDEX_TASKS))?; + let canceled_by = env.create_database(&mut wtxn, Some(db_name::CANCELED_BY))?; + let enqueued_at = env.create_database(&mut wtxn, Some(db_name::ENQUEUED_AT))?; + let started_at = env.create_database(&mut wtxn, Some(db_name::STARTED_AT))?; + let finished_at = env.create_database(&mut wtxn, Some(db_name::FINISHED_AT))?; + wtxn.commit()?; + // allow unreachable_code to get rids of the warning in the case of a test build. 
let this = Self { must_stop_processing: MustStopProcessing::default(), processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())), file_store, - all_tasks: env.create_database(Some(db_name::ALL_TASKS))?, - status: env.create_database(Some(db_name::STATUS))?, - kind: env.create_database(Some(db_name::KIND))?, - index_tasks: env.create_database(Some(db_name::INDEX_TASKS))?, - canceled_by: env.create_database(Some(db_name::CANCELED_BY))?, - enqueued_at: env.create_database(Some(db_name::ENQUEUED_AT))?, - started_at: env.create_database(Some(db_name::STARTED_AT))?, - finished_at: env.create_database(Some(db_name::FINISHED_AT))?, + all_tasks, + status, + kind, + index_tasks, + canceled_by, + enqueued_at, + started_at, + finished_at, index_mapper: IndexMapper::new( &env, options.indexes_path, budget.map_size, options.index_growth_amount, budget.index_count, + options.enable_mdb_writemap, options.indexer_config, )?, env, @@ -1471,6 +1490,7 @@ mod tests { dumps_path: tempdir.path().join("dumps"), task_db_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose. index_base_map_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose. 
+ enable_mdb_writemap: false, index_growth_amount: 1000 * 1000, // 1 MB index_count: 5, indexer_config, diff --git a/meilisearch-auth/src/store.rs b/meilisearch-auth/src/store.rs index 5c2776154..3e4acc88e 100644 --- a/meilisearch-auth/src/store.rs +++ b/meilisearch-auth/src/store.rs @@ -55,9 +55,11 @@ impl HeedAuthStore { let path = path.as_ref().join(AUTH_DB_PATH); create_dir_all(&path)?; let env = Arc::new(open_auth_store_env(path.as_ref())?); - let keys = env.create_database(Some(KEY_DB_NAME))?; + let mut wtxn = env.write_txn()?; + let keys = env.create_database(&mut wtxn, Some(KEY_DB_NAME))?; let action_keyid_index_expiration = - env.create_database(Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?; + env.create_database(&mut wtxn, Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?; + wtxn.commit()?; Ok(Self { env, keys, action_keyid_index_expiration, should_close_on_drop: true }) } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 3e40c09e8..9b465b8d8 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -225,6 +225,7 @@ impl super::Analytics for SegmentAnalytics { struct Infos { env: String, experimental_enable_metrics: bool, + experimental_reduce_indexing_memory_usage: bool, db_path: bool, import_dump: bool, dump_dir: bool, @@ -258,6 +259,7 @@ impl From for Infos { let Opt { db_path, experimental_enable_metrics, + experimental_reduce_indexing_memory_usage, http_addr, master_key: _, env, @@ -300,6 +302,7 @@ impl From for Infos { Self { env, experimental_enable_metrics, + experimental_reduce_indexing_memory_usage, db_path: db_path != PathBuf::from("./data.ms"), import_dump: import_dump.is_some(), dump_dir: dump_dir != PathBuf::from("dumps/"), diff --git a/meilisearch/src/lib.rs b/meilisearch/src/lib.rs index 67d8bbd5c..bee53f6f8 100644 --- a/meilisearch/src/lib.rs +++ b/meilisearch/src/lib.rs @@ -232,6 +232,7 @@ fn 
open_or_create_database_unchecked( dumps_path: opt.dump_dir.clone(), task_db_size: opt.max_task_db_size.get_bytes() as usize, index_base_map_size: opt.max_index_size.get_bytes() as usize, + enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage, indexer_config: (&opt.indexer_options).try_into()?, autobatching_enabled: true, max_number_of_tasks: 1_000_000, diff --git a/meilisearch/src/main.rs b/meilisearch/src/main.rs index 2ab37488c..1b5e918dc 100644 --- a/meilisearch/src/main.rs +++ b/meilisearch/src/main.rs @@ -29,6 +29,11 @@ fn setup(opt: &Opt) -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> { let (opt, config_read_from) = Opt::try_build()?; + anyhow::ensure!( + !(cfg!(windows) && opt.experimental_reduce_indexing_memory_usage), + "The `experimental-reduce-indexing-memory-usage` flag is not supported on Windows" + ); + setup(&opt)?; match (opt.env.as_ref(), &opt.master_key) { diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index 8e6ca9006..0511b5033 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -48,6 +48,8 @@ const MEILI_IGNORE_DUMP_IF_DB_EXISTS: &str = "MEILI_IGNORE_DUMP_IF_DB_EXISTS"; const MEILI_DUMP_DIR: &str = "MEILI_DUMP_DIR"; const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL"; const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS"; +const MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE: &str = + "MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE"; const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml"; const DEFAULT_DB_PATH: &str = "./data.ms"; @@ -293,6 +295,11 @@ pub struct Opt { #[serde(default)] pub experimental_enable_metrics: bool, + /// Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652> + #[clap(long, env = MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE)] + #[serde(default)] + pub experimental_reduce_indexing_memory_usage: bool, + #[serde(flatten)] #[clap(flatten)] pub indexer_options: IndexerOpts, @@ -385,6 +392,7 @@ impl Opt {
#[cfg(all(not(debug_assertions), feature = "analytics"))] no_analytics, experimental_enable_metrics: enable_metrics_route, + experimental_reduce_indexing_memory_usage: reduce_indexing_memory_usage, } = self; export_to_env_if_not_present(MEILI_DB_PATH, db_path); export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr); @@ -426,6 +434,10 @@ impl Opt { MEILI_EXPERIMENTAL_ENABLE_METRICS, enable_metrics_route.to_string(), ); + export_to_env_if_not_present( + MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE, + reduce_indexing_memory_usage.to_string(), + ); indexer_options.export_to_env(); } diff --git a/milli/Cargo.toml b/milli/Cargo.toml index de0f4e31d..be4c88f23 100644 --- a/milli/Cargo.toml +++ b/milli/Cargo.toml @@ -25,8 +25,13 @@ flatten-serde-json = { path = "../flatten-serde-json" } fst = "0.4.7" fxhash = "0.2.1" geoutils = "0.5.1" -grenad = { version = "0.4.4", default-features = false, features = ["tempfile"] } -heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.5", default-features = false, features = ["lmdb", "sync-read-txn"] } +grenad = { version = "0.4.4", default-features = false, features = [ + "tempfile", +] } +heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.6", default-features = false, features = [ + "lmdb", + "sync-read-txn", +] } json-depth-checker = { path = "../json-depth-checker" } levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] } memmap2 = "0.5.10" @@ -39,12 +44,17 @@ rstar = { version = "0.10.0", features = ["serde"] } serde = { version = "1.0.160", features = ["derive"] } serde_json = { version = "1.0.95", features = ["preserve_order"] } slice-group-by = "0.3.0" -smallstr = { version = "0.3.0", features = ["serde"] } +smallstr = { version = "0.3.0", features = ["serde"] } smallvec = "1.10.0" smartstring = "1.0.1" tempfile = "3.5.0" thiserror = "1.0.40" -time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] } +time = { version = "0.3.20", 
features = [ + "serde-well-known", + "formatting", + "parsing", + "macros", +] } uuid = { version = "1.3.1", features = ["v4"] } filter-parser = { path = "../filter-parser" } @@ -63,13 +73,13 @@ big_s = "1.0.2" insta = "1.29.0" maplit = "1.0.2" md5 = "0.7.0" -rand = {version = "0.8.5", features = ["small_rng"] } +rand = { version = "0.8.5", features = ["small_rng"] } [target.'cfg(fuzzing)'.dev-dependencies] fuzzcheck = "0.12.1" [features] -all-tokenizations = [ "charabia/default" ] +all-tokenizations = ["charabia/default"] # Use POSIX semaphores instead of SysV semaphores in LMDB # For more information on this feature, see heed's Cargo.toml diff --git a/milli/src/index.rs b/milli/src/index.rs index ad53e79ea..9ea7b628c 100644 --- a/milli/src/index.rs +++ b/milli/src/index.rs @@ -170,33 +170,46 @@ impl Index { unsafe { options.flag(Flags::MdbAlwaysFreePages) }; let env = options.open(path)?; - let main = env.create_poly_database(Some(MAIN))?; - let word_docids = env.create_database(Some(WORD_DOCIDS))?; - let exact_word_docids = env.create_database(Some(EXACT_WORD_DOCIDS))?; - let word_prefix_docids = env.create_database(Some(WORD_PREFIX_DOCIDS))?; - let exact_word_prefix_docids = env.create_database(Some(EXACT_WORD_PREFIX_DOCIDS))?; - let docid_word_positions = env.create_database(Some(DOCID_WORD_POSITIONS))?; - let word_pair_proximity_docids = env.create_database(Some(WORD_PAIR_PROXIMITY_DOCIDS))?; - let script_language_docids = env.create_database(Some(SCRIPT_LANGUAGE_DOCIDS))?; + let mut wtxn = env.write_txn()?; + let main = env.create_poly_database(&mut wtxn, Some(MAIN))?; + let word_docids = env.create_database(&mut wtxn, Some(WORD_DOCIDS))?; + let exact_word_docids = env.create_database(&mut wtxn, Some(EXACT_WORD_DOCIDS))?; + let word_prefix_docids = env.create_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?; + let exact_word_prefix_docids = + env.create_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?; + let docid_word_positions = env.create_database(&mut 
wtxn, Some(DOCID_WORD_POSITIONS))?; + let word_pair_proximity_docids = + env.create_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?; + let script_language_docids = + env.create_database(&mut wtxn, Some(SCRIPT_LANGUAGE_DOCIDS))?; let word_prefix_pair_proximity_docids = - env.create_database(Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?; + env.create_database(&mut wtxn, Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?; let prefix_word_pair_proximity_docids = - env.create_database(Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?; - let word_position_docids = env.create_database(Some(WORD_POSITION_DOCIDS))?; - let word_fid_docids = env.create_database(Some(WORD_FIELD_ID_DOCIDS))?; - let field_id_word_count_docids = env.create_database(Some(FIELD_ID_WORD_COUNT_DOCIDS))?; - let word_prefix_position_docids = env.create_database(Some(WORD_PREFIX_POSITION_DOCIDS))?; - let word_prefix_fid_docids = env.create_database(Some(WORD_PREFIX_FIELD_ID_DOCIDS))?; - let facet_id_f64_docids = env.create_database(Some(FACET_ID_F64_DOCIDS))?; - let facet_id_string_docids = env.create_database(Some(FACET_ID_STRING_DOCIDS))?; - let facet_id_exists_docids = env.create_database(Some(FACET_ID_EXISTS_DOCIDS))?; - let facet_id_is_null_docids = env.create_database(Some(FACET_ID_IS_NULL_DOCIDS))?; - let facet_id_is_empty_docids = env.create_database(Some(FACET_ID_IS_EMPTY_DOCIDS))?; + env.create_database(&mut wtxn, Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?; + let word_position_docids = env.create_database(&mut wtxn, Some(WORD_POSITION_DOCIDS))?; + let word_fid_docids = env.create_database(&mut wtxn, Some(WORD_FIELD_ID_DOCIDS))?; + let field_id_word_count_docids = + env.create_database(&mut wtxn, Some(FIELD_ID_WORD_COUNT_DOCIDS))?; + let word_prefix_position_docids = + env.create_database(&mut wtxn, Some(WORD_PREFIX_POSITION_DOCIDS))?; + let word_prefix_fid_docids = + env.create_database(&mut wtxn, Some(WORD_PREFIX_FIELD_ID_DOCIDS))?; + let facet_id_f64_docids = env.create_database(&mut wtxn, 
Some(FACET_ID_F64_DOCIDS))?; + let facet_id_string_docids = + env.create_database(&mut wtxn, Some(FACET_ID_STRING_DOCIDS))?; + let facet_id_exists_docids = + env.create_database(&mut wtxn, Some(FACET_ID_EXISTS_DOCIDS))?; + let facet_id_is_null_docids = + env.create_database(&mut wtxn, Some(FACET_ID_IS_NULL_DOCIDS))?; + let facet_id_is_empty_docids = + env.create_database(&mut wtxn, Some(FACET_ID_IS_EMPTY_DOCIDS))?; - let field_id_docid_facet_f64s = env.create_database(Some(FIELD_ID_DOCID_FACET_F64S))?; + let field_id_docid_facet_f64s = + env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_F64S))?; let field_id_docid_facet_strings = - env.create_database(Some(FIELD_ID_DOCID_FACET_STRINGS))?; - let documents = env.create_database(Some(DOCUMENTS))?; + env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_STRINGS))?; + let documents = env.create_database(&mut wtxn, Some(DOCUMENTS))?; + wtxn.commit()?; Index::set_creation_dates(&env, main, created_at, updated_at)?; diff --git a/milli/src/update/facet/mod.rs b/milli/src/update/facet/mod.rs index 39a3ef437..2fd748d4d 100644 --- a/milli/src/update/facet/mod.rs +++ b/milli/src/update/facet/mod.rs @@ -261,7 +261,9 @@ pub(crate) mod test_helpers { let options = options.map_size(4096 * 4 * 1000 * 100); let tempdir = tempfile::TempDir::new().unwrap(); let env = options.open(tempdir.path()).unwrap(); - let content = env.create_database(None).unwrap(); + let mut wtxn = env.write_txn().unwrap(); + let content = env.create_database(&mut wtxn, None).unwrap(); + wtxn.commit().unwrap(); FacetIndex { content,