mirror of
https://github.com/meilisearch/meilisearch.git
synced 2025-11-22 04:36:32 +00:00
Compare commits
9 Commits
v1.23.0
...
prototype-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4492bbb5f9 | ||
|
|
a2fc7ae5e8 | ||
|
|
b817f58991 | ||
|
|
c885171029 | ||
|
|
3870a374af | ||
|
|
d41716d8f0 | ||
|
|
43a6505435 | ||
|
|
467e15d9c0 | ||
|
|
91275adb76 |
@@ -33,6 +33,10 @@ impl FileStore {
|
||||
std::fs::create_dir_all(&path)?;
|
||||
Ok(FileStore { path })
|
||||
}
|
||||
|
||||
/// The directory in which the update files are stored.
pub fn path(&self) -> &Path {
    &self.path
}
|
||||
}
|
||||
|
||||
impl FileStore {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::path::PathBuf;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Duration;
|
||||
use std::{fs, thread};
|
||||
@@ -591,4 +591,8 @@ impl IndexMapper {
|
||||
/// Record (or clear, with `None`) the index currently being updated.
///
/// The value lives behind an `RwLock`; `unwrap` here only panics if a previous
/// writer panicked while holding the lock (lock poisoning).
pub fn set_currently_updating_index(&self, index: Option<(String, Index)>) {
    *self.currently_updating_index.write().unwrap() = index;
}
|
||||
|
||||
/// The base directory under which the index environments are stored.
pub fn base_path(&self) -> &Path {
    &self.base_path
}
|
||||
}
|
||||
|
||||
@@ -50,6 +50,11 @@ impl MustStopProcessing {
|
||||
/// Clear the stop flag so that processing can resume.
pub fn reset(&self) {
    self.0.store(false, Ordering::Relaxed);
}
|
||||
|
||||
/// Return a `'static` closure reporting whether processing must stop.
///
/// The closure captures a clone of `self` (presumably a cheap shared handle —
/// confirm, since one clone is made per lambda) and re-reads the flag on every
/// call, so it always observes the current value.
pub fn as_lambda(&self) -> impl Fn() -> bool + Send + Sync + 'static {
    let clone = self.clone();
    move || clone.get()
}
|
||||
}
|
||||
|
||||
pub struct Scheduler {
|
||||
|
||||
@@ -4,6 +4,7 @@ use std::sync::atomic::Ordering;
|
||||
|
||||
use meilisearch_types::heed::CompactionOption;
|
||||
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
|
||||
use meilisearch_types::milli::InternalError;
|
||||
use meilisearch_types::tasks::{Status, Task};
|
||||
use meilisearch_types::{compression, VERSION_FILE_NAME};
|
||||
|
||||
@@ -76,6 +77,22 @@ unsafe fn remove_tasks(
|
||||
|
||||
impl IndexScheduler {
|
||||
pub(super) fn process_snapshot(
|
||||
&self,
|
||||
progress: Progress,
|
||||
tasks: Vec<Task>,
|
||||
) -> Result<Vec<Task>> {
|
||||
let compaction_option = if self.scheduler.experimental_no_snapshot_compaction {
|
||||
CompactionOption::Disabled
|
||||
} else {
|
||||
CompactionOption::Enabled
|
||||
};
|
||||
match compaction_option {
|
||||
CompactionOption::Enabled => self.process_snapshot_with_temp(progress, tasks),
|
||||
CompactionOption::Disabled => self.process_snapshot_with_pipe(progress, tasks),
|
||||
}
|
||||
}
|
||||
|
||||
fn process_snapshot_with_temp(
|
||||
&self,
|
||||
progress: Progress,
|
||||
mut tasks: Vec<Task>,
|
||||
@@ -105,12 +122,8 @@ impl IndexScheduler {
|
||||
progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
|
||||
let dst = temp_snapshot_dir.path().join("tasks");
|
||||
fs::create_dir_all(&dst)?;
|
||||
let compaction_option = if self.scheduler.experimental_no_snapshot_compaction {
|
||||
CompactionOption::Disabled
|
||||
} else {
|
||||
CompactionOption::Enabled
|
||||
};
|
||||
self.env.copy_to_path(dst.join("data.mdb"), compaction_option)?;
|
||||
|
||||
self.env.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;
|
||||
|
||||
// 2.2 Remove the current snapshot tasks
|
||||
//
|
||||
@@ -161,7 +174,7 @@ impl IndexScheduler {
|
||||
let dst = temp_snapshot_dir.path().join("indexes").join(uuid.to_string());
|
||||
fs::create_dir_all(&dst)?;
|
||||
index
|
||||
.copy_to_path(dst.join("data.mdb"), compaction_option)
|
||||
.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)
|
||||
.map_err(|e| Error::from_milli(e, Some(name.to_string())))?;
|
||||
}
|
||||
|
||||
@@ -171,7 +184,7 @@ impl IndexScheduler {
|
||||
progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
|
||||
let dst = temp_snapshot_dir.path().join("auth");
|
||||
fs::create_dir_all(&dst)?;
|
||||
self.scheduler.auth_env.copy_to_path(dst.join("data.mdb"), compaction_option)?;
|
||||
self.scheduler.auth_env.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;
|
||||
|
||||
// 5. Copy and tarball the flat snapshot
|
||||
progress.update_progress(SnapshotCreationProgress::CreateTheTarball);
|
||||
@@ -206,4 +219,138 @@ impl IndexScheduler {
|
||||
|
||||
Ok(tasks)
|
||||
}
|
||||
|
||||
fn process_snapshot_with_pipe(
|
||||
&self,
|
||||
progress: Progress,
|
||||
mut tasks: Vec<Task>,
|
||||
) -> Result<Vec<Task>> {
|
||||
progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation);
|
||||
let must_stop_processing = &self.scheduler.must_stop_processing;
|
||||
let abort_no_index = Err(Error::from_milli(InternalError::AbortedIndexation.into(), None));
|
||||
|
||||
fs::create_dir_all(&self.scheduler.snapshots_path)?;
|
||||
|
||||
// 1. Find the base path and original name of the database
|
||||
|
||||
// TODO find a better way to get this path
|
||||
let mut base_path = self.env.path().to_owned();
|
||||
base_path.pop();
|
||||
let base_path = base_path;
|
||||
let db_name = base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms");
|
||||
|
||||
// 2. Start the tarball builder. The tarball will be created on another thread from piped data.
|
||||
|
||||
let mut builder = compression::PipedArchiveBuilder::new(
|
||||
self.scheduler.snapshots_path.clone(),
|
||||
base_path.clone(),
|
||||
)?;
|
||||
|
||||
// 3. Snapshot the VERSION file
|
||||
builder.add_file_to_archive(self.scheduler.version_file_path.clone())?;
|
||||
if must_stop_processing.get() {
|
||||
return abort_no_index;
|
||||
}
|
||||
|
||||
// 4. Snapshot the index-scheduler LMDB env
|
||||
//
|
||||
// When we call copy_to_path, LMDB opens a read transaction by itself,
|
||||
// we can't provide our own. It is an issue as we would like to know
|
||||
// the update files to copy but new ones can be enqueued between the copy
|
||||
// of the env and the new transaction we open to retrieve the enqueued tasks.
|
||||
// So we prefer opening a new transaction after copying the env and copy more
|
||||
// update files than not enough.
|
||||
//
|
||||
// Note that there cannot be any update files deleted between those
|
||||
// two read operations as the task processing is synchronous.
|
||||
|
||||
// 4.1 First copy the LMDB env of the index-scheduler
|
||||
progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
|
||||
builder.add_env_to_archive(&self.env)?;
|
||||
if must_stop_processing.get() {
|
||||
return abort_no_index;
|
||||
}
|
||||
|
||||
// 4.2 Create a read transaction on the index-scheduler
|
||||
let rtxn = self.env.read_txn()?;
|
||||
|
||||
// 4.3 Only copy the update files of the enqueued tasks
|
||||
progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
|
||||
builder.add_dir_to_archive(self.queue.file_store.path().to_path_buf())?;
|
||||
let enqueued = self.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
|
||||
let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);
|
||||
progress.update_progress(update_file_progress);
|
||||
for task_id in enqueued {
|
||||
if must_stop_processing.get() {
|
||||
return abort_no_index;
|
||||
}
|
||||
let task =
|
||||
self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
|
||||
if let Some(content_uuid) = task.content_uuid() {
|
||||
let src = self.queue.file_store.get_update_path(content_uuid);
|
||||
builder.add_file_to_archive(src)?;
|
||||
}
|
||||
atomic.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
// 5. Snapshot every index
|
||||
progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexes);
|
||||
builder.add_dir_to_archive(self.index_mapper.base_path().to_path_buf())?;
|
||||
let index_mapping = self.index_mapper.index_mapping;
|
||||
let nb_indexes = index_mapping.len(&rtxn)? as u32;
|
||||
|
||||
for (i, result) in index_mapping.iter(&rtxn)?.enumerate() {
|
||||
let (name, _) = result?;
|
||||
let abort_index = || {
|
||||
Err(Error::from_milli(
|
||||
InternalError::AbortedIndexation.into(),
|
||||
Some(name.to_string()), // defer the `to_string`
|
||||
))
|
||||
};
|
||||
|
||||
if must_stop_processing.get() {
|
||||
return abort_index();
|
||||
}
|
||||
|
||||
progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
|
||||
name, i as u32, nb_indexes,
|
||||
));
|
||||
let index = self.index_mapper.index(&rtxn, name)?;
|
||||
builder.add_env_to_archive(index.raw_env())?;
|
||||
}
|
||||
|
||||
drop(rtxn);
|
||||
|
||||
if must_stop_processing.get() {
|
||||
return abort_no_index;
|
||||
}
|
||||
|
||||
// 6. Snapshot the auth LMDB env
|
||||
progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
|
||||
builder.add_env_to_archive(&self.scheduler.auth_env)?;
|
||||
|
||||
// 7. Finalize the tarball
|
||||
progress.update_progress(SnapshotCreationProgress::CreateTheTarball);
|
||||
let file =
|
||||
builder.finish(&self.scheduler.snapshots_path.join(format!("{db_name}.snapshot")))?;
|
||||
|
||||
// 8. Change the permission to make the snapshot readonly
|
||||
let mut permissions = file.metadata()?.permissions();
|
||||
permissions.set_readonly(true);
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
#[allow(clippy::non_octal_unix_permissions)]
|
||||
// rwxrwxrwx
|
||||
permissions.set_mode(0b100100100);
|
||||
}
|
||||
|
||||
file.set_permissions(permissions)?;
|
||||
|
||||
for task in &mut tasks {
|
||||
task.status = Status::Succeeded;
|
||||
}
|
||||
|
||||
Ok(tasks)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
use std::fs::{create_dir_all, File};
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use flate2::read::GzDecoder;
|
||||
use flate2::write::GzEncoder;
|
||||
use flate2::Compression;
|
||||
use milli::heed::Env;
|
||||
use tar::{Archive, Builder};
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
pub fn to_tar_gz(src: impl AsRef<Path>, dest: impl AsRef<Path>) -> anyhow::Result<()> {
|
||||
let mut f = File::create(dest)?;
|
||||
@@ -26,3 +28,112 @@ pub fn from_tar_gz(src: impl AsRef<Path>, dest: impl AsRef<Path>) -> anyhow::Res
|
||||
ar.unpack(&dest)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub struct PipedArchiveBuilder {
|
||||
base_path: PathBuf,
|
||||
tar_encoder: tar::Builder<GzEncoder<NamedTempFile>>,
|
||||
}
|
||||
|
||||
impl PipedArchiveBuilder {
|
||||
pub fn new(dest_dir: PathBuf, base_path: PathBuf) -> anyhow::Result<Self> {
|
||||
let temp_archive = tempfile::NamedTempFile::new_in(&dest_dir)?;
|
||||
|
||||
let gz_encoder = GzEncoder::new(temp_archive, Compression::default());
|
||||
let mut tar_encoder = Builder::new(gz_encoder);
|
||||
let base_path_in_archive = PathInArchive::from_absolute_and_base(&base_path, &base_path);
|
||||
tar_encoder.append_dir(base_path_in_archive.as_path(), &base_path)?;
|
||||
|
||||
Ok(Self { base_path, tar_encoder })
|
||||
}
|
||||
|
||||
/// Add a heed environment to the archive.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// - Errors originating with that thread:
|
||||
/// - Heed errors, if taking a write transaction fails
|
||||
/// - If the copy of the environment fails.
|
||||
/// - If there is an I/O error opening the database at the environment's path.
|
||||
/// - Errors originating with another thread:
|
||||
/// - If the cancellation thread panicked or otherwise dropped its receiver.
|
||||
/// - If the processing thread panicked or otherwise dropped its receiver.
|
||||
pub fn add_env_to_archive<T>(&mut self, env: &Env<T>) -> anyhow::Result<()> {
|
||||
let path = env.path().to_path_buf();
|
||||
// make sure that the environment cannot change while it is being added to the archive,
|
||||
// as any concurrent change would corrupt the copy.
|
||||
let env_wtxn = env.write_txn()?;
|
||||
|
||||
let dir_path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
|
||||
|
||||
self.tar_encoder.append_dir(dir_path_in_archive.as_path(), &path)?;
|
||||
|
||||
let path = path.join("data.mdb");
|
||||
let path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
|
||||
|
||||
self.tar_encoder.append_path_with_name(&path, path_in_archive.as_path())?;
|
||||
|
||||
// no change we might want to commit
|
||||
env_wtxn.abort();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Add a file to the archive
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// - If the processing thread panicked or otherwise dropped its receiver.
|
||||
pub fn add_file_to_archive(&mut self, path: PathBuf) -> anyhow::Result<()> {
|
||||
let path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
|
||||
self.tar_encoder.append_path_with_name(&path, path_in_archive.as_path())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Add a directory name (**without its contents**) to the archive.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// - If the processing thread panicked or otherwise dropped its receiver.
|
||||
pub fn add_dir_to_archive(&mut self, path: PathBuf) -> anyhow::Result<()> {
|
||||
let path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
|
||||
|
||||
self.tar_encoder.append_dir(path_in_archive.as_path(), &path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Finalize the archive and persists it to disk.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// - Originating with the current thread:
|
||||
/// - If persisting the archive fails
|
||||
/// - Originating with another thread:
|
||||
/// - If the cancellation thread panicked.
|
||||
/// - If the processing thread panicked or otherwise terminated in error.
|
||||
pub fn finish(self, dest_path: &Path) -> anyhow::Result<File> {
|
||||
let gz_encoder = self.tar_encoder.into_inner()?;
|
||||
let mut temp_archive = gz_encoder.finish()?;
|
||||
temp_archive.flush()?;
|
||||
|
||||
let archive = temp_archive.persist(dest_path)?;
|
||||
Ok(archive)
|
||||
}
|
||||
}
|
||||
|
||||
/// A path as it should appear inside the archive: relative to the archive's
/// base directory when possible (prefixed with `./`), left as-is otherwise.
struct PathInArchive(PathBuf);

impl PathInArchive {
    /// Compute the in-archive path for `absolute`, expressed relative to `base`.
    ///
    /// `absolute` is canonicalized first so that symlinks and `..` components
    /// do not defeat the prefix check. If canonicalization fails (e.g. the
    /// path does not exist or is not accessible), the path is used as given
    /// instead of panicking; note `base` is assumed to already be canonical —
    /// TODO confirm at the call sites.
    pub fn from_absolute_and_base(absolute: &Path, base: &Path) -> Self {
        // Was `canonicalize().unwrap()` under a `/// FIXME` marker: any I/O
        // error (missing path, permissions) would panic. Fall back to the
        // caller-supplied path instead.
        let canonical = absolute.canonicalize().unwrap_or_else(|_| absolute.to_path_buf());
        let relative = match canonical.strip_prefix(base) {
            Ok(stripped) => Path::new(".").join(stripped),
            Err(_) => absolute.to_path_buf(),
        };

        Self(relative)
    }

    /// The path to use when writing the entry into the archive.
    pub fn as_path(&self) -> &Path {
        self.0.as_path()
    }
}
|
||||
|
||||
@@ -1983,6 +1983,11 @@ impl Index {
|
||||
|
||||
Ok(sizes)
|
||||
}
|
||||
|
||||
/// The underlying heed environment, for raw (schema-agnostic) access,
/// e.g. to copy the whole env into a snapshot.
pub fn raw_env(&self) -> &heed::Env<WithoutTls> {
    &self.env
}
|
||||
}
|
||||
|
||||
pub struct EmbeddingsWithMetadata {
|
||||
|
||||
Reference in New Issue
Block a user