Get rid of meilisearch-lib
@@ -1,17 +0,0 @@
pub mod v2;
pub mod v3;
pub mod v4;

/// Parses the v1 version of the Asc ranking rules `asc(price)` and returns the field name.
pub fn asc_ranking_rule(text: &str) -> Option<&str> {
    text.split_once("asc(")
        .and_then(|(_, tail)| tail.rsplit_once(')'))
        .map(|(field, _)| field)
}

/// Parses the v1 version of the Desc ranking rules `desc(price)` and returns the field name.
pub fn desc_ranking_rule(text: &str) -> Option<&str> {
    text.split_once("desc(")
        .and_then(|(_, tail)| tail.rsplit_once(')'))
        .map(|(field, _)| field)
}
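
A quick usage sketch for the two parsers above (hypothetical calls, not part of the deleted file):

// `asc(price)` / `desc(price)` are the legacy v1 spellings; anything else yields None.
assert_eq!(asc_ranking_rule("asc(price)"), Some("price"));
assert_eq!(desc_ranking_rule("desc(release_date)"), Some("release_date"));
assert_eq!(asc_ranking_rule("typo"), None);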

@@ -1,205 +0,0 @@
use meilisearch_types::error::{Code, ResponseError};
use meilisearch_types::index_uid::IndexUid;
use milli::update::IndexDocumentsMethod;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
use uuid::Uuid;

use super::v4::{Task, TaskContent, TaskEvent};
use crate::index::{Settings, Unchecked};
use crate::tasks::task::{DocumentDeletion, TaskId, TaskResult};

use super::v2;

#[derive(Serialize, Deserialize)]
pub struct DumpEntry {
    pub uuid: Uuid,
    pub uid: String,
}

#[derive(Serialize, Deserialize)]
pub struct UpdateEntry {
    pub uuid: Uuid,
    pub update: UpdateStatus,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "status", rename_all = "camelCase")]
pub enum UpdateStatus {
    Processing(Processing),
    Enqueued(Enqueued),
    Processed(Processed),
    Failed(Failed),
}

impl From<v2::UpdateResult> for TaskResult {
    fn from(other: v2::UpdateResult) -> Self {
        match other {
            v2::UpdateResult::DocumentsAddition(result) => TaskResult::DocumentAddition {
                indexed_documents: result.nb_documents as u64,
            },
            v2::UpdateResult::DocumentDeletion { deleted } => TaskResult::DocumentDeletion {
                deleted_documents: deleted,
            },
            v2::UpdateResult::Other => TaskResult::Other,
        }
    }
}

#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Update {
    DeleteDocuments(Vec<String>),
    DocumentAddition {
        primary_key: Option<String>,
        method: IndexDocumentsMethod,
        content_uuid: Uuid,
    },
    Settings(Settings<Unchecked>),
    ClearDocuments,
}

impl From<Update> for super::v4::TaskContent {
    fn from(update: Update) -> Self {
        match update {
            Update::DeleteDocuments(ids) => {
                TaskContent::DocumentDeletion(DocumentDeletion::Ids(ids))
            }
            Update::DocumentAddition {
                primary_key,
                method,
                ..
            } => TaskContent::DocumentAddition {
                content_uuid: Uuid::default(),
                merge_strategy: method,
                primary_key,
                // document count is unknown for legacy updates
                documents_count: 0,
                allow_index_creation: true,
            },
            Update::Settings(settings) => TaskContent::SettingsUpdate {
                settings,
                // There is no way to know now, so we assume it isn't
                is_deletion: false,
                allow_index_creation: true,
            },
            Update::ClearDocuments => TaskContent::DocumentDeletion(DocumentDeletion::Clear),
        }
    }
}

#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum UpdateMeta {
    DocumentsAddition {
        method: IndexDocumentsMethod,
        primary_key: Option<String>,
    },
    ClearDocuments,
    DeleteDocuments {
        ids: Vec<String>,
    },
    Settings(Settings<Unchecked>),
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Enqueued {
    pub update_id: u64,
    pub meta: Update,
    #[serde(with = "time::serde::rfc3339")]
    pub enqueued_at: OffsetDateTime,
}

impl Enqueued {
    fn update_task(self, task: &mut Task) {
        // we do not erase the `TaskId` that was given to us.
        task.content = self.meta.into();
        task.events.push(TaskEvent::Created(self.enqueued_at));
    }
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Processed {
    pub success: v2::UpdateResult,
    #[serde(with = "time::serde::rfc3339")]
    pub processed_at: OffsetDateTime,
    #[serde(flatten)]
    pub from: Processing,
}

impl Processed {
    fn update_task(self, task: &mut Task) {
        self.from.update_task(task);

        let event = TaskEvent::Succeded {
            result: TaskResult::from(self.success),
            timestamp: self.processed_at,
        };
        task.events.push(event);
    }
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Processing {
    #[serde(flatten)]
    pub from: Enqueued,
    #[serde(with = "time::serde::rfc3339")]
    pub started_processing_at: OffsetDateTime,
}

impl Processing {
    fn update_task(self, task: &mut Task) {
        self.from.update_task(task);

        let event = TaskEvent::Processing(self.started_processing_at);
        task.events.push(event);
    }
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Failed {
    #[serde(flatten)]
    pub from: Processing,
    pub msg: String,
    pub code: Code,
    #[serde(with = "time::serde::rfc3339")]
    pub failed_at: OffsetDateTime,
}

impl Failed {
    fn update_task(self, task: &mut Task) {
        self.from.update_task(task);

        let event = TaskEvent::Failed {
            error: ResponseError::from_msg(self.msg, self.code),
            timestamp: self.failed_at,
        };
        task.events.push(event);
    }
}

impl From<(UpdateStatus, String, TaskId)> for Task {
    fn from((update, uid, task_id): (UpdateStatus, String, TaskId)) -> Self {
        // Dummy task
        let mut task = super::v4::Task {
            id: task_id,
            index_uid: IndexUid::new_unchecked(uid),
            content: super::v4::TaskContent::IndexDeletion,
            events: Vec::new(),
        };

        match update {
            UpdateStatus::Processing(u) => u.update_task(&mut task),
            UpdateStatus::Enqueued(u) => u.update_task(&mut task),
            UpdateStatus::Processed(u) => u.update_task(&mut task),
            UpdateStatus::Failed(u) => u.update_task(&mut task),
        }

        task
    }
}
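
How the conversion above replays an update's lifecycle: each wrapper struct first delegates to the stage it wraps (`Processed` -> `Processing` -> `Enqueued`), so the resulting task accumulates events in chronological order. A sketch, assuming a pre-built `processed: Processed` value:

// `TaskEvent::Succeded` is the spelling used by the v4 module above.
let task = Task::from((UpdateStatus::Processed(processed), "movies".to_string(), 42));
assert_eq!(task.id, 42);
// Created(enqueued_at), Processing(started_processing_at), Succeded { .. }
assert_eq!(task.events.len(), 3);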

@@ -1,42 +0,0 @@
use meilisearch_auth::error::AuthControllerError;
use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::internal_error;

use crate::{index_resolver::error::IndexResolverError, tasks::error::TaskError};

pub type Result<T> = std::result::Result<T, DumpError>;

#[derive(thiserror::Error, Debug)]
pub enum DumpError {
    #[error("An internal error has occurred. `{0}`.")]
    Internal(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("{0}")]
    IndexResolver(Box<IndexResolverError>),
}

internal_error!(
    DumpError: milli::heed::Error,
    std::io::Error,
    tokio::task::JoinError,
    tokio::sync::oneshot::error::RecvError,
    serde_json::error::Error,
    tempfile::PersistError,
    fs_extra::error::Error,
    AuthControllerError,
    TaskError
);

impl From<IndexResolverError> for DumpError {
    fn from(e: IndexResolverError) -> Self {
        Self::IndexResolver(Box::new(e))
    }
}

impl ErrorCode for DumpError {
    fn error_code(&self) -> Code {
        match self {
            DumpError::Internal(_) => Code::Internal,
            DumpError::IndexResolver(e) => e.error_code(),
        }
    }
}
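
For context, `internal_error!` generates `From` conversions into the catch-all `Internal` variant for each listed error type, which is what lets the dump code use `?` freely. A sketch of one generated impl (assumed expansion, shown for illustration):

impl From<std::io::Error> for DumpError {
    fn from(error: std::io::Error) -> Self {
        // every "internal" error is boxed into the catch-all variant
        Self::Internal(Box::new(error))
    }
}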

@@ -1,188 +0,0 @@
#[cfg(not(test))]
pub use real::DumpHandler;

#[cfg(test)]
pub use test::MockDumpHandler as DumpHandler;

use time::{macros::format_description, OffsetDateTime};

/// Generate uid from creation date
pub fn generate_uid() -> String {
    OffsetDateTime::now_utc()
        .format(format_description!(
            "[year repr:full][month repr:numerical][day padding:zero]-[hour padding:zero][minute padding:zero][second padding:zero][subsecond digits:3]"
        ))
        .unwrap()
}
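
A sketch of what the format description above produces (illustrative value):

// dump uids are sortable date-time stems:
// year month day - hour minute second milliseconds
let uid = generate_uid(); // e.g. "20220602-112200123"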

mod real {
    use std::path::PathBuf;
    use std::sync::Arc;

    use log::{info, trace};
    use meilisearch_auth::AuthController;
    use milli::heed::Env;
    use tokio::fs::create_dir_all;
    use tokio::io::AsyncWriteExt;

    use crate::analytics;
    use crate::compression::to_tar_gz;
    use crate::dump::error::{DumpError, Result};
    use crate::dump::{MetadataVersion, META_FILE_NAME};
    use crate::index_resolver::{
        index_store::IndexStore, meta_store::IndexMetaStore, IndexResolver,
    };
    use crate::tasks::TaskStore;
    use crate::update_file_store::UpdateFileStore;

    pub struct DumpHandler<U, I> {
        dump_path: PathBuf,
        db_path: PathBuf,
        update_file_store: UpdateFileStore,
        task_store_size: usize,
        index_db_size: usize,
        env: Arc<Env>,
        index_resolver: Arc<IndexResolver<U, I>>,
    }

    impl<U, I> DumpHandler<U, I>
    where
        U: IndexMetaStore + Sync + Send + 'static,
        I: IndexStore + Sync + Send + 'static,
    {
        pub fn new(
            dump_path: PathBuf,
            db_path: PathBuf,
            update_file_store: UpdateFileStore,
            task_store_size: usize,
            index_db_size: usize,
            env: Arc<Env>,
            index_resolver: Arc<IndexResolver<U, I>>,
        ) -> Self {
            Self {
                dump_path,
                db_path,
                update_file_store,
                task_store_size,
                index_db_size,
                env,
                index_resolver,
            }
        }

        pub async fn run(&self, uid: String) -> Result<()> {
            trace!("Performing dump.");

            create_dir_all(&self.dump_path).await?;

            let temp_dump_dir = tokio::task::spawn_blocking(tempfile::TempDir::new).await??;
            let temp_dump_path = temp_dump_dir.path().to_owned();

            let meta = MetadataVersion::new_v5(self.index_db_size, self.task_store_size);
            let meta_path = temp_dump_path.join(META_FILE_NAME);

            let meta_bytes = serde_json::to_vec(&meta)?;
            let mut meta_file = tokio::fs::File::create(&meta_path).await?;
            meta_file.write_all(&meta_bytes).await?;

            analytics::copy_user_id(&self.db_path, &temp_dump_path);

            create_dir_all(&temp_dump_path.join("indexes")).await?;

            let db_path = self.db_path.clone();
            let temp_dump_path_clone = temp_dump_path.clone();
            tokio::task::spawn_blocking(move || -> Result<()> {
                AuthController::dump(db_path, temp_dump_path_clone)?;
                Ok(())
            })
            .await??;
            TaskStore::dump(
                self.env.clone(),
                &temp_dump_path,
                self.update_file_store.clone(),
            )
            .await?;
            self.index_resolver.dump(&temp_dump_path).await?;

            let dump_path = self.dump_path.clone();
            let dump_path = tokio::task::spawn_blocking(move || -> Result<PathBuf> {
                // for now we simply copy the updates/updates_files
                // FIXME: We may copy more files than necessary, if new files are added while we are
                // performing the dump. We need a way to filter them out.

                let temp_dump_file = tempfile::NamedTempFile::new_in(&dump_path)?;
                to_tar_gz(temp_dump_path, temp_dump_file.path())
                    .map_err(|e| DumpError::Internal(e.into()))?;

                let dump_path = dump_path.join(uid).with_extension("dump");
                temp_dump_file.persist(&dump_path)?;

                Ok(dump_path)
            })
            .await??;

            info!("Created dump in {:?}.", dump_path);

            Ok(())
        }
    }
}

#[cfg(test)]
mod test {
    use std::path::PathBuf;
    use std::sync::Arc;

    use milli::heed::Env;
    use nelson::Mocker;

    use crate::dump::error::Result;
    use crate::index_resolver::IndexResolver;
    use crate::index_resolver::{index_store::IndexStore, meta_store::IndexMetaStore};
    use crate::update_file_store::UpdateFileStore;

    use super::*;

    pub enum MockDumpHandler<U, I> {
        Real(super::real::DumpHandler<U, I>),
        Mock(Mocker),
    }

    impl<U, I> MockDumpHandler<U, I> {
        pub fn mock(mocker: Mocker) -> Self {
            Self::Mock(mocker)
        }
    }

    impl<U, I> MockDumpHandler<U, I>
    where
        U: IndexMetaStore + Sync + Send + 'static,
        I: IndexStore + Sync + Send + 'static,
    {
        pub fn new(
            dump_path: PathBuf,
            db_path: PathBuf,
            update_file_store: UpdateFileStore,
            task_store_size: usize,
            index_db_size: usize,
            env: Arc<Env>,
            index_resolver: Arc<IndexResolver<U, I>>,
        ) -> Self {
            Self::Real(super::real::DumpHandler::new(
                dump_path,
                db_path,
                update_file_store,
                task_store_size,
                index_db_size,
                env,
                index_resolver,
            ))
        }
        pub async fn run(&self, uid: String) -> Result<()> {
            match self {
                DumpHandler::Real(real) => real.run(uid).await,
                DumpHandler::Mock(mocker) => unsafe { mocker.get("run").call(uid) },
            }
        }
    }
}
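
A hypothetical call site for the handler above, assuming a fully constructed `dump_handler`:

// `run` stages everything in a tempdir, tars it, and persists it as
// `<dump_path>/<uid>.dump`.
let uid = generate_uid();
dump_handler.run(uid).await?;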

@@ -1,4 +0,0 @@
pub mod v2;
pub mod v3;
pub mod v4;
pub mod v5;

@@ -1,216 +0,0 @@
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};

use serde_json::{Deserializer, Value};
use tempfile::NamedTempFile;

use crate::dump::compat::{self, v2, v3};
use crate::dump::Metadata;
use crate::options::IndexerOpts;

/// The dump v2 reads the dump folder and patches all the needed files to make it compatible with a
/// dump v3, then calls the dump v3 to actually handle the dump.
pub fn load_dump(
    meta: Metadata,
    src: impl AsRef<Path>,
    dst: impl AsRef<Path>,
    index_db_size: usize,
    update_db_size: usize,
    indexing_options: &IndexerOpts,
) -> anyhow::Result<()> {
    log::info!("Patching dump V2 to dump V3...");
    let indexes_path = src.as_ref().join("indexes");

    let dir_entries = std::fs::read_dir(indexes_path)?;
    for entry in dir_entries {
        let entry = entry?;

        // rename the index folder
        let path = entry.path();
        let new_path = patch_index_uuid_path(&path).expect("invalid index folder.");

        std::fs::rename(path, &new_path)?;

        let settings_path = new_path.join("meta.json");

        patch_settings(settings_path)?;
    }

    let update_dir = src.as_ref().join("updates");
    let update_path = update_dir.join("data.jsonl");
    patch_updates(update_dir, update_path)?;

    super::v3::load_dump(
        meta,
        src,
        dst,
        index_db_size,
        update_db_size,
        indexing_options,
    )
}

fn patch_index_uuid_path(path: &Path) -> Option<PathBuf> {
    let uuid = path.file_name()?.to_str()?.trim_start_matches("index-");
    let new_path = path.parent()?.join(uuid);
    Some(new_path)
}
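
What the rename above does, on a hypothetical v2 layout:

// v2 prefixes index folders with "index-"; v3 expects the bare uuid:
// indexes/index-25f10bb8-6ea8-42f0-bd48-ad5857f77648
//   -> indexes/25f10bb8-6ea8-42f0-bd48-ad5857f77648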

fn patch_settings(path: impl AsRef<Path>) -> anyhow::Result<()> {
    let mut meta_file = File::open(&path)?;
    let mut meta: Value = serde_json::from_reader(&mut meta_file)?;

    // We first deserialize the dump meta into a serde_json::Value and change
    // the custom ranking rules settings from the old format to the new format.
    if let Some(ranking_rules) = meta.pointer_mut("/settings/rankingRules") {
        patch_custom_ranking_rules(ranking_rules);
    }

    let mut meta_file = OpenOptions::new().truncate(true).write(true).open(path)?;

    serde_json::to_writer(&mut meta_file, &meta)?;

    Ok(())
}

fn patch_updates(dir: impl AsRef<Path>, path: impl AsRef<Path>) -> anyhow::Result<()> {
    let mut output_update_file = NamedTempFile::new_in(&dir)?;
    let update_file = File::open(&path)?;

    let stream = Deserializer::from_reader(update_file).into_iter::<v2::UpdateEntry>();

    for update in stream {
        let update_entry = update?;

        let update_entry = v3::UpdateEntry::from(update_entry);

        serde_json::to_writer(&mut output_update_file, &update_entry)?;
        output_update_file.write_all(b"\n")?;
    }

    output_update_file.flush()?;
    output_update_file.persist(path)?;

    Ok(())
}

/// Converts the ranking rules from the format `asc(_)`, `desc(_)` to the format `_:asc`, `_:desc`.
///
/// This is done for compatibility reasons, and to avoid a new dump version,
/// since the new syntax was introduced soon after the new dump version.
fn patch_custom_ranking_rules(ranking_rules: &mut Value) {
    *ranking_rules = match ranking_rules.take() {
        Value::Array(values) => values
            .into_iter()
            .filter_map(|value| match value {
                Value::String(s) if s.starts_with("asc") => compat::asc_ranking_rule(&s)
                    .map(|f| format!("{}:asc", f))
                    .map(Value::String),
                Value::String(s) if s.starts_with("desc") => compat::desc_ranking_rule(&s)
                    .map(|f| format!("{}:desc", f))
                    .map(Value::String),
                otherwise => Some(otherwise),
            })
            .collect(),
        otherwise => otherwise,
    }
}
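
A before/after sketch of the rewrite above (hypothetical settings value):

let mut rules = serde_json::json!(["words", "asc(price)", "desc(rank)"]);
patch_custom_ranking_rules(&mut rules);
// built-in rules pass through untouched; only the legacy asc()/desc()
// syntax is rewritten to the field:direction form
assert_eq!(rules, serde_json::json!(["words", "price:asc", "rank:desc"]));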

impl From<v2::UpdateEntry> for v3::UpdateEntry {
    fn from(v2::UpdateEntry { uuid, update }: v2::UpdateEntry) -> Self {
        let update = match update {
            v2::UpdateStatus::Processing(meta) => v3::UpdateStatus::Processing(meta.into()),
            v2::UpdateStatus::Enqueued(meta) => v3::UpdateStatus::Enqueued(meta.into()),
            v2::UpdateStatus::Processed(meta) => v3::UpdateStatus::Processed(meta.into()),
            v2::UpdateStatus::Aborted(_) => unreachable!("Updates could never be aborted."),
            v2::UpdateStatus::Failed(meta) => v3::UpdateStatus::Failed(meta.into()),
        };

        Self { uuid, update }
    }
}

impl From<v2::Failed> for v3::Failed {
    fn from(other: v2::Failed) -> Self {
        let v2::Failed {
            from,
            error,
            failed_at,
        } = other;

        Self {
            from: from.into(),
            msg: error.message,
            code: v2::error_code_from_str(&error.error_code)
                .expect("Invalid update: Invalid error code"),
            failed_at,
        }
    }
}

impl From<v2::Processing> for v3::Processing {
    fn from(other: v2::Processing) -> Self {
        let v2::Processing {
            from,
            started_processing_at,
        } = other;

        Self {
            from: from.into(),
            started_processing_at,
        }
    }
}

impl From<v2::Enqueued> for v3::Enqueued {
    fn from(other: v2::Enqueued) -> Self {
        let v2::Enqueued {
            update_id,
            meta,
            enqueued_at,
            content,
        } = other;

        let meta = match meta {
            v2::UpdateMeta::DocumentsAddition {
                method,
                primary_key,
                ..
            } => {
                v3::Update::DocumentAddition {
                    primary_key,
                    method,
                    // Just ignore it if the uuid is not present. If it is needed later, an error
                    // will be thrown.
                    content_uuid: content.unwrap_or_default(),
                }
            }
            v2::UpdateMeta::ClearDocuments => v3::Update::ClearDocuments,
            v2::UpdateMeta::DeleteDocuments { ids } => v3::Update::DeleteDocuments(ids),
            v2::UpdateMeta::Settings(settings) => v3::Update::Settings(settings),
        };

        Self {
            update_id,
            meta,
            enqueued_at,
        }
    }
}

impl From<v2::Processed> for v3::Processed {
    fn from(other: v2::Processed) -> Self {
        let v2::Processed {
            from,
            success,
            processed_at,
        } = other;

        Self {
            success,
            processed_at,
            from: from.into(),
        }
    }
}

@@ -1,136 +0,0 @@
use std::collections::HashMap;
use std::fs::{self, File};
use std::io::{BufReader, BufWriter, Write};
use std::path::Path;

use anyhow::Context;
use fs_extra::dir::{self, CopyOptions};
use log::info;
use tempfile::tempdir;
use uuid::Uuid;

use crate::dump::compat::{self, v3};
use crate::dump::Metadata;
use crate::index_resolver::meta_store::{DumpEntry, IndexMeta};
use crate::options::IndexerOpts;
use crate::tasks::task::TaskId;

/// dump structure for V3:
/// .
/// ├── indexes
/// │   └── 25f10bb8-6ea8-42f0-bd48-ad5857f77648
/// │       ├── documents.jsonl
/// │       └── meta.json
/// ├── index_uuids
/// │   └── data.jsonl
/// ├── metadata.json
/// └── updates
///     └── data.jsonl
pub fn load_dump(
    meta: Metadata,
    src: impl AsRef<Path>,
    dst: impl AsRef<Path>,
    index_db_size: usize,
    meta_env_size: usize,
    indexing_options: &IndexerOpts,
) -> anyhow::Result<()> {
    info!("Patching dump V3 to dump V4...");

    let patched_dir = tempdir()?;

    let options = CopyOptions::default();
    dir::copy(src.as_ref().join("indexes"), patched_dir.path(), &options)?;
    dir::copy(
        src.as_ref().join("index_uuids"),
        patched_dir.path(),
        &options,
    )?;

    let uuid_map = patch_index_meta(
        src.as_ref().join("index_uuids/data.jsonl"),
        patched_dir.path(),
    )?;

    fs::copy(
        src.as_ref().join("metadata.json"),
        patched_dir.path().join("metadata.json"),
    )?;

    patch_updates(&src, patched_dir.path(), uuid_map)?;

    super::v4::load_dump(
        meta,
        patched_dir.path(),
        dst,
        index_db_size,
        meta_env_size,
        indexing_options,
    )
}

fn patch_index_meta(
    path: impl AsRef<Path>,
    dst: impl AsRef<Path>,
) -> anyhow::Result<HashMap<Uuid, String>> {
    let file = BufReader::new(File::open(path)?);
    let dst = dst.as_ref().join("index_uuids");
    fs::create_dir_all(&dst)?;
    let mut dst_file = File::create(dst.join("data.jsonl"))?;

    let map = serde_json::Deserializer::from_reader(file)
        .into_iter::<v3::DumpEntry>()
        .try_fold(HashMap::new(), |mut map, entry| -> anyhow::Result<_> {
            let entry = entry?;
            map.insert(entry.uuid, entry.uid.clone());
            let meta = IndexMeta {
                uuid: entry.uuid,
                // This information is lost, so we patch it to 0.
                creation_task_id: 0,
            };
            let entry = DumpEntry {
                uid: entry.uid,
                index_meta: meta,
            };
            serde_json::to_writer(&mut dst_file, &entry)?;
            dst_file.write_all(b"\n")?;
            Ok(map)
        })?;

    dst_file.flush()?;

    Ok(map)
}
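
A sketch of one patched line in `index_uuids/data.jsonl`, assuming default serde field names for the `DumpEntry`/`IndexMeta` types used above:

// {"uid":"movies","index_meta":{"uuid":"25f10bb8-6ea8-42f0-bd48-ad5857f77648","creation_task_id":0}}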

fn patch_updates(
    src: impl AsRef<Path>,
    dst: impl AsRef<Path>,
    uuid_map: HashMap<Uuid, String>,
) -> anyhow::Result<()> {
    let dst = dst.as_ref().join("updates");
    fs::create_dir_all(&dst)?;

    let mut dst_file = BufWriter::new(File::create(dst.join("data.jsonl"))?);
    let src_file = BufReader::new(File::open(src.as_ref().join("updates/data.jsonl"))?);

    serde_json::Deserializer::from_reader(src_file)
        .into_iter::<v3::UpdateEntry>()
        .enumerate()
        .try_for_each(|(task_id, entry)| -> anyhow::Result<()> {
            let entry = entry?;
            let name = uuid_map
                .get(&entry.uuid)
                .with_context(|| format!("Unknown index uuid: {}", entry.uuid))?
                .clone();
            serde_json::to_writer(
                &mut dst_file,
                &compat::v4::Task::from((entry.update, name, task_id as TaskId)),
            )?;
            dst_file.write_all(b"\n")?;
            Ok(())
        })?;

    dst_file.flush()?;

    Ok(())
}

@@ -1,47 +0,0 @@
use std::{path::Path, sync::Arc};

use log::info;
use meilisearch_auth::AuthController;
use milli::heed::EnvOpenOptions;

use crate::analytics;
use crate::dump::Metadata;
use crate::index_resolver::IndexResolver;
use crate::options::IndexerOpts;
use crate::tasks::TaskStore;
use crate::update_file_store::UpdateFileStore;

pub fn load_dump(
    meta: Metadata,
    src: impl AsRef<Path>,
    dst: impl AsRef<Path>,
    index_db_size: usize,
    meta_env_size: usize,
    indexing_options: &IndexerOpts,
) -> anyhow::Result<()> {
    info!(
        "Loading dump from {}, dump database version: {}, dump version: V5",
        meta.dump_date, meta.db_version
    );

    let mut options = EnvOpenOptions::new();
    options.map_size(meta_env_size);
    options.max_dbs(100);
    let env = Arc::new(options.open(&dst)?);

    IndexResolver::load_dump(
        src.as_ref(),
        &dst,
        index_db_size,
        env.clone(),
        indexing_options,
    )?;
    UpdateFileStore::load_dump(src.as_ref(), &dst)?;
    TaskStore::load_dump(&src, env)?;
    AuthController::load_dump(&src, &dst)?;
    analytics::copy_user_id(src.as_ref(), dst.as_ref());

    info!("Loading indexes.");

    Ok(())
}

@@ -1,262 +0,0 @@
use std::fs::File;
use std::path::Path;

use anyhow::bail;
use log::info;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

use tempfile::TempDir;

use crate::compression::from_tar_gz;
use crate::options::IndexerOpts;

use self::loaders::{v2, v3, v4, v5};

pub use handler::{generate_uid, DumpHandler};

mod compat;
pub mod error;
mod handler;
mod loaders;

const META_FILE_NAME: &str = "metadata.json";

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    db_version: String,
    index_db_size: usize,
    update_db_size: usize,
    #[serde(with = "time::serde::rfc3339")]
    dump_date: OffsetDateTime,
}

impl Metadata {
    pub fn new(index_db_size: usize, update_db_size: usize) -> Self {
        Self {
            db_version: env!("CARGO_PKG_VERSION").to_string(),
            index_db_size,
            update_db_size,
            dump_date: OffsetDateTime::now_utc(),
        }
    }
}

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct MetadataV1 {
    pub db_version: String,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "dumpVersion")]
pub enum MetadataVersion {
    V1(MetadataV1),
    V2(Metadata),
    V3(Metadata),
    V4(Metadata),
    // V5 is forward compatible with V4 but not backward compatible.
    V5(Metadata),
}
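
Because `MetadataVersion` is internally tagged with `dumpVersion` and `Metadata` renames its fields to camelCase, a serialized `metadata.json` looks roughly like this (illustrative values):

// {"dumpVersion":"V5","dbVersion":"0.28.0","indexDbSize":104857600,
//  "updateDbSize":104857600,"dumpDate":"2022-06-02T11:22:00.123Z"}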

impl MetadataVersion {
    pub fn load_dump(
        self,
        src: impl AsRef<Path>,
        dst: impl AsRef<Path>,
        index_db_size: usize,
        meta_env_size: usize,
        indexing_options: &IndexerOpts,
    ) -> anyhow::Result<()> {
        match self {
            MetadataVersion::V1(_meta) => {
                anyhow::bail!("The version 1 of the dumps is not supported anymore. You can re-export your dump from a version between 0.21 and 0.24, or start fresh from a version 0.25 onwards.")
            }
            MetadataVersion::V2(meta) => v2::load_dump(
                meta,
                src,
                dst,
                index_db_size,
                meta_env_size,
                indexing_options,
            )?,
            MetadataVersion::V3(meta) => v3::load_dump(
                meta,
                src,
                dst,
                index_db_size,
                meta_env_size,
                indexing_options,
            )?,
            MetadataVersion::V4(meta) => v4::load_dump(
                meta,
                src,
                dst,
                index_db_size,
                meta_env_size,
                indexing_options,
            )?,
            MetadataVersion::V5(meta) => v5::load_dump(
                meta,
                src,
                dst,
                index_db_size,
                meta_env_size,
                indexing_options,
            )?,
        }

        Ok(())
    }

    pub fn new_v5(index_db_size: usize, update_db_size: usize) -> Self {
        let meta = Metadata::new(index_db_size, update_db_size);
        Self::V5(meta)
    }

    pub fn db_version(&self) -> &str {
        match self {
            Self::V1(meta) => &meta.db_version,
            Self::V2(meta) | Self::V3(meta) | Self::V4(meta) | Self::V5(meta) => &meta.db_version,
        }
    }

    pub fn version(&self) -> &'static str {
        match self {
            MetadataVersion::V1(_) => "V1",
            MetadataVersion::V2(_) => "V2",
            MetadataVersion::V3(_) => "V3",
            MetadataVersion::V4(_) => "V4",
            MetadataVersion::V5(_) => "V5",
        }
    }

    pub fn dump_date(&self) -> Option<&OffsetDateTime> {
        match self {
            MetadataVersion::V1(_) => None,
            MetadataVersion::V2(meta)
            | MetadataVersion::V3(meta)
            | MetadataVersion::V4(meta)
            | MetadataVersion::V5(meta) => Some(&meta.dump_date),
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
#[serde(rename_all = "snake_case")]
pub enum DumpStatus {
    Done,
    InProgress,
    Failed,
}

pub fn load_dump(
    dst_path: impl AsRef<Path>,
    src_path: impl AsRef<Path>,
    ignore_dump_if_db_exists: bool,
    ignore_missing_dump: bool,
    index_db_size: usize,
    update_db_size: usize,
    indexer_opts: &IndexerOpts,
) -> anyhow::Result<()> {
    let empty_db = crate::is_empty_db(&dst_path);
    let src_path_exists = src_path.as_ref().exists();

    if empty_db && src_path_exists {
        let (tmp_src, tmp_dst, meta) = extract_dump(&dst_path, &src_path)?;
        meta.load_dump(
            tmp_src.path(),
            tmp_dst.path(),
            index_db_size,
            update_db_size,
            indexer_opts,
        )?;
        persist_dump(&dst_path, tmp_dst)?;
        Ok(())
    } else if !empty_db && !ignore_dump_if_db_exists {
        bail!(
            "database already exists at {:?}, try to delete it or rename it",
            dst_path
                .as_ref()
                .canonicalize()
                .unwrap_or_else(|_| dst_path.as_ref().to_owned())
        )
    } else if !src_path_exists && !ignore_missing_dump {
        bail!("dump doesn't exist at {:?}", src_path.as_ref())
    } else {
        // there is nothing to do
        Ok(())
    }
}
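
A decision table summarizing the branches above (sketch; "-" means the flag is not consulted on that path):

// empty_db | src exists | ignore_dump_if_db_exists | ignore_missing_dump | outcome
// true     | true       | -                        | -                   | extract, load, persist
// false    | -          | false                    | -                   | bail: database already exists
// true     | false      | -                        | false               | bail: dump doesn't exist
// (any remaining combination)                                            | no-op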

fn extract_dump(
    dst_path: impl AsRef<Path>,
    src_path: impl AsRef<Path>,
) -> anyhow::Result<(TempDir, TempDir, MetadataVersion)> {
    // Set up a temp directory in the same path as the database, to prevent
    // cross-device references.
    let temp_path = dst_path
        .as_ref()
        .parent()
        .map(ToOwned::to_owned)
        .unwrap_or_else(|| ".".into());

    let tmp_src = tempfile::tempdir_in(temp_path)?;
    let tmp_src_path = tmp_src.path();

    from_tar_gz(&src_path, tmp_src_path)?;

    let meta_path = tmp_src_path.join(META_FILE_NAME);
    let mut meta_file = File::open(&meta_path)?;
    let meta: MetadataVersion = serde_json::from_reader(&mut meta_file)?;

    if !dst_path.as_ref().exists() {
        std::fs::create_dir_all(dst_path.as_ref())?;
    }

    let tmp_dst = tempfile::tempdir_in(dst_path.as_ref())?;

    info!(
        "Loading dump {}, dump database version: {}, dump version: {}",
        meta.dump_date()
            .map(|t| format!("from {}", t))
            .unwrap_or_else(String::new),
        meta.db_version(),
        meta.version()
    );

    Ok((tmp_src, tmp_dst, meta))
}

fn persist_dump(dst_path: impl AsRef<Path>, tmp_dst: TempDir) -> anyhow::Result<()> {
    let persisted_dump = tmp_dst.into_path();

    // Delete everything in the `data.ms` except the tempdir.
    if dst_path.as_ref().exists() {
        for file in dst_path.as_ref().read_dir().unwrap() {
            let file = file.unwrap().path();
            if file.file_name() == persisted_dump.file_name() {
                continue;
            }

            if file.is_file() {
                std::fs::remove_file(&file)?;
            } else {
                std::fs::remove_dir_all(&file)?;
            }
        }
    }

    // Move the whole content of the tempdir into the `data.ms`.
    for file in persisted_dump.read_dir().unwrap() {
        let file = file.unwrap().path();

        std::fs::rename(&file, &dst_path.as_ref().join(file.file_name().unwrap()))?;
    }

    // Delete the empty tempdir.
    std::fs::remove_dir_all(&persisted_dump)?;

    Ok(())
}