Provide a rename argument to the swap

This commit is contained in:
Tamo
2025-08-07 19:35:07 +02:00
parent ae5bd9d0e3
commit ecea247e5d
12 changed files with 358 additions and 34 deletions
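
In short, each entry of the swap payload gains a `rename` flag (default `false`). When it is `true`, the first index is renamed to the second name instead of the two indexes being swapped, and the target name must not already exist. A minimal sketch (not part of the commit) of registering such a task with the types changed below; `old_name` and `new_name` are placeholders:

    use meilisearch_types::tasks::{IndexSwap, KindWithContent};

    // Rename the index `old_name` to `new_name` instead of swapping them.
    // With `rename: true`, `old_name` must exist and `new_name` must not,
    // otherwise the task fails with the SwapIndexNotFound or
    // SwapIndexFoundDuringRename errors introduced in error.rs below.
    let rename_task = KindWithContent::IndexSwap {
        swaps: vec![IndexSwap {
            indexes: ("old_name".to_owned(), "new_name".to_owned()),
            rename: true,
        }],
    };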

View File

@@ -67,6 +67,8 @@ pub enum Error {
    SwapDuplicateIndexesFound(Vec<String>),
    #[error("Index `{0}` not found.")]
    SwapIndexNotFound(String),
+   #[error("Index `{0}` found during a rename. Renaming doesn't overwrite the other index name.")]
+   SwapIndexFoundDuringRename(String),
    #[error("Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.")]
    NoSpaceLeftInTaskQueue,
    #[error(
@@ -74,6 +76,10 @@ pub enum Error {
        .0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")
    )]
    SwapIndexesNotFound(Vec<String>),
+   #[error("Indexes {} found during a rename. Renaming doesn't overwrite the other index name.",
+       .0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")
+   )]
+   SwapIndexesFoundDuringRename(Vec<String>),
    #[error("Corrupted dump.")]
    CorruptedDump,
    #[error(
@@ -203,6 +209,8 @@ impl Error {
            | Error::SwapIndexNotFound(_)
            | Error::NoSpaceLeftInTaskQueue
            | Error::SwapIndexesNotFound(_)
+           | Error::SwapIndexFoundDuringRename(_)
+           | Error::SwapIndexesFoundDuringRename(_)
            | Error::CorruptedDump
            | Error::InvalidTaskDate { .. }
            | Error::InvalidTaskUid { .. }
@@ -271,6 +279,8 @@ impl ErrorCode for Error {
            Error::SwapDuplicateIndexFound(_) => Code::InvalidSwapDuplicateIndexFound,
            Error::SwapIndexNotFound(_) => Code::IndexNotFound,
            Error::SwapIndexesNotFound(_) => Code::IndexNotFound,
+           Error::SwapIndexFoundDuringRename(_) => Code::IndexNotFound,
+           Error::SwapIndexesFoundDuringRename(_) => Code::IndexNotFound,
            Error::InvalidTaskDate { field, .. } => (*field).into(),
            Error::InvalidTaskUid { .. } => Code::InvalidTaskUids,
            Error::InvalidBatchUid { .. } => Code::InvalidBatchUids,

View File

@@ -334,11 +334,11 @@ fn query_batches_special_rules() {
    let kind = index_creation_task("doggo", "sheep");
    let _task = index_scheduler.register(kind, None, false).unwrap();
    let kind = KindWithContent::IndexSwap {
-       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
    };
    let _task = index_scheduler.register(kind, None, false).unwrap();
    let kind = KindWithContent::IndexSwap {
-       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()) }],
+       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()), rename: false }],
    };
    let _task = index_scheduler.register(kind, None, false).unwrap();
@@ -442,7 +442,7 @@ fn query_batches_canceled_by() {
    let kind = index_creation_task("doggo", "sheep");
    let _ = index_scheduler.register(kind, None, false).unwrap();
    let kind = KindWithContent::IndexSwap {
-       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
    };
    let _task = index_scheduler.register(kind, None, false).unwrap();

View File

@@ -304,11 +304,11 @@ fn query_tasks_special_rules() {
    let kind = index_creation_task("doggo", "sheep");
    let _task = index_scheduler.register(kind, None, false).unwrap();
    let kind = KindWithContent::IndexSwap {
-       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
    };
    let _task = index_scheduler.register(kind, None, false).unwrap();
    let kind = KindWithContent::IndexSwap {
-       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()) }],
+       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "whalo".to_owned()), rename: false }],
    };
    let _task = index_scheduler.register(kind, None, false).unwrap();
@@ -399,7 +399,7 @@ fn query_tasks_canceled_by() {
    let kind = index_creation_task("doggo", "sheep");
    let _ = index_scheduler.register(kind, None, false).unwrap();
    let kind = KindWithContent::IndexSwap {
-       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()) }],
+       swaps: vec![IndexSwap { indexes: ("catto".to_owned(), "doggo".to_owned()), rename: false }],
    };
    let _task = index_scheduler.register(kind, None, false).unwrap();

View File

@@ -88,7 +88,10 @@ fn idx_del() -> KindWithContent {
fn idx_swap() -> KindWithContent {
    KindWithContent::IndexSwap {
-       swaps: vec![IndexSwap { indexes: (String::from("doggo"), String::from("catto")) }],
+       swaps: vec![IndexSwap {
+           indexes: (String::from("doggo"), String::from("catto")),
+           rename: false,
+       }],
    }
}

View File

@@ -360,13 +360,18 @@ impl IndexScheduler {
                    unreachable!()
                };
                let mut not_found_indexes = BTreeSet::new();
-               for IndexSwap { indexes: (lhs, rhs) } in swaps {
-                   for index in [lhs, rhs] {
-                       let index_exists = self.index_mapper.index_exists(&wtxn, index)?;
-                       if !index_exists {
-                           not_found_indexes.insert(index);
-                       }
-                   }
+               let mut found_indexes_but_should_not = BTreeSet::new();
+               for IndexSwap { indexes: (lhs, rhs), rename } in swaps {
+                   let index_exists = self.index_mapper.index_exists(&wtxn, lhs)?;
+                   if !index_exists {
+                       not_found_indexes.insert(lhs);
+                   }
+                   let index_exists = self.index_mapper.index_exists(&wtxn, rhs)?;
+                   match (index_exists, rename) {
+                       (true, true) => found_indexes_but_should_not.insert(rhs),
+                       (false, false) => not_found_indexes.insert(rhs),
+                       (true, false) | (false, true) => true, // random value we don't read it anyway
+                   };
                }
                if !not_found_indexes.is_empty() {
                    if not_found_indexes.len() == 1 {
@@ -379,6 +384,17 @@ impl IndexScheduler {
                        ));
                    }
                }
+               if !found_indexes_but_should_not.is_empty() {
+                   if found_indexes_but_should_not.len() == 1 {
+                       return Err(Error::SwapIndexFoundDuringRename(
+                           found_indexes_but_should_not.into_iter().next().unwrap().clone(),
+                       ));
+                   } else {
+                       return Err(Error::SwapIndexesFoundDuringRename(
+                           found_indexes_but_should_not.into_iter().cloned().collect(),
+                       ));
+                   }
+               }
                progress.update_progress(SwappingTheIndexes::SwappingTheIndexes);
                for (step, swap) in swaps.iter().enumerate() {
                    progress.update_progress(VariableNameStep::<SwappingTheIndexes>::new(
@@ -392,6 +408,7 @@ impl IndexScheduler {
                        task.uid,
                        &swap.indexes.0,
                        &swap.indexes.1,
+                       swap.rename,
                    )?;
                }
                wtxn.commit()?;
@@ -481,6 +498,7 @@ impl IndexScheduler {
        task_id: u32,
        lhs: &str,
        rhs: &str,
+       rename: bool,
    ) -> Result<()> {
        progress.update_progress(InnerSwappingTwoIndexes::RetrieveTheTasks);
        // 1. Verify that both lhs and rhs are existing indexes
@@ -488,16 +506,23 @@ impl IndexScheduler {
        if !index_lhs_exists {
            return Err(Error::IndexNotFound(lhs.to_owned()));
        }
-       let index_rhs_exists = self.index_mapper.index_exists(wtxn, rhs)?;
-       if !index_rhs_exists {
-           return Err(Error::IndexNotFound(rhs.to_owned()));
+       if !rename {
+           let index_rhs_exists = self.index_mapper.index_exists(wtxn, rhs)?;
+           if !index_rhs_exists {
+               return Err(Error::IndexNotFound(rhs.to_owned()));
+           }
        }

        // 2. Get the task set for index = name that appeared before the index swap task
        let mut index_lhs_task_ids = self.queue.tasks.index_tasks(wtxn, lhs)?;
        index_lhs_task_ids.remove_range(task_id..);
-       let mut index_rhs_task_ids = self.queue.tasks.index_tasks(wtxn, rhs)?;
-       index_rhs_task_ids.remove_range(task_id..);
+       let index_rhs_task_ids = if rename {
+           let mut index_rhs_task_ids = self.queue.tasks.index_tasks(wtxn, rhs)?;
+           index_rhs_task_ids.remove_range(task_id..);
+           index_rhs_task_ids
+       } else {
+           RoaringBitmap::new()
+       };

        // 3. before_name -> new_name in the task's KindWithContent
        progress.update_progress(InnerSwappingTwoIndexes::UpdateTheTasks);
@@ -526,7 +551,11 @@ impl IndexScheduler {
        })?;

        // 6. Swap in the index mapper
-       self.index_mapper.swap(wtxn, lhs, rhs)?;
+       if rename {
+           self.index_mapper.rename(wtxn, lhs, rhs)?;
+       } else {
+           self.index_mapper.swap(wtxn, lhs, rhs)?;
+       }

        Ok(())
    }

View File

@@ -372,8 +372,8 @@ fn swap_indexes() {
        .register(
            KindWithContent::IndexSwap {
                swaps: vec![
-                   IndexSwap { indexes: ("a".to_owned(), "b".to_owned()) },
-                   IndexSwap { indexes: ("c".to_owned(), "d".to_owned()) },
+                   IndexSwap { indexes: ("a".to_owned(), "b".to_owned()), rename: false },
+                   IndexSwap { indexes: ("c".to_owned(), "d".to_owned()), rename: false },
                ],
            },
            None,
@@ -384,7 +384,7 @@ fn swap_indexes() {
    index_scheduler
        .register(
            KindWithContent::IndexSwap {
-               swaps: vec![IndexSwap { indexes: ("a".to_owned(), "c".to_owned()) }],
+               swaps: vec![IndexSwap { indexes: ("a".to_owned(), "c".to_owned()), rename: false }],
            },
            None,
            false,
@@ -428,8 +428,8 @@ fn swap_indexes_errors() {
        .register(
            KindWithContent::IndexSwap {
                swaps: vec![
-                   IndexSwap { indexes: ("a".to_owned(), "b".to_owned()) },
-                   IndexSwap { indexes: ("b".to_owned(), "a".to_owned()) },
+                   IndexSwap { indexes: ("a".to_owned(), "b".to_owned()), rename: false },
+                   IndexSwap { indexes: ("b".to_owned(), "a".to_owned()), rename: false },
                ],
            },
            None,
@@ -446,9 +446,9 @@ fn swap_indexes_errors() {
        .register(
            KindWithContent::IndexSwap {
                swaps: vec![
-                   IndexSwap { indexes: ("a".to_owned(), "b".to_owned()) },
-                   IndexSwap { indexes: ("c".to_owned(), "e".to_owned()) },
-                   IndexSwap { indexes: ("d".to_owned(), "f".to_owned()) },
+                   IndexSwap { indexes: ("a".to_owned(), "b".to_owned()), rename: false },
+                   IndexSwap { indexes: ("c".to_owned(), "e".to_owned()), rename: false },
+                   IndexSwap { indexes: ("d".to_owned(), "f".to_owned()), rename: false },
                ],
            },
            None,

View File

@@ -271,7 +271,7 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
            }
        }
        K::IndexSwap { swaps } => {
-           for IndexSwap { indexes: (lhs, rhs) } in swaps.iter_mut() {
+           for IndexSwap { indexes: (lhs, rhs), rename: _ } in swaps.iter_mut() {
                if lhs == swap.0 || lhs == swap.1 {
                    index_uids.push(lhs);
                }
@@ -288,7 +288,7 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
        | K::SnapshotCreation => (),
    };
    if let Some(Details::IndexSwap { swaps }) = &mut task.details {
-       for IndexSwap { indexes: (lhs, rhs) } in swaps.iter_mut() {
+       for IndexSwap { indexes: (lhs, rhs), rename: _ } in swaps.iter_mut() {
            if lhs == swap.0 || lhs == swap.1 {
                index_uids.push(lhs);
            }
@@ -330,7 +330,7 @@ pub(crate) fn check_index_swap_validity(task: &Task) -> Result<()> {
        if let KindWithContent::IndexSwap { swaps } = &task.kind { swaps } else { return Ok(()) };
    let mut all_indexes = HashSet::new();
    let mut duplicate_indexes = BTreeSet::new();
-   for IndexSwap { indexes: (lhs, rhs) } in swaps {
+   for IndexSwap { indexes: (lhs, rhs), rename: _ } in swaps {
        for name in [lhs, rhs] {
            let is_new = all_indexes.insert(name);
            if !is_new {

View File

@@ -335,6 +335,7 @@ InvalidState , Internal , INTERNAL
InvalidStoreFile , Internal , INTERNAL_SERVER_ERROR ;
InvalidSwapDuplicateIndexFound , InvalidRequest , BAD_REQUEST ;
InvalidSwapIndexes , InvalidRequest , BAD_REQUEST ;
+InvalidSwapRename , InvalidRequest , BAD_REQUEST ;
InvalidTaskAfterEnqueuedAt , InvalidRequest , BAD_REQUEST ;
InvalidTaskAfterFinishedAt , InvalidRequest , BAD_REQUEST ;
InvalidTaskAfterStartedAt , InvalidRequest , BAD_REQUEST ;

View File

@@ -173,6 +173,7 @@ pub enum KindWithContent {
#[serde(rename_all = "camelCase")]
pub struct IndexSwap {
    pub indexes: (String, String),
+   pub rename: bool,
}

#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]

View File

@@ -4,7 +4,7 @@ use deserr::actix_web::AwebJson;
use deserr::Deserr;
use index_scheduler::IndexScheduler;
use meilisearch_types::deserr::DeserrJsonError;
-use meilisearch_types::error::deserr_codes::InvalidSwapIndexes;
+use meilisearch_types::error::deserr_codes::{InvalidSwapIndexes, InvalidSwapRename};
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::tasks::{IndexSwap, KindWithContent};
@@ -33,11 +33,15 @@ pub struct SwapIndexesPayload {
    /// Array of the two indexUids to be swapped
    #[deserr(error = DeserrJsonError<InvalidSwapIndexes>, missing_field_error = DeserrJsonError::missing_swap_indexes)]
    indexes: Vec<IndexUid>,
+   /// If set to true, instead of swapping the left and right indexes, it renames the first index to the second
+   #[deserr(default, error = DeserrJsonError<InvalidSwapRename>)]
+   rename: bool,
}

#[derive(Serialize)]
struct IndexSwappedAnalytics {
    swap_operation_number: usize,
+   rename_used: bool,
}

impl Aggregate for IndexSwappedAnalytics {
@@ -48,6 +52,7 @@ impl Aggregate for IndexSwappedAnalytics {
    fn aggregate(self: Box<Self>, new: Box<Self>) -> Box<Self> {
        Box::new(Self {
            swap_operation_number: self.swap_operation_number.max(new.swap_operation_number),
+           rename_used: self.rename_used | new.rename_used,
        })
    }
@@ -95,11 +100,17 @@ pub async fn swap_indexes(
    analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
    let params = params.into_inner();
-   analytics.publish(IndexSwappedAnalytics { swap_operation_number: params.len() }, &req);
+   analytics.publish(
+       IndexSwappedAnalytics {
+           swap_operation_number: params.len(),
+           rename_used: params.iter().any(|obj| obj.rename),
+       },
+       &req,
+   );
    let filters = index_scheduler.filters();
    let mut swaps = vec![];
-   for SwapIndexesPayload { indexes } in params.into_iter() {
+   for SwapIndexesPayload { indexes, rename } in params.into_iter() {
        // TODO: switch to deserr
        let (lhs, rhs) = match indexes.as_slice() {
            [lhs, rhs] => (lhs, rhs),
@@ -110,7 +121,7 @@ pub async fn swap_indexes(
        if !filters.is_index_authorized(lhs) || !filters.is_index_authorized(rhs) {
            return Err(AuthenticationError::InvalidToken.into());
        }
-       swaps.push(IndexSwap { indexes: (lhs.to_string(), rhs.to_string()) });
+       swaps.push(IndexSwap { indexes: (lhs.to_string(), rhs.to_string()), rename });
    }
    let task = KindWithContent::IndexSwap { swaps };

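For reference, a hedged sketch of how the new field reaches the handler above from the HTTP API, written in the style of the integration tests further down (Server, index, index_swap and wait_task are the existing test helpers; the index names are placeholders):

    #[actix_rt::test]
    async fn rename_via_http_sketch() {
        let server = Server::new().await;
        let old = server.index("old_name");
        old.create(None).await;

        // POST /swap-indexes with one swap entry carrying the new `rename` flag.
        // The body is deserialized into SwapIndexesPayload { indexes, rename };
        // a non-boolean `rename` value yields the new invalid_swap_rename error.
        let (res, _code) = server
            .index_swap(json!([{ "indexes": ["old_name", "new_name"], "rename": true }]))
            .await;
        server.wait_task(res.uid()).await.succeeded();
    }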
View File

@@ -92,3 +92,20 @@ async fn swap_indexes_bad_indexes() {
    }
    "###);
}
+
+#[actix_rt::test]
+async fn swap_indexes_bad_rename() {
+    let server = Server::new_shared();
+
+    let (response, code) =
+        server.index_swap(json!([{ "indexes": ["kefir", "intel"], "rename": "hello" }])).await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r#"
+    {
+      "message": "Invalid value type at `[0].rename`: expected a boolean, but found a string: `\"hello\"`",
+      "code": "invalid_swap_rename",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#invalid_swap_rename"
+    }
+    "#);
+}

View File

@@ -372,3 +372,255 @@ async fn swap_indexes() {
    let (res, _) = d.get_all_documents(GetAllDocumentsOptions::default()).await;
    snapshot!(res["results"], @r###"[{"id":1,"index":"c"}]"###);
}
#[actix_rt::test]
async fn swap_rename_indexes() {
let server = Server::new().await;
let a = server.index("a");
let b = server.index("b");
a.create(None).await;
a.add_documents(json!({ "id": 1, "index": "a"}), None).await;
let (res, _code) = server.index_swap(json!([{ "indexes": ["a", "b"], "rename": true }])).await;
server.wait_task(res.uid()).await.succeeded();
let (tasks, _code) = server.tasks().await;
// Notice how task 0, which initially represented the creation of the index `a`, now represents the creation of the index `b`.
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
"uid": 2,
"batchUid": 2,
"indexUid": null,
"status": "succeeded",
"type": "indexSwap",
"canceledBy": null,
"details": {
"swaps": [
{
"indexes": [
"a",
"b"
],
"rename": true
}
]
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 1,
"batchUid": 1,
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "b",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": null
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 3,
"limit": 20,
"from": 2,
"next": null
}
"#);
// BUT, `a` should not exist
let (res, code) = a.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"404 Not Found");
snapshot!(res["results"], @"null");
// And its data should be in b
let (res, code) = b.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"200 OK");
snapshot!(res["results"], @r#"[{"id":1,"index":"a"}]"#);
// No tasks should be linked to the index a
let (tasks, _code) = server.tasks_filter("indexUids=a").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [],
"total": 1,
"limit": 20,
"from": null,
"next": null
}
"#);
// They should be linked to the index b
let (tasks, _code) = server.tasks_filter("indexUids=b").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
"uid": 1,
"batchUid": 1,
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "b",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": null
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 3,
"limit": 20,
"from": 1,
"next": null
}
"#);
// ===== Now, we can delete the index `b`, but its tasks will stay.
// If we then make a new `b` index and rename it to be called `a`,
// the tasks currently available in `b` should not be moved
let (res, _) = b.delete().await;
server.wait_task(res.uid()).await.succeeded();
let (res, _) = b.create(Some("kefir")).await;
let (res, _code) = server.index_swap(json!([{ "indexes": ["b", "a"], "rename": true }])).await;
server.wait_task(res.uid()).await.succeeded();
// `a` now contains everything
let (tasks, _code) = server.tasks_filter("indexUids=a").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
"uid": 4,
"batchUid": 4,
"indexUid": "a",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": "kefir"
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 3,
"batchUid": 3,
"indexUid": "a",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 1,
"batchUid": 1,
"indexUid": "a",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "a",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": null
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 6,
"limit": 20,
"from": 4,
"next": null
}
"#);
// And `b` is empty
let (tasks, _code) = server.tasks_filter("indexUids=b").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [],
"total": 2,
"limit": 20,
"from": null,
"next": null
}
"#);
}