fix error code and add a bunch of tests for index swap and rename

Tamo
2025-08-11 17:37:30 +02:00
parent ecea247e5d
commit a904ce109a
7 changed files with 182 additions and 39 deletions
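
In short: when a swap entry has `rename: true` and the target uid already exists, the task now fails with the `index_already_exists` error code instead of an index-not-found error, and the message names both the source and the target uid. Below is a minimal sketch of the behaviour the new tests exercise, reusing the helpers that appear in the test diffs (`Server::new_shared`, `index_swap`, `wait_task`); the index names and assertions are illustrative, not part of the commit:

// Sketch only: assumes indexes `old` and `taken` both already exist, so renaming `old` to `taken` must fail.
let server = Server::new_shared();
let (task, _code) = server
    .index_swap(json!([{ "indexes": ["old", "taken"], "rename": true }]))
    .await;
let task = server.wait_task(task.uid()).await;
// The task is rejected with `index_already_exists` (previously reported with the index-not-found code).
assert_eq!(task["status"], "failed");
assert_eq!(task["error"]["code"], "index_already_exists");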

View File

@ -67,8 +67,8 @@ pub enum Error {
SwapDuplicateIndexesFound(Vec<String>),
#[error("Index `{0}` not found.")]
SwapIndexNotFound(String),
#[error("Index `{0}` found during a rename. Renaming doen't overwrite the other index name.")]
SwapIndexFoundDuringRename(String),
#[error("Cannot rename `{0}` to `{1}` as the index already exists. Hint: You can remove `{1}` first and then do your remove.")]
SwapIndexFoundDuringRename(String, String),
#[error("Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.")]
NoSpaceLeftInTaskQueue,
#[error(
@ -76,7 +76,7 @@ pub enum Error {
.0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")
)]
SwapIndexesNotFound(Vec<String>),
#[error("Index {} found during a rename. Renaming doen't overwrite the other index name.",
#[error("The following indexes are being renamed but cannot because their new name conflicts with an already existing index: {}. Renaming doesn't overwrite the other index name.",
.0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")
)]
SwapIndexesFoundDuringRename(Vec<String>),
@ -209,7 +209,7 @@ impl Error {
| Error::SwapIndexNotFound(_)
| Error::NoSpaceLeftInTaskQueue
| Error::SwapIndexesNotFound(_)
| Error::SwapIndexFoundDuringRename(_)
| Error::SwapIndexFoundDuringRename(_, _)
| Error::SwapIndexesFoundDuringRename(_)
| Error::CorruptedDump
| Error::InvalidTaskDate { .. }
@ -279,8 +279,8 @@ impl ErrorCode for Error {
Error::SwapDuplicateIndexFound(_) => Code::InvalidSwapDuplicateIndexFound,
Error::SwapIndexNotFound(_) => Code::IndexNotFound,
Error::SwapIndexesNotFound(_) => Code::IndexNotFound,
Error::SwapIndexFoundDuringRename(_) => Code::IndexNotFound,
Error::SwapIndexesFoundDuringRename(_) => Code::IndexNotFound,
Error::SwapIndexFoundDuringRename(_, _) => Code::IndexAlreadyExists,
Error::SwapIndexesFoundDuringRename(_) => Code::IndexAlreadyExists,
Error::InvalidTaskDate { field, .. } => (*field).into(),
Error::InvalidTaskUid { .. } => Code::InvalidTaskUids,
Error::InvalidBatchUid { .. } => Code::InvalidBatchUids,

View File

@ -368,7 +368,7 @@ impl IndexScheduler {
}
let index_exists = self.index_mapper.index_exists(&wtxn, rhs)?;
match (index_exists, rename) {
(true, true) => found_indexes_but_should_not.insert(rhs),
(true, true) => found_indexes_but_should_not.insert((lhs, rhs)),
(false, false) => not_found_indexes.insert(rhs),
(true, false) | (false, true) => true, // random value we don't read it anyway
};
@ -386,12 +386,18 @@ impl IndexScheduler {
}
if !found_indexes_but_should_not.is_empty() {
if found_indexes_but_should_not.len() == 1 {
return Err(Error::SwapIndexFoundDuringRename(
found_indexes_but_should_not.into_iter().next().unwrap().clone(),
));
let (lhs, rhs) = found_indexes_but_should_not
.into_iter()
.next()
.map(|(lhs, rhs)| (lhs.clone(), rhs.clone()))
.unwrap();
return Err(Error::SwapIndexFoundDuringRename(lhs, rhs));
} else {
return Err(Error::SwapIndexesFoundDuringRename(
found_indexes_but_should_not.into_iter().cloned().collect(),
found_indexes_but_should_not
.into_iter()
.map(|(_, rhs)| rhs.to_string())
.collect(),
));
}
}

View File

@ -1142,7 +1142,7 @@ async fn test_summarized_index_swap() {
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
},
@r###"
@r#"
{
"uid": "[uid]",
"progress": null,
@ -1152,7 +1152,8 @@ async fn test_summarized_index_swap() {
"indexes": [
"doggos",
"cattos"
]
],
"rename": false
}
]
},
@ -1172,7 +1173,7 @@ async fn test_summarized_index_swap() {
"finishedAt": "[date]",
"batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task."
}
"###);
"#);
let doggos_index = server.unique_index();
doggos_index.create(None).await;

View File

@ -4,7 +4,9 @@ use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::{shared_does_not_exists_index, shared_index_with_documents, Server};
use crate::common::{
shared_does_not_exists_index, shared_empty_index, shared_index_with_documents, Server,
};
use crate::json;
#[actix_rt::test]
@ -113,17 +115,14 @@ async fn error_update_unexisting_index() {
async fn update_index_name() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let new_index = server.unique_index();
let (task, _status_code) = index.update_raw(json!({ "uid": new_index.uid })).await;
let (task, _code) = index.update_raw(json!({ "uid": new_index.uid })).await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = new_index.get().await;
snapshot!(code, @"200 OK");
assert_eq!(response["uid"], new_index.uid);
@ -139,3 +138,39 @@ async fn update_index_name() {
snapshot!(response["primaryKey"], @"null");
snapshot!(response.as_object().unwrap().len(), @"4");
}
#[actix_rt::test]
async fn error_update_index_name_to_already_existing_index() {
let server = Server::new_shared();
let base_index = shared_empty_index().await;
let index = server.unique_index();
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index.update_raw(json!({ "uid": base_index.uid })).await;
let task = server.wait_task(task.uid()).await;
snapshot!(task, @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexUpdate",
"canceledBy": null,
"details": {
"primaryKey": null,
"newIndexUid": "EMPTY_INDEX"
},
"error": {
"message": "Index `EMPTY_INDEX` already exists.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
}

View File

@ -1,6 +1,9 @@
use meili_snap::*;
use crate::common::Server;
use crate::common::{
shared_empty_index, shared_index_with_documents, shared_index_with_geo_documents,
shared_index_with_nested_documents, Server,
};
use crate::json;
#[actix_rt::test]
@ -109,3 +112,95 @@ async fn swap_indexes_bad_rename() {
}
"#);
}
#[actix_rt::test]
async fn swap_indexes_rename_to_already_existing_index() {
let server = Server::new_shared();
let already_existing_index = shared_empty_index().await;
let base_index = shared_index_with_documents().await;
let (response, _code) = server
.index_swap(
json!([{ "indexes": [base_index.uid, already_existing_index.uid], "rename": true }]),
)
.await;
let response = server.wait_task(response.uid()).await;
snapshot!(response, @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "failed",
"type": "indexSwap",
"canceledBy": null,
"details": {
"swaps": [
{
"indexes": [
"SHARED_DOCUMENTS",
"EMPTY_INDEX"
],
"rename": true
}
]
},
"error": {
"message": "Cannot rename `SHARED_DOCUMENTS` to `EMPTY_INDEX` as the index already exists. Hint: You can remove `EMPTY_INDEX` first and then do your remove.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
let base_index_2 = shared_index_with_geo_documents().await;
let already_existing_index_2 = shared_index_with_nested_documents().await;
let (response, _code) = server
.index_swap(
json!([{ "indexes": [base_index.uid, already_existing_index.uid], "rename": true }, { "indexes": [base_index_2.uid, already_existing_index_2.uid], "rename": true }]),
)
.await;
let response = server.wait_task(response.uid()).await;
snapshot!(response, @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "failed",
"type": "indexSwap",
"canceledBy": null,
"details": {
"swaps": [
{
"indexes": [
"SHARED_DOCUMENTS",
"EMPTY_INDEX"
],
"rename": true
},
{
"indexes": [
"SHARED_GEO_DOCUMENTS",
"SHARED_NESTED_DOCUMENTS"
],
"rename": true
}
]
},
"error": {
"message": "The following indexes are being renamed but cannot because their new name conflicts with an already existing index: `EMPTY_INDEX`, `SHARED_NESTED_DOCUMENTS`. Renaming doesn't overwrite the other index name.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
}

View File

@ -73,7 +73,7 @@ async fn swap_indexes() {
snapshot!(code, @"200 OK");
// Notice how the task 0 which was initially representing the creation of the index `A` now represents the creation of the index `B`.
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r###"
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
@ -89,7 +89,8 @@ async fn swap_indexes() {
"indexes": [
"a",
"b"
]
],
"rename": false
}
]
},
@ -102,7 +103,7 @@ async fn swap_indexes() {
{
"uid": 1,
"batchUid": 1,
"indexUid": "a",
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -139,7 +140,7 @@ async fn swap_indexes() {
"from": 2,
"next": null
}
"###);
"#);
// BUT, the data in `a` should now points to the data that was in `b`.
// And the opposite is true as well
@ -228,7 +229,7 @@ async fn swap_indexes() {
// 2. stays unchanged
// 3. now have the indexUid `d` instead of `c`
// 4. now have the indexUid `c` instead of `d`
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r###"
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
@ -244,13 +245,15 @@ async fn swap_indexes() {
"indexes": [
"a",
"b"
]
],
"rename": false
},
{
"indexes": [
"c",
"d"
]
],
"rename": false
}
]
},
@ -263,7 +266,7 @@ async fn swap_indexes() {
{
"uid": 4,
"batchUid": 4,
"indexUid": "c",
"indexUid": "d",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -307,7 +310,8 @@ async fn swap_indexes() {
"indexes": [
"b",
"a"
]
],
"rename": false
}
]
},
@ -337,7 +341,7 @@ async fn swap_indexes() {
{
"uid": 0,
"batchUid": 0,
"indexUid": "a",
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -357,7 +361,7 @@ async fn swap_indexes() {
"from": 5,
"next": null
}
"###);
"#);
// - The data in `a` should point to `a`.
// - The data in `b` should point to `b`.
@ -530,7 +534,7 @@ async fn swap_rename_indexes() {
let (res, _) = b.delete().await;
server.wait_task(res.uid()).await.succeeded();
let (res, _) = b.create(Some("kefir")).await;
b.create(Some("kefir")).await;
let (res, _code) = server.index_swap(json!([{ "indexes": ["b", "a"], "rename": true }])).await;
server.wait_task(res.uid()).await.succeeded();

View File

@ -895,7 +895,7 @@ async fn test_summarized_index_swap() {
server.wait_task(task.uid()).await.failed();
let (task, _) = server.get_task(task.uid()).await;
snapshot!(task,
@r###"
@r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
@ -909,7 +909,8 @@ async fn test_summarized_index_swap() {
"indexes": [
"doggos",
"cattos"
]
],
"rename": false
}
]
},
@ -924,7 +925,7 @@ async fn test_summarized_index_swap() {
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
"#);
let doggos_index = server.unique_index();
let (task, _code) = doggos_index.create(None).await;
@ -941,7 +942,7 @@ async fn test_summarized_index_swap() {
let (task, _) = server.get_task(task.uid()).await;
snapshot!(json_string!(task,
{ ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.indexes[0]" => "doggos", ".**.indexes[1]" => "cattos", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
@r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
@ -955,7 +956,8 @@ async fn test_summarized_index_swap() {
"indexes": [
"doggos",
"cattos"
]
],
"rename": false
}
]
},
@ -965,7 +967,7 @@ async fn test_summarized_index_swap() {
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
"#);
}
#[actix_web::test]