Merge pull request #5829 from meilisearch/index-rename

Index rename
This commit is contained in:
Tamo
2025-08-14 15:39:28 +00:00
committed by GitHub
120 changed files with 1423 additions and 385 deletions

View File

@ -288,7 +288,6 @@ fn deny_immutable_fields_index(
location: ValuePointerRef,
) -> DeserrJsonError {
match field {
"uid" => immutable_field_error(field, accepted, Code::ImmutableIndexUid),
"createdAt" => immutable_field_error(field, accepted, Code::ImmutableIndexCreatedAt),
"updatedAt" => immutable_field_error(field, accepted, Code::ImmutableIndexUpdatedAt),
_ => deserr::take_cf_content(DeserrJsonError::<BadRequest>::error::<Infallible>(
@ -375,6 +374,9 @@ pub struct UpdateIndexRequest {
/// The new primary key of the index
#[deserr(default, error = DeserrJsonError<InvalidIndexPrimaryKey>)]
primary_key: Option<String>,
/// The new uid of the index (for renaming)
#[deserr(default, error = DeserrJsonError<InvalidIndexUid>)]
uid: Option<String>,
}
/// Update index
@ -419,6 +421,12 @@ pub async fn update_index(
debug!(parameters = ?body, "Update index");
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let body = body.into_inner();
// Validate new uid if provided
if let Some(ref new_uid) = body.uid {
let _ = IndexUid::try_from(new_uid.clone())?;
}
analytics.publish(
IndexUpdatedAggregate { primary_key: body.primary_key.iter().cloned().collect() },
&req,
@ -427,6 +435,7 @@ pub async fn update_index(
let task = KindWithContent::IndexUpdate {
index_uid: index_uid.into_inner(),
primary_key: body.primary_key,
new_index_uid: body.uid,
};
let uid = get_task_id(&req, &opt)?;

View File

@ -4,7 +4,7 @@ use deserr::actix_web::AwebJson;
use deserr::Deserr;
use index_scheduler::IndexScheduler;
use meilisearch_types::deserr::DeserrJsonError;
use meilisearch_types::error::deserr_codes::InvalidSwapIndexes;
use meilisearch_types::error::deserr_codes::{InvalidSwapIndexes, InvalidSwapRename};
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::tasks::{IndexSwap, KindWithContent};
@ -33,11 +33,15 @@ pub struct SwapIndexesPayload {
/// Array of the two indexUids to be swapped
#[deserr(error = DeserrJsonError<InvalidSwapIndexes>, missing_field_error = DeserrJsonError::missing_swap_indexes)]
indexes: Vec<IndexUid>,
/// If set to true, instead of swapping the left and right indexes it'll change the name of the first index to the second
#[deserr(default, error = DeserrJsonError<InvalidSwapRename>)]
rename: bool,
}
#[derive(Serialize)]
struct IndexSwappedAnalytics {
swap_operation_number: usize,
rename_used: bool,
}
impl Aggregate for IndexSwappedAnalytics {
@ -48,6 +52,7 @@ impl Aggregate for IndexSwappedAnalytics {
fn aggregate(self: Box<Self>, new: Box<Self>) -> Box<Self> {
Box::new(Self {
swap_operation_number: self.swap_operation_number.max(new.swap_operation_number),
rename_used: self.rename_used | new.rename_used,
})
}
@ -95,11 +100,17 @@ pub async fn swap_indexes(
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let params = params.into_inner();
analytics.publish(IndexSwappedAnalytics { swap_operation_number: params.len() }, &req);
analytics.publish(
IndexSwappedAnalytics {
swap_operation_number: params.len(),
rename_used: params.iter().any(|obj| obj.rename),
},
&req,
);
let filters = index_scheduler.filters();
let mut swaps = vec![];
for SwapIndexesPayload { indexes } in params.into_iter() {
for SwapIndexesPayload { indexes, rename } in params.into_iter() {
// TODO: switch to deserr
let (lhs, rhs) = match indexes.as_slice() {
[lhs, rhs] => (lhs, rhs),
@ -110,7 +121,7 @@ pub async fn swap_indexes(
if !filters.is_index_authorized(lhs) || !filters.is_index_authorized(rhs) {
return Err(AuthenticationError::InvalidToken.into());
}
swaps.push(IndexSwap { indexes: (lhs.to_string(), rhs.to_string()) });
swaps.push(IndexSwap { indexes: (lhs.to_string(), rhs.to_string()), rename });
}
let task = KindWithContent::IndexSwap { swaps };

View File

@ -1142,7 +1142,7 @@ async fn test_summarized_index_swap() {
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
},
@r###"
@r#"
{
"uid": "[uid]",
"progress": null,
@ -1152,7 +1152,8 @@ async fn test_summarized_index_swap() {
"indexes": [
"doggos",
"cattos"
]
],
"rename": false
}
]
},
@ -1172,7 +1173,7 @@ async fn test_summarized_index_swap() {
"finishedAt": "[date]",
"batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task."
}
"###);
"#);
let doggos_index = server.unique_index();
doggos_index.create(None).await;

View File

@ -319,6 +319,24 @@ impl Index<'_, Shared> {
}
(task, code)
}
// Sends a raw PATCH body to this index and asserts that the resulting task
// FAILS. If the update unexpectedly succeeds, panics with the full task
// pretty-printed so the offending test surfaces the task payload.
pub async fn update_raw_index_fail<State>(
&self,
body: Value,
waiter: &Server<State>,
) -> (Value, StatusCode) {
let (mut task, code) = self._update_raw(body).await;
if code.is_success() {
// The HTTP layer accepts the payload asynchronously (202); the failure,
// if any, only materializes once the enqueued task has been processed.
task = waiter.wait_task(task.uid()).await;
if task.is_success() {
panic!(
"`update_raw_index_fail` succeeded: {}",
serde_json::to_string_pretty(&task).unwrap()
);
}
}
(task, code)
}
}
#[allow(dead_code)]
@ -370,6 +388,11 @@ impl<State> Index<'_, State> {
self.service.patch_encoded(url, body, self.encoder).await
}
// Low-level helper: PATCH /indexes/{uid} with an arbitrary JSON body,
// returning the raw response body and HTTP status code.
pub(super) async fn _update_raw(&self, body: Value) -> (Value, StatusCode) {
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.patch_encoded(url, body, self.encoder).await
}
pub(super) async fn _delete(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.delete(url).await

View File

@ -160,36 +160,20 @@ async fn update_index_bad_primary_key() {
"###);
}
#[actix_rt::test]
async fn update_index_immutable_uid() {
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.update_raw(json!({ "uid": "doggo" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Immutable field `uid`: expected one of `primaryKey`",
"code": "immutable_index_uid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_index_uid"
}
"###);
}
#[actix_rt::test]
async fn update_index_immutable_created_at() {
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.update_raw(json!({ "createdAt": "doggo" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
snapshot!(json_string!(response), @r#"
{
"message": "Immutable field `createdAt`: expected one of `primaryKey`",
"message": "Immutable field `createdAt`: expected one of `primaryKey`, `uid`",
"code": "immutable_index_created_at",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_index_created_at"
}
"###);
"#);
}
#[actix_rt::test]
@ -198,14 +182,14 @@ async fn update_index_immutable_updated_at() {
let index = server.unique_index();
let (response, code) = index.update_raw(json!({ "updatedAt": "doggo" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
snapshot!(json_string!(response), @r#"
{
"message": "Immutable field `updatedAt`: expected one of `primaryKey`",
"message": "Immutable field `updatedAt`: expected one of `primaryKey`, `uid`",
"code": "immutable_index_updated_at",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#immutable_index_updated_at"
}
"###);
"#);
}
#[actix_rt::test]
@ -214,14 +198,14 @@ async fn update_index_unknown_field() {
let index = server.unique_index();
let (response, code) = index.update_raw(json!({ "doggo": "bork" })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
snapshot!(json_string!(response), @r#"
{
"message": "Unknown field `doggo`: expected one of `primaryKey`",
"message": "Unknown field `doggo`: expected one of `primaryKey`, `uid`",
"code": "bad_request",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#bad_request"
}
"###);
"#);
}
#[actix_rt::test]

View File

@ -2,5 +2,6 @@ mod create_index;
mod delete_index;
mod errors;
mod get_index;
mod rename_index;
mod stats;
mod update_index;

View File

@ -0,0 +1,392 @@
use crate::common::{shared_does_not_exists_index, Server};
use crate::json;
// Happy path: renaming an index through PATCH /indexes/{uid} with `{ "uid": … }`
// moves the index to its new uid and makes the old uid return 404.
#[actix_rt::test]
async fn rename_index_via_patch() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index first
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Rename via PATCH update endpoint
let new_uid = format!("{}_renamed", index.uid);
let body = json!({ "uid": &new_uid });
let (task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
let response = server.wait_task(task.uid()).await.succeeded();
// Verify the rename succeeded
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexUpdate");
assert_eq!(response["details"]["newIndexUid"], new_uid);
// Check that old index doesn't exist
let (_, code) = index.get().await;
assert_eq!(code, 404);
// Check that new index exists
let (response, code) = server.service.get(format!("/indexes/{}", new_uid)).await;
assert_eq!(code, 200);
assert_eq!(response["uid"], new_uid);
}
// Renaming to a uid that is already taken must be accepted at the HTTP layer
// (202) but fail at task-processing time with `index_already_exists`.
#[actix_rt::test]
async fn rename_to_existing_index_via_patch() {
let server = Server::new_shared();
let index1 = server.unique_index();
let index2 = server.unique_index();
// Create both indexes
let (task, code) = index1.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
let (task, code) = index2.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Try to rename index1 to index2's uid via PATCH (should fail)
let body = json!({ "uid": index2.uid });
let (task, code) = index1.service.patch(format!("/indexes/{}", index1.uid), body).await;
assert_eq!(code, 202);
let response = server.wait_task(task.uid()).await.failed();
let expected_response = json!({
"message": format!("Index `{}` already exists.", index2.uid),
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
});
assert_eq!(response["error"], expected_response);
}
// Renaming an index that does not exist is enqueued (202) but the task must
// fail with `index_not_found`.
#[actix_rt::test]
async fn rename_non_existent_index_via_patch() {
let server = Server::new_shared();
let index = shared_does_not_exists_index().await;
// Try to rename non-existent index via PATCH
let body = json!({ "uid": "new_name" });
let (task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
let response = server.wait_task(task.uid()).await.failed();
let expected_response = json!({
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
assert_eq!(response["error"], expected_response);
}
// A syntactically invalid target uid is rejected synchronously with a
// 400 Bad Request — no task is ever enqueued.
#[actix_rt::test]
async fn rename_with_invalid_uid_via_patch() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index first
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Try to rename with invalid uid via PATCH (spaces and `!` are not allowed)
let body = json!({ "uid": "Invalid UID!" });
let (_, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 400);
}
// Renaming must carry the index's documents along: after the rename the same
// documents are served under the new uid.
#[actix_rt::test]
async fn rename_index_with_documents_via_patch() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index and add documents
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
let documents = json!([
{ "id": 1, "title": "Movie 1" },
{ "id": 2, "title": "Movie 2" }
]);
let (task, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Rename the index via PATCH
let new_uid = format!("{}_renamed", index.uid);
let body = json!({ "uid": &new_uid });
let (task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Verify documents are accessible in renamed index
let (response, code) = server.service.get(format!("/indexes/{}/documents", new_uid)).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
// A single PATCH may both rename the index and set its primary key; the task
// details report both `newIndexUid` and `primaryKey`.
#[actix_rt::test]
async fn rename_index_and_update_primary_key_via_patch() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index without primary key
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Rename index and set primary key at the same time
let new_uid = format!("{}_renamed", index.uid);
let body = json!({
"uid": &new_uid,
"primaryKey": "id"
});
let (task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
let response = server.wait_task(task.uid()).await.succeeded();
// Verify the rename succeeded and primary key was set
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexUpdate");
assert_eq!(response["details"]["newIndexUid"], new_uid);
assert_eq!(response["details"]["primaryKey"], "id");
// Check that old index doesn't exist
let (_, code) = index.get().await;
assert_eq!(code, 404);
// Check that new index exists with correct primary key
let (response, code) = server.service.get(format!("/indexes/{}", new_uid)).await;
assert_eq!(code, 200);
assert_eq!(response["uid"], new_uid);
assert_eq!(response["primaryKey"], "id");
}
// Index stats (here: numberOfDocuments) must be identical before and after a
// rename — the rename moves the index, it does not rebuild it.
#[actix_rt::test]
async fn rename_index_and_verify_stats() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index and add documents
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
let documents = json!([
{ "id": 1, "title": "Movie 1", "genre": "Action" },
{ "id": 2, "title": "Movie 2", "genre": "Drama" },
{ "id": 3, "title": "Movie 3", "genre": "Comedy" }
]);
let (task, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Get stats before rename
let (stats_before, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(stats_before["numberOfDocuments"], 3);
// Rename the index
let new_uid = format!("{}_renamed", index.uid);
let body = json!({ "uid": &new_uid });
let (task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Get stats after rename using the new uid
let (stats_after, code) = server.service.get(format!("/indexes/{}/stats", new_uid)).await;
assert_eq!(code, 200);
assert_eq!(stats_after["numberOfDocuments"], 3);
assert_eq!(stats_after["numberOfDocuments"], stats_before["numberOfDocuments"]);
}
// All index settings (searchable/filterable/sortable attributes, ranking
// rules, stop words, synonyms) must survive a rename unchanged.
#[actix_rt::test]
async fn rename_index_preserves_settings() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Configure settings
let settings = json!({
"searchableAttributes": ["title", "description"],
"filterableAttributes": ["genre", "year"],
"sortableAttributes": ["year"],
"rankingRules": [
"words",
"typo",
"proximity",
"attribute",
"sort",
"exactness"
],
"stopWords": ["the", "a", "an"],
"synonyms": {
"movie": ["film", "picture"],
"great": ["awesome", "excellent"]
}
});
let (task, code) = index.update_settings(settings.clone()).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Rename the index
let new_uid = format!("{}_renamed", index.uid);
let body = json!({ "uid": &new_uid });
let (task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Verify settings are preserved
let (settings_after, code) = server.service.get(format!("/indexes/{}/settings", new_uid)).await;
assert_eq!(code, 200);
assert_eq!(settings_after["searchableAttributes"], json!(["title", "description"]));
assert_eq!(settings_after["filterableAttributes"], json!(["genre", "year"]));
assert_eq!(settings_after["sortableAttributes"], json!(["year"]));
// Check stopWords contains the same items (order may vary)
let stop_words = settings_after["stopWords"].as_array().unwrap();
assert_eq!(stop_words.len(), 3);
assert!(stop_words.contains(&json!("the")));
assert!(stop_words.contains(&json!("a")));
assert!(stop_words.contains(&json!("an")));
assert_eq!(settings_after["synonyms"]["movie"], json!(["film", "picture"]));
assert_eq!(settings_after["synonyms"]["great"], json!(["awesome", "excellent"]));
}
// End-to-end search behavior (full-text query, filter, sort, facet search)
// must produce the same results under the new uid as it did before the rename.
#[actix_rt::test]
async fn rename_index_preserves_search_functionality() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index and add documents
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
let documents = json!([
{ "id": 1, "title": "The Matrix", "genre": "Sci-Fi", "year": 1999 },
{ "id": 2, "title": "Inception", "genre": "Sci-Fi", "year": 2010 },
{ "id": 3, "title": "The Dark Knight", "genre": "Action", "year": 2008 }
]);
let (task, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Make settings filterable
let settings = json!({
"filterableAttributes": ["genre", "year"],
"sortableAttributes": ["year"]
});
let (task, code) = index.update_settings(settings).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Search before rename
let search_params = json!({
"q": "matrix",
"filter": "genre = 'Sci-Fi'",
"sort": ["year:asc"]
});
let (results_before, code) = index.search_post(search_params.clone()).await;
assert_eq!(code, 200);
assert_eq!(results_before["hits"].as_array().unwrap().len(), 1);
assert_eq!(results_before["hits"][0]["title"], "The Matrix");
// Rename the index
let new_uid = format!("{}_renamed", index.uid);
let body = json!({ "uid": &new_uid });
let (task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Search after rename
let (results_after, code) =
server.service.post(format!("/indexes/{}/search", new_uid), search_params).await;
assert_eq!(code, 200);
assert_eq!(results_after["hits"].as_array().unwrap().len(), 1);
assert_eq!(results_after["hits"][0]["title"], "The Matrix");
// Verify facet search also works
let facet_search = json!({
"facetQuery": "Sci",
"facetName": "genre"
});
let (facet_results, code) =
server.service.post(format!("/indexes/{}/facet-search", new_uid), facet_search).await;
assert_eq!(code, 200);
assert_eq!(facet_results["facetHits"].as_array().unwrap().len(), 1);
assert_eq!(facet_results["facetHits"][0]["value"], "Sci-Fi");
}
// Exercises a rename racing with other tasks: documents enqueued against the
// old uid while the rename is pending, then more documents added to the new
// uid. Only asserts the new index is non-empty, since whether the racing
// addition lands before or after the rename is timing-dependent.
#[actix_rt::test]
async fn rename_index_with_pending_tasks() {
let server = Server::new_shared();
let index = server.unique_index();
// Create index
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Add initial documents
let documents = json!([
{ "id": 1, "title": "Document 1" }
]);
let (task, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Start a rename
let new_uid = format!("{}_renamed", index.uid);
let body = json!({ "uid": &new_uid });
let (rename_task, code) = index.service.patch(format!("/indexes/{}", index.uid), body).await;
assert_eq!(code, 202);
// Try to add documents to the old index while rename is pending
let more_documents = json!([
{ "id": 2, "title": "Document 2" }
]);
let (_, code) = index.add_documents(more_documents, None).await;
assert_eq!(code, 202);
// Wait for rename to complete
server.wait_task(rename_task.uid()).await.succeeded();
// Add documents to the new index
let final_documents = json!([
{ "id": 3, "title": "Document 3" }
]);
let (task, code) =
server.service.post(format!("/indexes/{}/documents", new_uid), final_documents).await;
assert_eq!(code, 202);
server.wait_task(task.uid()).await.succeeded();
// Verify all documents are accessible
let (response, code) = server.service.get(format!("/indexes/{}/documents", new_uid)).await;
assert_eq!(code, 200);
let docs = response["results"].as_array().unwrap();
assert!(!docs.is_empty()); // At least the initial document should be there
}

View File

@ -1,8 +1,12 @@
use meili_snap::snapshot;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::{shared_does_not_exists_index, shared_index_with_documents, Server};
use crate::common::{
shared_does_not_exists_index, shared_empty_index, shared_index_with_documents, Server,
};
use crate::json;
#[actix_rt::test]
@ -106,3 +110,85 @@ async fn error_update_unexisting_index() {
assert_eq!(response["error"], expected_response);
}
// Renames an index via the raw update endpoint and checks the renamed index's
// metadata: uid changed, createdAt strictly before updatedAt (the rename
// bumps updatedAt), primaryKey untouched, and exactly 4 top-level fields.
#[actix_rt::test]
async fn update_index_name() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
// Borrow another unique index only to obtain a fresh, unused uid.
let new_index = server.unique_index();
let (task, _code) = index.update_raw(json!({ "uid": new_index.uid })).await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = new_index.get().await;
snapshot!(code, @"200 OK");
assert_eq!(response["uid"], new_index.uid);
assert!(response.get("createdAt").is_some());
assert!(response.get("updatedAt").is_some());
let created_at =
OffsetDateTime::parse(response["createdAt"].as_str().unwrap(), &Rfc3339).unwrap();
let updated_at =
OffsetDateTime::parse(response["updatedAt"].as_str().unwrap(), &Rfc3339).unwrap();
assert!(created_at < updated_at, "{created_at} should be inferior to {updated_at}");
snapshot!(response["primaryKey"], @"null");
snapshot!(response.as_object().unwrap().len(), @"4");
}
// Renaming an index to its current uid must be a no-op: the full GET
// response is byte-identical before and after the update task.
#[actix_rt::test]
async fn update_index_name_to_itself() {
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (initial_response, code) = index.get().await;
snapshot!(code, @"200 OK");
let (task, _code) = index.update_raw(json!({ "uid": index.uid })).await;
server.wait_task(task.uid()).await.succeeded();
let (new_response, code) = index.get().await;
snapshot!(code, @"200 OK");
// Renaming an index to its own name should not change anything
assert_eq!(initial_response, new_response);
}
// Renaming onto an existing index uid: the task must fail with
// `index_already_exists`, and its details must still report both
// `oldIndexUid` and `newIndexUid` (snapshotted in full below).
#[actix_rt::test]
async fn error_update_index_name_to_already_existing_index() {
let server = Server::new_shared();
let base_index = shared_empty_index().await;
let index = shared_index_with_documents().await;
let (task, _status_code) =
index.update_raw_index_fail(json!({ "uid": base_index.uid }), server).await;
snapshot!(task, @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "SHARED_DOCUMENTS",
"status": "failed",
"type": "indexUpdate",
"canceledBy": null,
"details": {
"primaryKey": null,
"oldIndexUid": "SHARED_DOCUMENTS",
"newIndexUid": "EMPTY_INDEX"
},
"error": {
"message": "Index `EMPTY_INDEX` already exists.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
}

View File

@ -1,6 +1,9 @@
use meili_snap::*;
use crate::common::Server;
use crate::common::{
shared_empty_index, shared_index_with_documents, shared_index_with_geo_documents,
shared_index_with_nested_documents, Server,
};
use crate::json;
#[actix_rt::test]
@ -92,3 +95,112 @@ async fn swap_indexes_bad_indexes() {
}
"###);
}
// Deserialization guard: a non-boolean `rename` value in a swap payload is
// rejected synchronously with 400 and the `invalid_swap_rename` error code.
#[actix_rt::test]
async fn swap_indexes_bad_rename() {
let server = Server::new_shared();
let (response, code) =
server.index_swap(json!([{ "indexes": ["kefir", "intel"], "rename": "hello" }])).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Invalid value type at `[0].rename`: expected a boolean, but found a string: `\"hello\"`",
"code": "invalid_swap_rename",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_swap_rename"
}
"#);
}
// Rename-mode swaps targeting existing indexes must fail with
// `index_already_exists`. Two cases are snapshotted: a single conflicting
// rename, and a multi-swap payload where the error message aggregates every
// conflicting target uid.
#[actix_rt::test]
async fn swap_indexes_rename_to_already_existing_index() {
let server = Server::new_shared();
let already_existing_index = shared_empty_index().await;
let base_index = shared_index_with_documents().await;
let (response, _code) = server
.index_swap(
json!([{ "indexes": [base_index.uid, already_existing_index.uid], "rename": true }]),
)
.await;
let response = server.wait_task(response.uid()).await;
snapshot!(response, @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "failed",
"type": "indexSwap",
"canceledBy": null,
"details": {
"swaps": [
{
"indexes": [
"SHARED_DOCUMENTS",
"EMPTY_INDEX"
],
"rename": true
}
]
},
"error": {
"message": "Cannot rename `SHARED_DOCUMENTS` to `EMPTY_INDEX` as the index already exists. Hint: You can remove `EMPTY_INDEX` first and then do your remove.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
// Second case: two rename swaps that both conflict — the error lists all
// conflicting target uids in a single message.
let base_index_2 = shared_index_with_geo_documents().await;
let already_existing_index_2 = shared_index_with_nested_documents().await;
let (response, _code) = server
.index_swap(
json!([{ "indexes": [base_index.uid, already_existing_index.uid], "rename": true }, { "indexes": [base_index_2.uid, already_existing_index_2.uid], "rename": true }]),
)
.await;
let response = server.wait_task(response.uid()).await;
snapshot!(response, @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "failed",
"type": "indexSwap",
"canceledBy": null,
"details": {
"swaps": [
{
"indexes": [
"SHARED_DOCUMENTS",
"EMPTY_INDEX"
],
"rename": true
},
{
"indexes": [
"SHARED_GEO_DOCUMENTS",
"SHARED_NESTED_DOCUMENTS"
],
"rename": true
}
]
},
"error": {
"message": "The following indexes are being renamed but cannot because their new name conflicts with an already existing index: `EMPTY_INDEX`, `SHARED_NESTED_DOCUMENTS`. Renaming doesn't overwrite the other index name.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
},
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
"#);
}

View File

@ -73,7 +73,7 @@ async fn swap_indexes() {
snapshot!(code, @"200 OK");
// Notice how the task 0 which was initially representing the creation of the index `A` now represents the creation of the index `B`.
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r###"
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
@ -89,7 +89,8 @@ async fn swap_indexes() {
"indexes": [
"a",
"b"
]
],
"rename": false
}
]
},
@ -102,7 +103,7 @@ async fn swap_indexes() {
{
"uid": 1,
"batchUid": 1,
"indexUid": "a",
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -139,7 +140,7 @@ async fn swap_indexes() {
"from": 2,
"next": null
}
"###);
"#);
// BUT, the data in `a` should now points to the data that was in `b`.
// And the opposite is true as well
@ -228,7 +229,7 @@ async fn swap_indexes() {
// 2. stays unchanged
// 3. now have the indexUid `d` instead of `c`
// 4. now have the indexUid `c` instead of `d`
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r###"
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
@ -244,13 +245,15 @@ async fn swap_indexes() {
"indexes": [
"a",
"b"
]
],
"rename": false
},
{
"indexes": [
"c",
"d"
]
],
"rename": false
}
]
},
@ -263,7 +266,7 @@ async fn swap_indexes() {
{
"uid": 4,
"batchUid": 4,
"indexUid": "c",
"indexUid": "d",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -307,7 +310,8 @@ async fn swap_indexes() {
"indexes": [
"b",
"a"
]
],
"rename": false
}
]
},
@ -337,7 +341,7 @@ async fn swap_indexes() {
{
"uid": 0,
"batchUid": 0,
"indexUid": "a",
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -357,7 +361,7 @@ async fn swap_indexes() {
"from": 5,
"next": null
}
"###);
"#);
// - The data in `a` should point to `a`.
// - The data in `b` should point to `b`.
@ -372,3 +376,255 @@ async fn swap_indexes() {
let (res, _) = d.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(res["results"], @r###"[{"id":1,"index":"c"}]"###);
}
#[actix_rt::test]
async fn swap_rename_indexes() {
let server = Server::new().await;
let a = server.index("a");
let b = server.index("b");
a.create(None).await;
a.add_documents(json!({ "id": 1, "index": "a"}), None).await;
let (res, _code) = server.index_swap(json!([{ "indexes": ["a", "b"], "rename": true }])).await;
server.wait_task(res.uid()).await.succeeded();
let (tasks, _code) = server.tasks().await;
// Notice how the task 0 which was initially representing the creation of the index `A` now represents the creation of the index `B`.
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
"uid": 2,
"batchUid": 2,
"indexUid": null,
"status": "succeeded",
"type": "indexSwap",
"canceledBy": null,
"details": {
"swaps": [
{
"indexes": [
"a",
"b"
],
"rename": true
}
]
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 1,
"batchUid": 1,
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "b",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": null
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 3,
"limit": 20,
"from": 2,
"next": null
}
"#);
// BUT, `a` should not exists
let (res, code) = a.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"404 Not Found");
snapshot!(res["results"], @"null");
// And its data should be in b
let (res, code) = b.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(code, @"200 OK");
snapshot!(res["results"], @r#"[{"id":1,"index":"a"}]"#);
// No tasks should be linked to the index a
let (tasks, _code) = server.tasks_filter("indexUids=a").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [],
"total": 1,
"limit": 20,
"from": null,
"next": null
}
"#);
// They should be linked to the index b
let (tasks, _code) = server.tasks_filter("indexUids=b").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
"uid": 1,
"batchUid": 1,
"indexUid": "b",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "b",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": null
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 3,
"limit": 20,
"from": 1,
"next": null
}
"#);
    // ===== Now, we can delete the index `b`, but its tasks will stay
// if we then make a new `b` index and rename it to be called `a`
// the tasks currently available in `b` should not be moved
let (res, _) = b.delete().await;
server.wait_task(res.uid()).await.succeeded();
b.create(Some("kefir")).await;
let (res, _code) = server.index_swap(json!([{ "indexes": ["b", "a"], "rename": true }])).await;
server.wait_task(res.uid()).await.succeeded();
// `a` now contains everything
let (tasks, _code) = server.tasks_filter("indexUids=a").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [
{
"uid": 4,
"batchUid": 4,
"indexUid": "a",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": "kefir"
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 3,
"batchUid": 3,
"indexUid": "a",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 1,
"batchUid": 1,
"indexUid": "a",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "a",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
"details": {
"primaryKey": null
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 6,
"limit": 20,
"from": 4,
"next": null
}
"#);
// And `b` is empty
let (tasks, _code) = server.tasks_filter("indexUids=b").await;
snapshot!(json_string!(tasks, { ".results[].duration" => "[duration]", ".results[].enqueuedAt" => "[date]", ".results[].startedAt" => "[date]", ".results[].finishedAt" => "[date]" }), @r#"
{
"results": [],
"total": 2,
"limit": 20,
"from": null,
"next": null
}
"#);
}

View File

@ -895,7 +895,7 @@ async fn test_summarized_index_swap() {
server.wait_task(task.uid()).await.failed();
let (task, _) = server.get_task(task.uid()).await;
snapshot!(task,
@r###"
@r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
@ -909,7 +909,8 @@ async fn test_summarized_index_swap() {
"indexes": [
"doggos",
"cattos"
]
],
"rename": false
}
]
},
@ -924,7 +925,7 @@ async fn test_summarized_index_swap() {
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
"#);
let doggos_index = server.unique_index();
let (task, _code) = doggos_index.create(None).await;
@ -941,7 +942,7 @@ async fn test_summarized_index_swap() {
let (task, _) = server.get_task(task.uid()).await;
snapshot!(json_string!(task,
{ ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.indexes[0]" => "doggos", ".**.indexes[1]" => "cattos", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
@r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
@ -955,7 +956,8 @@ async fn test_summarized_index_swap() {
"indexes": [
"doggos",
"cattos"
]
],
"rename": false
}
]
},
@ -965,7 +967,7 @@ async fn test_summarized_index_swap() {
"startedAt": "[date]",
"finishedAt": "[date]"
}
"###);
"#);
}
#[actix_web::test]