diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs
index a409aba03..9d6bee7c1 100644
--- a/crates/meilisearch/tests/batches/mod.rs
+++ b/crates/meilisearch/tests/batches/mod.rs
@@ -1,21 +1,37 @@
 mod errors;
 
+use insta::internals::{Content, ContentPath};
 use meili_snap::insta::assert_json_snapshot;
-use meili_snap::snapshot;
+use meili_snap::{json_string, snapshot};
+use once_cell::sync::Lazy;
+use regex::Regex;
 
 use crate::common::Server;
 use crate::json;
 
+// Task uids are not deterministic on a shared server, so `batchStrategy`
+// messages are normalized to `task with id X` before snapshots are compared.
+static TASK_WITH_ID_RE: Lazy<Regex> =
+    Lazy::new(|| Regex::new(r"task with id (\d+) of type").unwrap());
+
+fn task_with_id_redaction(value: Content, _path: ContentPath) -> Content {
+    match value {
+        Content::String(s) => {
+            let replaced = TASK_WITH_ID_RE.replace_all(&s, "task with id X of type");
+            Content::String(replaced.to_string())
+        }
+        _ => value.clone(),
+    }
+}
+
 #[actix_rt::test]
 async fn error_get_unexisting_batch_status() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _coder) = index.create(None).await;
     server.wait_task(task.uid()).await.succeeded();
-    let (response, code) = index.get_batch(1).await;
+    // `u32::MAX` is a batch uid that can never exist on the shared server.
+    let (response, code) = index.get_batch(u32::MAX).await;
     let expected_response = json!({
-        "message": "Batch `1` not found.",
+        "message": format!("Batch `{}` not found.", u32::MAX),
         "code": "batch_not_found",
         "type": "invalid_request",
         "link": "https://docs.meilisearch.com/errors#batch_not_found"
@@ -27,18 +43,18 @@ async fn error_get_unexisting_batch_status() {
 
 #[actix_rt::test]
 async fn get_batch_status() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) = index.create(None).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (_response, code) = index.get_batch(0).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (_response, code) = index.get_batch(task.batch_uid()).await;
     assert_eq!(code, 200);
 }
 
 #[actix_rt::test]
 async fn list_batches() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) = index.create(None).await;
     server.wait_task(task.uid()).await.succeeded();
     let (task, _status_code) = index.create(None).await;
@@ -62,7 +78,7 @@ async fn list_batches_pagination_and_reverse() {
         let index = server.index(format!("test-{i}"));
         last_batch = Some(index.create(None).await.0.uid());
     }
-    server.wait_task(last_batch.unwrap()).await;
+    server.wait_task(last_batch.unwrap()).await.succeeded();
 
     let (response, code) = server.batches_filter("limit=3").await;
     assert_eq!(code, 200);
@@ -119,91 +135,91 @@ async fn list_batches_with_star_filters() {
     let (response, code) =
         index.service.get("/batches?types=*,documentAdditionOrUpdate&statuses=*").await;
-    assert_eq!(code, 200, "{:?}", response);
+    assert_eq!(code, 200, "{response:?}");
     assert_eq!(response["results"].as_array().unwrap().len(), 2);
 
     let (response, code) = index
         .service
         .get("/batches?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test")
         .await;
-    assert_eq!(code, 200, "{:?}", response);
+    assert_eq!(code, 200, "{response:?}");
     assert_eq!(response["results"].as_array().unwrap().len(), 2);
 
     let (response, code) = index
         .service
         .get("/batches?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test,*")
         .await;
-    assert_eq!(code, 200, "{:?}", response);
"{:?}", response); + assert_eq!(code, 200, "{response:?}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } #[actix_rt::test] async fn list_batches_status_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; server.wait_task(task.uid()).await.failed(); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = index.filtered_batches(&[], &["succeeded", "failed"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } #[actix_rt::test] async fn list_batches_type_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _) = index.create(None).await; server.wait_task(task.uid()).await.succeeded(); let (task, _) = index.delete().await; server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = - index.filtered_batches(&["indexCreation", "IndexDeletion"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + index.filtered_batches(&["indexCreation", "indexDeletion"], &[], &[]).await; + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); } #[actix_rt::test] async fn list_batches_invalid_canceled_by_filter() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&[], &[], &["0"]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 0); } #[actix_rt::test] async fn list_batches_status_and_type_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.update(Some("id")).await; server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &["failed"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 0); let 
@@ -213,17 +229,17 @@ async fn list_batches_status_and_type_filtered() {
             &[],
         )
         .await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
     assert_eq!(response["results"].as_array().unwrap().len(), 2);
 }
 
 #[actix_rt::test]
 async fn list_batch_filter_error() {
-    let server = Server::new().await;
+    let server = Server::new_shared();
 
     let (response, code) = server.batches_filter("lol=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r#"
+    assert_eq!(code, 400, "{response}");
+    snapshot!(json_string!(response), @r#"
     {
       "message": "Unknown parameter `lol`: expected one of `limit`, `from`, `reverse`, `batchUids`, `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
       "code": "bad_request",
@@ -233,8 +249,8 @@ async fn list_batch_filter_error() {
     "#);
 
     let (response, code) = server.batches_filter("uids=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r#"
+    assert_eq!(code, 400, "{response}");
+    snapshot!(json_string!(response), @r#"
     {
       "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer",
       "code": "invalid_task_uids",
@@ -244,8 +260,8 @@ async fn list_batch_filter_error() {
     "#);
 
     let (response, code) = server.batches_filter("from=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r#"
+    assert_eq!(code, 400, "{response}");
+    snapshot!(json_string!(response), @r#"
     {
       "message": "Invalid value in parameter `from`: could not parse `pied` as a positive integer",
       "code": "invalid_task_from",
@@ -255,8 +271,8 @@ async fn list_batch_filter_error() {
     "#);
 
     let (response, code) = server.batches_filter("beforeStartedAt=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r#"
+    assert_eq!(code, 400, "{response}");
+    snapshot!(json_string!(response), @r#"
     {
       "message": "Invalid value in parameter `beforeStartedAt`: `pied` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
       "code": "invalid_task_before_started_at",
@@ -268,25 +284,28 @@
 
 #[actix_web::test]
 async fn test_summarized_document_addition_or_update() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) =
         index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(0).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
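+            // Batch uids and index uuids differ from run to run on the
+            // shared server, so they are redacted before snapshotting.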
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
             ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
-            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
+            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
-      "uid": 0,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "receivedDocuments": 1,
@@ -300,9 +319,7 @@ async fn test_summarized_document_addition_or_update() {
         "types": {
           "documentAdditionOrUpdate": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]",
         "writeChannelCongestion": "[writeChannelCongestion]",
         "internalDatabaseSizes": "[internalDatabaseSizes]"
@@ -316,21 +333,24 @@ async fn test_summarized_document_addition_or_update() {
 
     let (task, _status_code) =
         index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(1).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
             ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
-            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
+            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
-      "uid": 1,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "receivedDocuments": 1,
@@ -344,9 +364,7 @@ async fn test_summarized_document_addition_or_update() {
         "types": {
           "documentAdditionOrUpdate": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]",
         "writeChannelCongestion": "[writeChannelCongestion]"
       },
@@ -360,23 +378,29 @@ async fn test_summarized_document_addition_or_update() {
 
 #[actix_web::test]
 async fn test_summarized_delete_documents_by_batch() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await;
-    server.wait_task(task.uid()).await.failed();
-    let (batch, _) = index.get_batch(0).await;
+    let server = Server::new_shared();
+    let index = server.unique_index();
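+    // The index has not been created yet, so this deletion task is expected
+    // to fail whatever document ids are provided.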
+    let task_uid_1 = (u32::MAX - 1) as u64;
+    let task_uid_2 = (u32::MAX - 2) as u64;
+    let task_uid_3 = (u32::MAX - 3) as u64;
+    let (task, _status_code) =
+        index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await;
+    let task = server.wait_task(task.uid()).await.failed();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
-      "uid": 0,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "providedIds": 3,
@@ -390,9 +414,7 @@ async fn test_summarized_delete_documents_by_batch() {
         "types": {
           "documentDeletion": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -404,21 +426,24 @@ async fn test_summarized_delete_documents_by_batch() {
 
     index.create(None).await;
     let (task, _status_code) = index.delete_batch(vec![42]).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(2).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
             ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
-            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
+            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
-      "uid": 2,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "providedIds": 1,
@@ -432,9 +457,7 @@ async fn test_summarized_delete_documents_by_batch() {
         "types": {
           "documentDeletion": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -447,25 +470,28 @@ async fn test_summarized_delete_documents_by_batch() {
 
 #[actix_web::test]
 async fn test_summarized_delete_documents_by_filter() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) =
         index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    server.wait_task(task.uid()).await.failed();
-    let (batch, _) = index.get_batch(0).await;
+    let task = server.wait_task(task.uid()).await.failed();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
-      "uid": 0,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "providedIds": 0,
@@ -480,9 +506,7 @@ async fn test_summarized_delete_documents_by_filter() {
         "types": {
           "documentDeletion": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -495,21 +519,24 @@ async fn test_summarized_delete_documents_by_filter() {
 
     index.create(None).await;
     let (task, _status_code) =
         index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    server.wait_task(task.uid()).await.failed();
-    let (batch, _) = index.get_batch(2).await;
+    let task = server.wait_task(task.uid()).await.failed();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
             ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
-            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
+            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
-      "uid": 2,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "providedIds": 0,
@@ -524,9 +551,7 @@ async fn test_summarized_delete_documents_by_filter() {
         "types": {
           "documentDeletion": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -539,21 +564,24 @@ async fn test_summarized_delete_documents_by_filter() {
 
     index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
     let (task, _status_code) =
         index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(4).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
             ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
-            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
+            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks"
         },
         @r###"
     {
-      "uid": 4,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "providedIds": 0,
@@ -568,9 +596,7 @@ async fn test_summarized_delete_documents_by_filter() {
         "types": {
           "documentDeletion": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -583,11 +609,11 @@ async fn test_summarized_delete_documents_by_filter() {
 
 #[actix_web::test]
 async fn test_summarized_delete_document_by_id() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) = index.delete_document(1).await;
-    server.wait_task(task.uid()).await.failed();
-    let (batch, _) = index.get_batch(0).await;
+    let task = server.wait_task(task.uid()).await.failed();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
             ".uid" => "[uid]",
@@ -596,7 +622,9 @@ async fn test_summarized_delete_document_by_id() {
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
@@ -614,9 +642,7 @@ async fn test_summarized_delete_document_by_id() {
         "types": {
           "documentDeletion": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -628,21 +654,24 @@ async fn test_summarized_delete_document_by_id() {
 
     index.create(None).await;
     let (task, _status_code) = index.delete_document(42).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(2).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
             ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
-            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
+            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks",
         },
         @r###"
     {
-      "uid": 2,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "providedIds": 1,
@@ -656,9 +685,7 @@ async fn test_summarized_delete_document_by_id() {
         "types": {
           "documentDeletion": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -671,12 +698,12 @@ async fn test_summarized_delete_document_by_id() {
 
 #[actix_web::test]
 async fn test_summarized_settings_update() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     // here we should find my payload even in the failed batch.
     let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await;
-    meili_snap::snapshot!(code, @"400 Bad Request");
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
     {
      "message": "Invalid value at `.rankingRules[0]`: `custom` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules.",
       "code": "invalid_settings_ranking_rules",
@@ -686,21 +713,24 @@ async fn test_summarized_settings_update() {
     "###);
 
     let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(0).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
             ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
-            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
+            ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => "batched all enqueued tasks"
         },
         @r###"
     {
-      "uid": 0,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "displayedAttributes": [
@@ -723,9 +753,7 @@ async fn test_summarized_settings_update() {
         "types": {
           "settingsUpdate": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
@@ -738,23 +766,26 @@ async fn test_summarized_settings_update() {
 
 #[actix_web::test]
 async fn test_summarized_index_creation() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) = index.create(None).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(0).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
         },
         @r###"
     {
-      "uid": 0,
+      "uid": "[uid]",
       "progress": null,
       "details": {},
       "stats": {
@@ -765,33 +796,34 @@ async fn test_summarized_index_creation() {
         "types": {
           "indexCreation": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
       "startedAt": "[date]",
       "finishedAt": "[date]",
-      "batchStrategy": "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task."
+      "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task."
     }
     "###);
 
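+    // The unique index already exists at this point, so a second creation
+    // task is expected to fail.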
} "###); let (task, _status_code) = index.create(Some("doggos")).await; - server.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(1).await; + let task = server.wait_task(task.uid()).await.failed(); + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "primaryKey": "doggos" @@ -804,23 +836,21 @@ async fn test_summarized_index_creation() { "types": { "indexCreation": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task." + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); } #[actix_web::test] async fn test_summarized_index_deletion() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (ret, _code) = index.delete().await; let batch = server.wait_task(ret.uid()).await.failed(); snapshot!(batch, @@ -828,7 +858,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "failed", "type": "indexDeletion", "canceledBy": null, @@ -836,7 +866,7 @@ async fn test_summarized_index_deletion() { "deletedDocuments": 0 }, "error": { - "message": "Index `test` not found.", + "message": "Index `[uuid]` not found.", "code": "index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" @@ -859,7 +889,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, @@ -882,7 +912,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "succeeded", "type": "indexDeletion", "canceledBy": null, @@ -905,7 +935,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "failed", "type": "indexDeletion", "canceledBy": null, @@ -913,7 +943,7 @@ async fn test_summarized_index_deletion() { "deletedDocuments": 0 }, "error": { - "message": "Index `test` not found.", + "message": "Index `[uuid]` not found.", "code": "index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" @@ -928,24 +958,27 @@ async fn test_summarized_index_deletion() { #[actix_web::test] async fn test_summarized_index_update() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); // If the index doesn't exist yet, we should get errors with or without the 
     // If the index doesn't exist yet, we should get errors with or without the primary key.
     let (task, _status_code) = index.update(None).await;
-    server.wait_task(task.uid()).await.failed();
-    let (batch, _) = index.get_batch(0).await;
+    let task = server.wait_task(task.uid()).await.failed();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
         },
         @r###"
     {
-      "uid": 0,
+      "uid": "[uid]",
       "progress": null,
       "details": {},
       "stats": {
@@ -956,33 +989,34 @@ async fn test_summarized_index_update() {
         "types": {
           "indexUpdate": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
       "startedAt": "[date]",
       "finishedAt": "[date]",
-      "batchStrategy": "created batch containing only task with id 0 of type `indexUpdate` that cannot be batched with any other task."
+      "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task."
     }
     "###);
 
     let (task, _status_code) = index.update(Some("bones")).await;
-    server.wait_task(task.uid()).await.failed();
-    let (batch, _) = index.get_batch(1).await;
+    let task = server.wait_task(task.uid()).await.failed();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
         },
         @r###"
     {
-      "uid": 1,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "primaryKey": "bones"
@@ -995,36 +1029,37 @@ async fn test_summarized_index_update() {
         "types": {
           "indexUpdate": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
       "startedAt": "[date]",
       "finishedAt": "[date]",
-      "batchStrategy": "created batch containing only task with id 1 of type `indexUpdate` that cannot be batched with any other task."
+      "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task."
     }
     "###);
 
-    // And run the same two tests once the index do exists.
+    // And run the same two tests once the index does exist.
     index.create(None).await;
 
     let (task, _status_code) = index.update(None).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(3).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
         },
         @r###"
     {
-      "uid": 3,
+      "uid": "[uid]",
       "progress": null,
      "details": {},
       "stats": {
@@ -1035,33 +1070,34 @@ async fn test_summarized_index_update() {
         "types": {
           "indexUpdate": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
       "startedAt": "[date]",
       "finishedAt": "[date]",
-      "batchStrategy": "created batch containing only task with id 3 of type `indexUpdate` that cannot be batched with any other task."
+      "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task."
     }
     "###);
 
     let (task, _status_code) = index.update(Some("bones")).await;
-    server.wait_task(task.uid()).await.succeeded();
-    let (batch, _) = index.get_batch(4).await;
+    let task = server.wait_task(task.uid()).await.succeeded();
+    let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
+            ".uid" => "[uid]",
             ".duration" => "[duration]",
             ".enqueuedAt" => "[date]",
             ".startedAt" => "[date]",
             ".finishedAt" => "[date]",
             ".stats.progressTrace" => "[progressTrace]",
-            ".stats.writeChannelCongestion" => "[writeChannelCongestion]"
+            ".stats.writeChannelCongestion" => "[writeChannelCongestion]",
+            ".stats.indexUids" => r#"{"[uuid]": 1}"#,
+            ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
         },
         @r###"
     {
-      "uid": 4,
+      "uid": "[uid]",
       "progress": null,
       "details": {
         "primaryKey": "bones"
@@ -1074,41 +1110,41 @@ async fn test_summarized_index_update() {
         "types": {
           "indexUpdate": 1
         },
-        "indexUids": {
-          "test": 1
-        },
+        "indexUids": "{\"[uuid]\": 1}",
         "progressTrace": "[progressTrace]"
       },
       "duration": "[duration]",
       "startedAt": "[date]",
       "finishedAt": "[date]",
-      "batchStrategy": "created batch containing only task with id 4 of type `indexUpdate` that cannot be batched with any other task."
+      "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task."
     }
     "###);
 }
 
 #[actix_web::test]
 async fn test_summarized_index_swap() {
-    let server = Server::new().await;
+    let server = Server::new_shared();
     let (task, _status_code) = server
         .index_swap(json!([
             { "indexes": ["doggos", "cattos"] }
         ]))
         .await;
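+    // Neither "doggos" nor "cattos" is ever created here, so this swap task
+    // is expected to fail.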
} "###); } #[actix_web::test] async fn test_summarized_index_swap() { - let server = Server::new().await; + let server = Server::new_shared(); let (task, _status_code) = server .index_swap(json!([ { "indexes": ["doggos", "cattos"] } ])) .await; - server.wait_task(task.uid()).await.failed(); - let (batch, _) = server.get_batch(0).await; + let task = server.wait_task(task.uid()).await.failed(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "swaps": [ @@ -1134,31 +1170,36 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id 0 of type `indexSwap` that cannot be batched with any other task." + "batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task." } "###); - server.index("doggos").create(None).await; - let (task, _status_code) = server.index("cattos").create(None).await; + let doggos_index = server.unique_index(); + doggos_index.create(None).await; + let cattos_index = server.unique_index(); + let (task, _status_code) = cattos_index.create(None).await; server .index_swap(json!([ - { "indexes": ["doggos", "cattos"] } + { "indexes": [doggos_index.uid, cattos_index.uid] } ])) .await; - server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_batch(1).await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -1169,46 +1210,47 @@ async fn test_summarized_index_swap() { "types": { "indexCreation": 1 }, - "indexUids": { - "doggos": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task." + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." 
} "###); } #[actix_web::test] async fn test_summarized_batch_cancelation() { - let server = Server::new().await; - let index = server.index("doggos"); + let server = Server::new_shared(); + let index = server.unique_index(); // to avoid being flaky we're only going to cancel an already finished batch :( let (task, _status_code) = index.create(None).await; server.wait_task(task.uid()).await.succeeded(); - let (task, _status_code) = server.cancel_tasks("uids=0").await; - server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".details.originalFilter" => "?uids=X", + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "matchedTasks": 1, "canceledTasks": 0, - "originalFilter": "?uids=0" + "originalFilter": "?uids=X" }, "stats": { "totalNbTasks": 1, @@ -1224,38 +1266,40 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id 1 of type `taskCancelation` that cannot be batched with any other task." + "batchStrategy": "created batch containing only task with id X of type `taskCancelation` that cannot be batched with any other task." 
} "###); } #[actix_web::test] async fn test_summarized_batch_deletion() { - let server = Server::new().await; - let index = server.index("doggos"); + let server = Server::new_shared(); + let index = server.unique_index(); // to avoid being flaky we're only going to delete an already finished batch :( let (task, _status_code) = index.create(None).await; server.wait_task(task.uid()).await.succeeded(); - let (task, _status_code) = server.delete_tasks("uids=0").await; - server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".details.originalFilter" => "?uids=X" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "matchedTasks": 1, "deletedTasks": 1, - "originalFilter": "?uids=0" + "originalFilter": "?uids=X" }, "stats": { "totalNbTasks": 1, @@ -1278,23 +1322,25 @@ async fn test_summarized_batch_deletion() { #[actix_web::test] async fn test_summarized_dump_creation() { - let server = Server::new().await; + let server = Server::new_shared(); let (task, _status_code) = server.create_dump().await; - server.wait_task(task.uid()).await; - let (batch, _) = server.get_batch(0).await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".details.dumpUid" => "[dumpUid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "dumpUid": "[dumpUid]" @@ -1313,7 +1359,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id 0 of type `dumpCreation` that cannot be batched with any other task." + "batchStrategy": "created batch containing only task with id X of type `dumpCreation` that cannot be batched with any other task." } "###); } diff --git a/crates/meilisearch/tests/common/mod.rs b/crates/meilisearch/tests/common/mod.rs index d023d464e..1395aac5a 100644 --- a/crates/meilisearch/tests/common/mod.rs +++ b/crates/meilisearch/tests/common/mod.rs @@ -38,6 +38,15 @@ impl Value { self["uid"].as_u64().is_some() || self["taskUid"].as_u64().is_some() } + #[track_caller] + pub fn batch_uid(&self) -> u32 { + if let Some(batch_uid) = self["batchUid"].as_u64() { + batch_uid as u32 + } else { + panic!("Didn't find `batchUid` in: {self}"); + } + } + /// Return `true` if the `status` field is set to `succeeded`. /// Panic if the `status` field doesn't exists. #[track_caller]