From 7380808b26a5fb30440fcc72a06fc58e0ca2ebf3 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Mon, 2 Jun 2025 16:39:21 +0300 Subject: [PATCH 01/12] tests: Faster batches:: IT tests Use shared server + unique indices where possible Related-to: https://github.com/meilisearch/meilisearch/issues/4840 Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 190 ++++++++++++++---------- 1 file changed, 108 insertions(+), 82 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index e775d1ea4..4613f71fc 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -8,8 +8,8 @@ use crate::json; #[actix_rt::test] async fn error_get_unexisting_batch_status() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _coder) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.get_batch(1).await; @@ -27,8 +27,8 @@ async fn error_get_unexisting_batch_status() { #[actix_rt::test] async fn get_batch_status() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (_response, code) = index.get_batch(0).await; @@ -37,8 +37,8 @@ async fn get_batch_status() { #[actix_rt::test] async fn list_batches() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; @@ -62,7 +62,7 @@ async fn list_batches_pagination_and_reverse() { let index = server.index(format!("test-{i}")); last_batch = Some(index.create(None).await.0.uid()); } - server.wait_task(last_batch.unwrap()).await; + server.wait_task(last_batch.unwrap()).await.succeeded(); let (response, code) = server.batches_filter("limit=3").await; assert_eq!(code, 200); @@ -139,8 +139,8 @@ async fn list_batches_with_star_filters() { #[actix_rt::test] async fn list_batches_status_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; @@ -161,8 +161,8 @@ async fn list_batches_status_filtered() { #[actix_rt::test] async fn list_batches_type_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _) = index.delete().await; @@ -183,8 +183,8 @@ async fn list_batches_type_filtered() { #[actix_rt::test] async fn list_batches_invalid_canceled_by_filter() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); @@ -195,8 +195,8 @@ async fn list_batches_invalid_canceled_by_filter() { #[actix_rt::test] async fn 
list_batches_status_and_type_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.update(Some("id")).await; @@ -219,7 +219,7 @@ async fn list_batches_status_and_type_filtered() { #[actix_rt::test] async fn list_batch_filter_error() { - let server = Server::new().await; + let server = Server::new_shared(); let (response, code) = server.batches_filter("lol=pied").await; assert_eq!(code, 400, "{}", response); @@ -268,14 +268,15 @@ async fn list_batch_filter_error() { #[actix_web::test] async fn test_summarized_document_addition_or_update() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", @@ -286,7 +287,7 @@ async fn test_summarized_document_addition_or_update() { }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "receivedDocuments": 1, @@ -320,6 +321,7 @@ async fn test_summarized_document_addition_or_update() { let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", @@ -330,7 +332,7 @@ async fn test_summarized_document_addition_or_update() { }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "receivedDocuments": 1, @@ -360,23 +362,25 @@ async fn test_summarized_document_addition_or_update() { #[actix_web::test] async fn test_summarized_delete_documents_by_batch() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "providedIds": 3, @@ -447,25 +451,27 @@ async fn test_summarized_delete_documents_by_batch() { #[actix_web::test] async fn test_summarized_delete_documents_by_filter() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", 
".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "providedIds": 0, @@ -583,11 +589,11 @@ async fn test_summarized_delete_documents_by_filter() { #[actix_web::test] async fn test_summarized_delete_document_by_id() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.delete_document(1).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -596,7 +602,8 @@ async fn test_summarized_delete_document_by_id() { ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}" }, @r###" { @@ -671,8 +678,8 @@ async fn test_summarized_delete_document_by_id() { #[actix_web::test] async fn test_summarized_settings_update() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); // here we should find my payload even in the failed batch. let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await; meili_snap::snapshot!(code, @"400 Bad Request"); @@ -687,20 +694,24 @@ async fn test_summarized_settings_update() { let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => "{\n\t\"test\": 1}", + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" + }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "displayedAttributes": [ @@ -731,30 +742,33 @@ async fn test_summarized_settings_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); } #[actix_web::test] async fn test_summarized_index_creation() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", 
".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}", + ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -773,7 +787,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `indexCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); @@ -819,8 +833,8 @@ async fn test_summarized_index_creation() { #[actix_web::test] async fn test_summarized_index_deletion() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (ret, _code) = index.delete().await; let batch = index.wait_task(ret.uid()).await.failed(); snapshot!(batch, @@ -828,7 +842,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "failed", "type": "indexDeletion", "canceledBy": null, @@ -836,7 +850,7 @@ async fn test_summarized_index_deletion() { "deletedDocuments": 0 }, "error": { - "message": "Index `test` not found.", + "message": "Index `[uuid]` not found.", "code": "index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" @@ -859,7 +873,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, @@ -928,24 +942,27 @@ async fn test_summarized_index_deletion() { #[actix_web::test] async fn test_summarized_index_update() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); // If the index doesn't exist yet, we should get errors with or without the primary key. 
let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}", + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -964,7 +981,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1089,26 +1106,28 @@ async fn test_summarized_index_update() { #[actix_web::test] async fn test_summarized_index_swap() { - let server = Server::new().await; + let server = Server::new_shared(); let (task, _status_code) = server .index_swap(json!([ { "indexes": ["doggos", "cattos"] } ])) .await; server.wait_task(task.uid()).await.failed(); - let (batch, _) = server.get_batch(0).await; + let (batch, _) = server.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchCreationComplete" => "task with id X of type `indexSwap` cannot be batched" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "swaps": [ @@ -1134,31 +1153,35 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `indexSwap` cannot be batched" + "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" } "###); - server.index("doggos").create(None).await; - let (task, _status_code) = server.index("cattos").create(None).await; + let doggos_index = server.unique_index(); + doggos_index.create(None).await; + let cattos_index = server.unique_index(); + let (task, _status_code) = cattos_index.create(None).await; server .index_swap(json!([ - { "indexes": ["doggos", "cattos"] } + { "indexes": [doggos_index.uid, cattos_index.uid] } ])) .await; server.wait_task(task.uid()).await.succeeded(); let (batch, _) = server.get_batch(1).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"doggos\": 1}" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -1184,12 +1207,12 @@ async fn test_summarized_index_swap() { #[actix_web::test] async fn test_summarized_batch_cancelation() { - let server = Server::new().await; - let index = server.index("doggos"); + let server = 
Server::new_shared(); + let index = server.unique_index(); // to avoid being flaky we're only going to cancel an already finished batch :( let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (task, _status_code) = server.cancel_tasks("uids=0").await; + let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, @@ -1231,31 +1254,33 @@ async fn test_summarized_batch_cancelation() { #[actix_web::test] async fn test_summarized_batch_deletion() { - let server = Server::new().await; - let index = server.index("doggos"); + let server = Server::new_shared(); + let index = server.unique_index(); // to avoid being flaky we're only going to delete an already finished batch :( let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (task, _status_code) = server.delete_tasks("uids=0").await; + let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".details.originalFilter" => "?uids=X" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "matchedTasks": 1, "deletedTasks": 1, - "originalFilter": "?uids=0" + "originalFilter": "?uids=X" }, "stats": { "totalNbTasks": 1, @@ -1278,12 +1303,13 @@ async fn test_summarized_batch_deletion() { #[actix_web::test] async fn test_summarized_dump_creation() { - let server = Server::new().await; + let server = Server::new_shared(); let (task, _status_code) = server.create_dump().await; - server.wait_task(task.uid()).await; - let (batch, _) = server.get_batch(0).await; + server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".details.dumpUid" => "[dumpUid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", @@ -1294,7 +1320,7 @@ async fn test_summarized_dump_creation() { }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "dumpUid": "[dumpUid]" From cb15e5c67e1c6fed85f3d6c85875b9bb34e75093 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 09:13:56 +0300 Subject: [PATCH 02/12] WIP: More snapshot updates Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 107 +++++++++++------------- 1 file changed, 48 insertions(+), 59 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 4613f71fc..bb926af70 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -12,10 +12,10 @@ async fn error_get_unexisting_batch_status() { let index = server.unique_index(); let (task, _coder) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (response, code) = index.get_batch(1).await; + let (response, code) = index.get_batch(task.uid() as u32).await; let expected_response = 
json!({ - "message": "Batch `1` not found.", + "message": format!("Batch `{}` not found.", task.uid()), "code": "batch_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#batch_not_found" @@ -147,15 +147,15 @@ async fn list_batches_status_filtered() { index.wait_task(task.uid()).await.failed(); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = index.filtered_batches(&[], &["succeeded", "failed"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } @@ -168,16 +168,16 @@ async fn list_batches_type_filtered() { let (task, _) = index.delete().await; index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = - index.filtered_batches(&["indexCreation", "IndexDeletion"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + index.filtered_batches(&["indexCreation", "indexDeletion"], &[], &[]).await; + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); } @@ -189,7 +189,7 @@ async fn list_batches_invalid_canceled_by_filter() { index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&[], &[], &["0"]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 0); } @@ -203,7 +203,7 @@ async fn list_batches_status_and_type_filtered() { index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &["failed"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 0); let (response, code) = index @@ -213,7 +213,7 @@ async fn list_batches_status_and_type_filtered() { &[], ) .await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } @@ -222,7 +222,7 @@ async fn list_batch_filter_error() { let server = Server::new_shared(); let (response, code) = server.batches_filter("lol=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Unknown parameter `lol`: expected one of `limit`, `from`, `reverse`, `batchUids`, `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`", @@ -233,7 +233,7 @@ async fn list_batch_filter_error() { "#); let (response, code) = 
server.batches_filter("uids=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer", @@ -244,7 +244,7 @@ async fn list_batch_filter_error() { "#); let (response, code) = server.batches_filter("from=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Invalid value in parameter `from`: could not parse `pied` as a positive integer", @@ -255,7 +255,7 @@ async fn list_batch_filter_error() { "#); let (response, code) = server.batches_filter("beforeStartedAt=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Invalid value in parameter `beforeStartedAt`: `pied` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.", @@ -283,7 +283,8 @@ async fn test_summarized_document_addition_or_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"""{"test": 1}"""# }, @r###" { @@ -301,9 +302,7 @@ async fn test_summarized_document_addition_or_update() { "types": { "documentAdditionOrUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]", "writeChannelCongestion": "[writeChannelCongestion]", "internalDatabaseSizes": "[internalDatabaseSizes]" @@ -376,7 +375,7 @@ async fn test_summarized_delete_documents_by_batch() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}" + ".stats.indexUids" => r#"""{"test": 1}"""# }, @r###" { @@ -394,9 +393,7 @@ async fn test_summarized_delete_documents_by_batch() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -467,7 +464,8 @@ async fn test_summarized_delete_documents_by_filter() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}" + ".stats.indexUids" => r#"""{"test": 1}"""#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid`" }, @r###" { @@ -486,15 +484,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid`" } "###); @@ -603,7 +599,7 @@ async fn test_summarized_delete_document_by_id() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}" + ".stats.indexUids" => r#"""{"test": 1}"""# }, @r###" { @@ -621,9 +617,7 @@ async fn test_summarized_delete_document_by_id() { "types": { 
"documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -705,7 +699,7 @@ async fn test_summarized_settings_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", - ".stats.indexUids" => "{\n\t\"test\": 1}", + ".stats.indexUids" => r#"""{"test": 1}"""#, ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @@ -734,9 +728,7 @@ async fn test_summarized_settings_update() { "types": { "settingsUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -763,7 +755,7 @@ async fn test_summarized_index_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}", + ".stats.indexUids" => r#"""{"test": 1}"""#, ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" @@ -779,9 +771,7 @@ async fn test_summarized_index_creation() { "types": { "indexCreation": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -896,7 +886,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "succeeded", "type": "indexDeletion", "canceledBy": null, @@ -919,7 +909,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "failed", "type": "indexDeletion", "canceledBy": null, @@ -927,7 +917,7 @@ async fn test_summarized_index_deletion() { "deletedDocuments": 0 }, "error": { - "message": "Index `test` not found.", + "message": "Index `[uuid]` not found.", "code": "index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" @@ -947,7 +937,7 @@ async fn test_summarized_index_update() { // If the index doesn't exist yet, we should get errors with or without the primary key. 
let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -957,7 +947,7 @@ async fn test_summarized_index_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}", + ".stats.indexUids" => r#"{"test": 1}"#, ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" @@ -973,9 +963,7 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -1177,7 +1165,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"doggos\": 1}" + ".stats.indexUids" => r#"""{"doggos": 1}"""# }, @r###" { @@ -1192,9 +1180,7 @@ async fn test_summarized_index_swap() { "types": { "indexCreation": 1 }, - "indexUids": { - "doggos": 1 - }, + "indexUids": {"doggos": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -1214,19 +1200,21 @@ async fn test_summarized_batch_cancelation() { index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "matchedTasks": 1, @@ -1247,7 +1235,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type `taskCancelation` cannot be batched" + "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" } "###); } @@ -1316,7 +1304,8 @@ async fn test_summarized_dump_creation() { ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchCreationComplete" => "task with id X of type `dumpCreation` cannot be batched" }, @r###" { @@ -1339,7 +1328,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `dumpCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" } "###); } From 48460678dfa2a62b4fe8676a0dc0474da90f1d74 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 10:50:22 +0300 Subject: [PATCH 03/12] More assertion fixes Signed-off-by: Martin Tzvetanov Grigorov --- 
crates/meilisearch/tests/batches/mod.rs | 195 +++++++++++++----------- 1 file changed, 105 insertions(+), 90 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index bb926af70..ce5ad41b6 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -12,10 +12,10 @@ async fn error_get_unexisting_batch_status() { let index = server.unique_index(); let (task, _coder) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (response, code) = index.get_batch(task.uid() as u32).await; + let (response, code) = index.get_batch(u32::MAX).await; let expected_response = json!({ - "message": format!("Batch `{}` not found.", task.uid()), + "message": format!("Batch `{}` not found.", u32::MAX), "code": "batch_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#batch_not_found" @@ -31,7 +31,7 @@ async fn get_batch_status() { let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (_response, code) = index.get_batch(0).await; + let (_response, code) = index.get_batch(task.uid() as u32).await; assert_eq!(code, 200); } @@ -284,7 +284,8 @@ async fn test_summarized_document_addition_or_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", - ".stats.indexUids" => r#"""{"test": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -302,7 +303,7 @@ async fn test_summarized_document_addition_or_update() { "types": { "documentAdditionOrUpdate": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]", "writeChannelCongestion": "[writeChannelCongestion]", "internalDatabaseSizes": "[internalDatabaseSizes]" @@ -310,14 +311,14 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -327,7 +328,9 @@ async fn test_summarized_document_addition_or_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -345,16 +348,14 @@ async fn test_summarized_document_addition_or_update() { "types": { "documentAdditionOrUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]", "writeChannelCongestion": "[writeChannelCongestion]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks 
for index `[uuid]`" } "###); } @@ -363,7 +364,10 @@ async fn test_summarized_document_addition_or_update() { async fn test_summarized_delete_documents_by_batch() { let server = Server::new_shared(); let index = server.unique_index(); - let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await; + let task_uid_1 = (u32::MAX - 1) as u64; + let task_uid_2 = (u32::MAX - 2) as u64; + let task_uid_3 = (u32::MAX - 3) as u64; + let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await; index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, @@ -375,7 +379,8 @@ async fn test_summarized_delete_documents_by_batch() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -393,33 +398,35 @@ async fn test_summarized_delete_documents_by_batch() { "types": { "documentDeletion": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); index.create(None).await; let (task, _status_code) = index.delete_batch(vec![42]).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(2).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, }, @r###" { - "uid": 2, + "uid": "[uid]", "progress": null, "details": { "providedIds": 1, @@ -433,9 +440,7 @@ async fn test_summarized_delete_documents_by_batch() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -464,8 +469,8 @@ async fn test_summarized_delete_documents_by_filter() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid`" + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -484,13 +489,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid`" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); @@ -498,20 +503,23 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ 
"filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(2).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { - "uid": 2, + "uid": "[uid]", "progress": null, "details": { "providedIds": 0, @@ -526,15 +534,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); @@ -542,20 +548,23 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(4).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { - "uid": 4, + "uid": "[uid]", "progress": null, "details": { "providedIds": 0, @@ -570,15 +579,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); } @@ -599,7 +606,8 @@ async fn test_summarized_delete_document_by_id() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -617,33 +625,36 @@ async fn test_summarized_delete_document_by_id() { "types": { "documentDeletion": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); index.create(None).await; let (task, _status_code) = index.delete_document(42).await; index.wait_task(task.uid()).await.succeeded(); - let 
(batch, _) = index.get_batch(2).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { - "uid": 2, + "uid": "[uid]", "progress": null, "details": { "providedIds": 1, @@ -657,15 +668,13 @@ async fn test_summarized_delete_document_by_id() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); } @@ -699,7 +708,7 @@ async fn test_summarized_settings_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", - ".stats.indexUids" => r#"""{"test": 1}"""#, + ".stats.indexUids" => r#"{"[uuid]": 1}"#, ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @@ -728,7 +737,7 @@ async fn test_summarized_settings_update() { "types": { "settingsUpdate": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -755,7 +764,7 @@ async fn test_summarized_index_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""#, + ".stats.indexUids" => r#"{"[uuid]": 1}"#, ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" @@ -771,7 +780,7 @@ async fn test_summarized_index_creation() { "types": { "indexCreation": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -783,19 +792,22 @@ async fn test_summarized_index_creation() { let (task, _status_code) = index.create(Some("doggos")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "primaryKey": "doggos" @@ -808,15 +820,13 @@ async fn test_summarized_index_creation() { "types": { "indexCreation": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type 
`indexCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } @@ -947,7 +957,7 @@ async fn test_summarized_index_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"{"test": 1}"#, + ".stats.indexUids" => r#"{"[uuid]": 1}"#, ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" @@ -963,7 +973,7 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -975,19 +985,22 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "primaryKey": "bones" @@ -1000,15 +1013,13 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1017,19 +1028,22 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(3).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 3, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -1040,33 +1054,34 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 3 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(4).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => 
"[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 4, + "uid": "[uid]", "progress": null, "details": { "primaryKey": "bones" @@ -1079,15 +1094,13 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 4 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); } @@ -1155,7 +1168,7 @@ async fn test_summarized_index_swap() { ])) .await; server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_batch(1).await; + let (batch, _) = server.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1165,7 +1178,8 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"doggos": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" { @@ -1180,13 +1194,13 @@ async fn test_summarized_index_swap() { "types": { "indexCreation": 1 }, - "indexUids": {"doggos": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type `indexCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } @@ -1210,7 +1224,8 @@ async fn test_summarized_batch_cancelation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched" + ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched", + ".details.originalFilter" => "?uids=X", }, @r###" { @@ -1219,7 +1234,7 @@ async fn test_summarized_batch_cancelation() { "details": { "matchedTasks": 1, "canceledTasks": 0, - "originalFilter": "?uids=0" + "originalFilter": "?uids=X" }, "stats": { "totalNbTasks": 1, From 2691999bd3d0f1929108ad6db769f1dc7e7b7e63 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 11:15:27 +0300 Subject: [PATCH 04/12] Add a helper method for getting the latest batch Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 2 +- crates/meilisearch/tests/common/server.rs | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index ce5ad41b6..82403fe3b 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1309,7 +1309,7 @@ async fn test_summarized_dump_creation() { let server = Server::new_shared(); let (task, _status_code) = server.create_dump().await; 
server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_batch(task.uid() as u32).await; + let (batch, _) = server.get_latest_batch().await; assert_json_snapshot!(batch, { ".uid" => "[uid]", diff --git a/crates/meilisearch/tests/common/server.rs b/crates/meilisearch/tests/common/server.rs index 431972983..787cafc9f 100644 --- a/crates/meilisearch/tests/common/server.rs +++ b/crates/meilisearch/tests/common/server.rs @@ -429,6 +429,19 @@ impl Server { self.service.get(url).await } + // https://www.meilisearch.com/docs/reference/api/batches#get-batches states: + // "Batches are always returned in descending order of uid. This means that by default, + // the most recently created batch objects appear first." + pub async fn get_latest_batch(&self) -> (Option<Value>, StatusCode) { + let url = "/batches?limit=1&offset=0"; + let (value, code) = self.service.get(url).await; + value + .get("results") + .and_then(|results| results.as_array()) + .and_then(|array| array.first()) + .map_or((None, code), |latest| (Some(Value(latest.clone())), code)) + } + pub async fn get_features(&self) -> (Value, StatusCode) { self.service.get("/experimental-features").await } From 139ec8c7827c3694be24cb9090edc4b4056cd15c Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 15:23:14 +0300 Subject: [PATCH 05/12] Add task.batch_uid() helper method Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 38 ++++++++++++------------- crates/meilisearch/tests/common/mod.rs | 9 ++++++ 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 82403fe3b..e6801f269 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -31,7 +31,7 @@ async fn get_batch_status() { let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (_response, code) = index.get_batch(task.uid() as u32).await; + let (_response, code) = index.get_batch(task.batch_uid()).await; assert_eq!(code, 200); } @@ -273,7 +273,7 @@ async fn test_summarized_document_addition_or_update() { let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -318,7 +318,7 @@ async fn test_summarized_document_addition_or_update() { let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -369,7 +369,7 @@ async fn test_summarized_delete_documents_by_batch() { let task_uid_3 = (u32::MAX - 3) as u64; let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -411,7 +411,7 @@ async fn test_summarized_delete_documents_by_batch() { index.create(None).await; let (task, _status_code) = 
index.delete_batch(vec![42]).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -459,7 +459,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -503,7 +503,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -548,7 +548,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -596,7 +596,7 @@ async fn test_summarized_delete_document_by_id() { let index = server.unique_index(); let (task, _status_code) = index.delete_document(1).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -638,7 +638,7 @@ async fn test_summarized_delete_document_by_id() { index.create(None).await; let (task, _status_code) = index.delete_document(42).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -697,7 +697,7 @@ async fn test_summarized_settings_update() { let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -754,7 +754,7 @@ async fn test_summarized_index_creation() { let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -792,7 +792,7 @@ async fn test_summarized_index_creation() { let (task, _status_code) = index.create(Some("doggos")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -947,7 +947,7 @@ async fn test_summarized_index_update() { // If the index doesn't exist yet, we should get errors with or without the primary key. 
let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -985,7 +985,7 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1028,7 +1028,7 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1066,7 +1066,7 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1214,7 +1214,7 @@ async fn test_summarized_batch_cancelation() { index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1264,7 +1264,7 @@ async fn test_summarized_batch_deletion() { index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", diff --git a/crates/meilisearch/tests/common/mod.rs b/crates/meilisearch/tests/common/mod.rs index 373f89f78..d1da616ad 100644 --- a/crates/meilisearch/tests/common/mod.rs +++ b/crates/meilisearch/tests/common/mod.rs @@ -38,6 +38,15 @@ impl Value { self["uid"].as_u64().is_some() || self["taskUid"].as_u64().is_some() } + #[track_caller] + pub fn batch_uid(&self) -> u32 { + if let Some(batch_uid) = self["batchUid"].as_u64() { + batch_uid as u32 + } else { + panic!("Didn't find `batchUid` in: {self}"); + } + } + /// Return `true` if the `status` field is set to `succeeded`. /// Panic if the `status` field doesn't exists. 
#[track_caller] From 9e31d6ceff910fd8d6eba731fd665e69de4544c2 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Thu, 5 Jun 2025 11:11:54 +0300 Subject: [PATCH 06/12] Add batch_uid to all successful and failed tasks too Signed-off-by: Martin Tzvetanov Grigorov --- crates/index-scheduler/src/queue/tasks.rs | 7 ++++++- crates/meilisearch/tests/batches/mod.rs | 20 ++++++++++---------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/crates/index-scheduler/src/queue/tasks.rs b/crates/index-scheduler/src/queue/tasks.rs index 74192232e..92789b93f 100644 --- a/crates/index-scheduler/src/queue/tasks.rs +++ b/crates/index-scheduler/src/queue/tasks.rs @@ -530,7 +530,12 @@ impl Queue { ..task } } else { - task + dbg!(&task); + if task.status == Status::Succeeded || task.status == Status::Failed { + Task { batch_uid: Some(batch.uid), ..task } + } else { + task + } } }) .collect(), diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index e6801f269..d5374a144 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1,7 +1,7 @@ mod errors; use meili_snap::insta::assert_json_snapshot; -use meili_snap::snapshot; +use meili_snap::{json_string, snapshot}; use crate::common::Server; use crate::json; @@ -119,21 +119,21 @@ async fn list_batches_with_star_filters() { let (response, code) = index.service.get("/batches?types=*,documentAdditionOrUpdate&statuses=*").await; - assert_eq!(code, 200, "{:?}", response); + assert_eq!(code, 200, "{response:?}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); let (response, code) = index .service .get("/batches?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test") .await; - assert_eq!(code, 200, "{:?}", response); + assert_eq!(code, 200, "{response:?}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); let (response, code) = index .service .get("/batches?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test,*") .await; - assert_eq!(code, 200, "{:?}", response); + assert_eq!(code, 200, "{response:?}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } @@ -223,7 +223,7 @@ async fn list_batch_filter_error() { let (response, code) = server.batches_filter("lol=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Unknown parameter `lol`: expected one of `limit`, `from`, `reverse`, `batchUids`, `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`", "code": "bad_request", @@ -234,7 +234,7 @@ async fn list_batch_filter_error() { let (response, code) = server.batches_filter("uids=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer", "code": "invalid_task_uids", @@ -245,7 +245,7 @@ async fn list_batch_filter_error() { let (response, code) = server.batches_filter("from=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Invalid value in parameter `from`: could not parse `pied` as a positive integer", "code": "invalid_task_from", @@ -256,7 +256,7 @@ async fn 
list_batch_filter_error() { let (response, code) = server.batches_filter("beforeStartedAt=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Invalid value in parameter `beforeStartedAt`: `pied` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.", "code": "invalid_task_before_started_at", @@ -685,8 +685,8 @@ async fn test_summarized_settings_update() { let index = server.unique_index(); // here we should find my payload even in the failed batch. let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await; - meili_snap::snapshot!(code, @"400 Bad Request"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r###" + snapshot!(code, @"400 Bad Request"); + snapshot!(json_string!(response), @r###" { "message": "Invalid value at `.rankingRules[0]`: `custom` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules.", "code": "invalid_settings_ranking_rules", From 126aefc2073b0b24c64a9444be7cfa0802ce2b89 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Thu, 10 Jul 2025 16:39:58 +0300 Subject: [PATCH 07/12] Fix more tests Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 129 ++++++++++++---------- crates/meilisearch/tests/common/server.rs | 13 --- 2 files changed, 70 insertions(+), 72 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index d5374a144..5b67dc50c 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1,11 +1,30 @@ mod errors; +use insta::internals::{Content, ContentPath}; +use once_cell::sync::Lazy; +use regex::Regex; use meili_snap::insta::assert_json_snapshot; use meili_snap::{json_string, snapshot}; use crate::common::Server; use crate::json; + +static TASK_WITH_ID_RE: Lazy<Regex> = Lazy::new(|| { + Regex::new(r"task with id (\d+) of type") + .unwrap() +}); + +fn task_with_id_redaction(value: Content, _path: ContentPath) -> Content { + match value { + Content::String(s) => { + let replaced = TASK_WITH_ID_RE.replace_all(&s, "task with id X of type"); + Content::String(replaced.to_string()) + } + _ => value.clone(), + } +} + #[actix_rt::test] async fn error_get_unexisting_batch_status() { let server = Server::new_shared(); @@ -30,7 +49,7 @@ async fn get_batch_status() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (_response, code) = index.get_batch(task.batch_uid()).await; assert_eq!(code, 200); } @@ -272,7 +291,7 @@ async fn test_summarized_document_addition_or_update() { let index = server.unique_index(); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -285,7 +304,6 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all
enqueued tasks for index `[uuid]`" }, @r###" { @@ -311,13 +329,13 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -330,7 +348,6 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -355,7 +372,7 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); } @@ -368,7 +385,7 @@ async fn test_summarized_delete_documents_by_batch() { let task_uid_2 = (u32::MAX - 2) as u64; let task_uid_3 = (u32::MAX - 3) as u64; let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -380,7 +397,6 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -404,13 +420,13 @@ async fn test_summarized_delete_documents_by_batch() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); index.create(None).await; let (task, _status_code) = index.delete_batch(vec![42]).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -458,7 +474,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -470,7 +486,6 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -495,14 +510,14 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } 
"###); index.create(None).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -515,7 +530,6 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -540,14 +554,14 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -595,7 +609,7 @@ async fn test_summarized_delete_document_by_id() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.delete_document(1).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -607,7 +621,6 @@ async fn test_summarized_delete_document_by_id() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -631,13 +644,13 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); index.create(None).await; let (task, _status_code) = index.delete_document(42).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -650,7 +663,6 @@ async fn test_summarized_delete_document_by_id() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -674,7 +686,7 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); } @@ -696,7 +708,7 @@ async fn test_summarized_settings_update() { "###); let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; - index.wait_task(task.uid()).await.succeeded(); + let task = 
index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -709,8 +721,6 @@ async fn test_summarized_settings_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" - }, @r###" { @@ -743,7 +753,7 @@ async fn test_summarized_settings_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); } @@ -753,7 +763,7 @@ async fn test_summarized_index_creation() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -765,7 +775,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -786,12 +796,12 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); let (task, _status_code) = index.create(Some("doggos")).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -803,7 +813,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -826,7 +836,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); } @@ -946,7 +956,7 @@ async fn test_summarized_index_update() { let index = server.unique_index(); // If the index doesn't exist yet, we should get errors with or without the primary key. 
let (task, _status_code) = index.update(None).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -958,7 +968,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -979,12 +989,12 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); let (task, _status_code) = index.update(Some("bones")).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -996,7 +1006,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1019,15 +1029,15 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); - // And run the same two tests once the index do exists. + // And run the same two tests once the index does exist. index.create(None).await; let (task, _status_code) = index.update(None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1039,7 +1049,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1060,12 +1070,12 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." 
} "###); let (task, _status_code) = index.update(Some("bones")).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1077,7 +1087,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1100,7 +1110,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); } @@ -1124,7 +1134,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `indexSwap` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1154,7 +1164,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task." } "###); @@ -1179,7 +1189,7 @@ async fn test_summarized_index_swap() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1200,11 +1210,12 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." 
} "###); } + #[actix_web::test] async fn test_summarized_batch_cancelation() { let server = Server::new_shared(); @@ -1213,7 +1224,7 @@ async fn test_summarized_batch_cancelation() { let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1224,8 +1235,8 @@ async fn test_summarized_batch_cancelation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched", ".details.originalFilter" => "?uids=X", + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1250,7 +1261,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `taskCancelation` that cannot be batched with any other task." } "###); } @@ -1263,7 +1274,7 @@ async fn test_summarized_batch_deletion() { let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1308,8 +1319,8 @@ async fn test_summarized_batch_deletion() { async fn test_summarized_dump_creation() { let server = Server::new_shared(); let (task, _status_code) = server.create_dump().await; - server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_latest_batch().await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1320,7 +1331,7 @@ async fn test_summarized_dump_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `dumpCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1343,7 +1354,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `dumpCreation` that cannot be batched with any other task." } "###); } diff --git a/crates/meilisearch/tests/common/server.rs b/crates/meilisearch/tests/common/server.rs index 787cafc9f..431972983 100644 --- a/crates/meilisearch/tests/common/server.rs +++ b/crates/meilisearch/tests/common/server.rs @@ -429,19 +429,6 @@ impl Server { self.service.get(url).await } - // https://www.meilisearch.com/docs/reference/api/batches#get-batches states: - // "Batches are always returned in descending order of uid. 
This means that by default, - // the most recently created batch objects appear first." - pub async fn get_latest_batch(&self) -> (Option<Value>, StatusCode) { - let url = "/batches?limit=1&offset=0"; - let (value, code) = self.service.get(url).await; - value - .get("results") - .and_then(|results| results.as_array()) - .and_then(|array| array.first()) - .map_or((None, code), |latest| (Some(Value(latest.clone())), code)) - } - pub async fn get_features(&self) -> (Value, StatusCode) { self.service.get("/experimental-features").await } From 9f89881b0df27b5e155a3ec2e1c63831d0573174 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 10:11:58 +0300 Subject: [PATCH 08/12] More test fixes Signed-off-by: Martin Tzvetanov Grigorov --- crates/index-scheduler/src/queue/tasks.rs | 7 +- crates/meili-snap/src/lib.rs | 2 +- crates/meilisearch/tests/batches/mod.rs | 79 ++++++++++++----------- 3 files changed, 43 insertions(+), 45 deletions(-) diff --git a/crates/index-scheduler/src/queue/tasks.rs b/crates/index-scheduler/src/queue/tasks.rs index 92789b93f..74192232e 100644 --- a/crates/index-scheduler/src/queue/tasks.rs +++ b/crates/index-scheduler/src/queue/tasks.rs @@ -530,12 +530,7 @@ impl Queue { ..task } } else { - dbg!(&task); - if task.status == Status::Succeeded || task.status == Status::Failed { - Task { batch_uid: Some(batch.uid), ..task } - } else { - task - } + task } }) .collect(), diff --git a/crates/meili-snap/src/lib.rs b/crates/meili-snap/src/lib.rs index 1641a6335..a59732f04 100644 --- a/crates/meili-snap/src/lib.rs +++ b/crates/meili-snap/src/lib.rs @@ -55,7 +55,7 @@ pub fn default_snapshot_settings_for_test<'a>( settings.add_dynamic_redaction(".error.message", |content, _content_path| match &content { Content::String(s) => { - let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "$before[uuid]$after"); + let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "[uuid]"); Content::String(uuid_replaced.to_string()) } _ => content, diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 5b67dc50c..e68eb3fc6 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1,19 +1,16 @@ mod errors; use insta::internals::{Content, ContentPath}; -use once_cell::sync::Lazy; -use regex::Regex; use meili_snap::insta::assert_json_snapshot; use meili_snap::{json_string, snapshot}; +use once_cell::sync::Lazy; +use regex::Regex; use crate::common::Server; use crate::json; - -static TASK_WITH_ID_RE: Lazy<Regex> = Lazy::new(|| { - Regex::new(r"task with id (\d+) of type") - .unwrap() -}); +static TASK_WITH_ID_RE: Lazy<Regex> = + Lazy::new(|| Regex::new(r"task with id (\d+) of type").unwrap()); fn task_with_id_redaction(value: Content, _path: ContentPath) -> Content { match value { @@ -304,6 +301,7 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -329,7 +327,7 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy":
"batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); } @@ -397,6 +395,7 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -420,7 +419,7 @@ async fn test_summarized_delete_documents_by_batch() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -486,6 +485,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -510,7 +510,7 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -530,6 +530,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -554,7 +555,7 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -574,7 +575,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" + ".batchCreationComplete" => "batched all enqueued tasks" }, @r###" { @@ -599,7 +600,7 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchCreationComplete": "batched all enqueued tasks" } "###); } @@ -621,6 +622,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -644,7 +646,7 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -663,6 +665,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -686,7 +689,7 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": 
"batched all enqueued tasks" } "###); } @@ -721,6 +724,7 @@ async fn test_summarized_settings_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks" }, @r###" { @@ -753,7 +757,7 @@ async fn test_summarized_settings_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); } @@ -775,7 +779,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -796,7 +800,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); @@ -813,7 +817,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -836,7 +840,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } @@ -968,7 +972,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -989,7 +993,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1006,7 +1010,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1029,7 +1033,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." 
+ "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1049,7 +1053,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1070,7 +1074,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1087,7 +1091,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1110,7 +1114,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); } @@ -1134,7 +1138,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1164,7 +1168,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" } "###); @@ -1189,7 +1193,7 @@ async fn test_summarized_index_swap() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1210,12 +1214,11 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." 
+ "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } - #[actix_web::test] async fn test_summarized_batch_cancelation() { let server = Server::new_shared(); @@ -1236,7 +1239,7 @@ async fn test_summarized_batch_cancelation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".details.originalFilter" => "?uids=X", - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1261,7 +1264,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `taskCancelation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" } "###); } @@ -1331,7 +1334,7 @@ async fn test_summarized_dump_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1354,7 +1357,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `dumpCreation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" } "###); } From 3bef4f4413ccdaa5c769c6d95c5211032d46e22b Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 10:16:25 +0300 Subject: [PATCH 09/12] Use Server::wait_task() instead of Index::wait_task() Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 74 ++++++++++++------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index e68eb3fc6..f763a8fc0 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -27,7 +27,7 @@ async fn error_get_unexisting_batch_status() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _coder) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.get_batch(u32::MAX).await; let expected_response = json!({ @@ -46,7 +46,7 @@ async fn get_batch_status() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (_response, code) = index.get_batch(task.batch_uid()).await; assert_eq!(code, 200); } @@ -56,9 +56,9 @@ async fn list_batches() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.list_batches().await; assert_eq!(code, 200); assert_eq!( @@ -112,10 
+112,10 @@ async fn list_batches_with_star_filters() { let server = Server::new().await; let index = server.index("test"); let (task, _code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let index = server.index("test"); let (task, _code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.service.get("/batches?indexUids=test").await; assert_eq!(code, 200); @@ -158,9 +158,9 @@ async fn list_batches_status_filtered() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; assert_eq!(code, 200, "{response}"); @@ -180,9 +180,9 @@ async fn list_batches_type_filtered() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _) = index.delete().await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); @@ -202,7 +202,7 @@ async fn list_batches_invalid_canceled_by_filter() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&[], &[], &["0"]).await; assert_eq!(code, 200, "{response}"); @@ -214,9 +214,9 @@ async fn list_batches_status_and_type_filtered() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.update(Some("id")).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &["failed"], &[]).await; assert_eq!(code, 200, "{response}"); @@ -288,7 +288,7 @@ async fn test_summarized_document_addition_or_update() { let index = server.unique_index(); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -333,7 +333,7 @@ async fn test_summarized_document_addition_or_update() { let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -383,7 +383,7 @@ async fn 
test_summarized_delete_documents_by_batch() {
     let task_uid_2 = (u32::MAX - 2) as u64;
     let task_uid_3 = (u32::MAX - 3) as u64;
     let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await;
-    let task = index.wait_task(task.uid()).await.failed();
+    let task = server.wait_task(task.uid()).await.failed();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -425,7 +425,7 @@ async fn test_summarized_delete_documents_by_batch() {
     index.create(None).await;
     let (task, _status_code) = index.delete_batch(vec![42]).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -473,7 +473,7 @@ async fn test_summarized_delete_documents_by_filter() {
     let (task, _status_code) =
         index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    let task = index.wait_task(task.uid()).await.failed();
+    let task = server.wait_task(task.uid()).await.failed();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -517,7 +517,7 @@ async fn test_summarized_delete_documents_by_filter() {
     index.create(None).await;
     let (task, _status_code) =
         index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    let task = index.wait_task(task.uid()).await.failed();
+    let task = server.wait_task(task.uid()).await.failed();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -562,7 +562,7 @@ async fn test_summarized_delete_documents_by_filter() {
     index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
     let (task, _status_code) =
         index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -610,7 +610,7 @@ async fn test_summarized_delete_document_by_id() {
     let server = Server::new_shared();
     let index = server.unique_index();
     let (task, _status_code) = index.delete_document(1).await;
-    let task = index.wait_task(task.uid()).await.failed();
+    let task = server.wait_task(task.uid()).await.failed();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -652,7 +652,7 @@ async fn test_summarized_delete_document_by_id() {
     index.create(None).await;
     let (task, _status_code) = index.delete_document(42).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -711,7 +711,7 @@ async fn test_summarized_settings_update() {
     "###);
     let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -767,7 +767,7 @@ async fn test_summarized_index_creation() {
     let server = Server::new_shared();
     let index = server.unique_index();
     let (task, _status_code) = index.create(None).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -805,7 +805,7 @@ async fn test_summarized_index_creation() {
     "###);
     let (task, _status_code) = index.create(Some("doggos")).await;
-    let task = index.wait_task(task.uid()).await.failed();
+    let task = server.wait_task(task.uid()).await.failed();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -850,7 +850,7 @@ async fn test_summarized_index_deletion() {
     let server = Server::new_shared();
     let index = server.unique_index();
     let (ret, _code) = index.delete().await;
-    let batch = index.wait_task(ret.uid()).await.failed();
+    let batch = server.wait_task(ret.uid()).await.failed();
     snapshot!(batch,
         @r###"
     {
@@ -881,7 +881,7 @@ async fn test_summarized_index_deletion() {
     // both batches may get autobatched and the deleted documents count will be wrong.
     let (ret, _code) =
         index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
-    let batch = index.wait_task(ret.uid()).await.succeeded();
+    let batch = server.wait_task(ret.uid()).await.succeeded();
     snapshot!(batch,
         @r###"
     {
@@ -904,7 +904,7 @@ async fn test_summarized_index_deletion() {
     "###);
     let (ret, _code) = index.delete().await;
-    let batch = index.wait_task(ret.uid()).await.succeeded();
+    let batch = server.wait_task(ret.uid()).await.succeeded();
     snapshot!(batch,
         @r###"
     {
@@ -927,7 +927,7 @@ async fn test_summarized_index_deletion() {
     // What happens when you delete an index that doesn't exists.
     let (ret, _code) = index.delete().await;
-    let batch = index.wait_task(ret.uid()).await.failed();
+    let batch = server.wait_task(ret.uid()).await.failed();
     snapshot!(batch,
         @r###"
     {
@@ -960,7 +960,7 @@ async fn test_summarized_index_update() {
     let index = server.unique_index();
     // If the index doesn't exist yet, we should get errors with or without the primary key.
     let (task, _status_code) = index.update(None).await;
-    let task = index.wait_task(task.uid()).await.failed();
+    let task = server.wait_task(task.uid()).await.failed();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -998,7 +998,7 @@ async fn test_summarized_index_update() {
     "###);
     let (task, _status_code) = index.update(Some("bones")).await;
-    let task = index.wait_task(task.uid()).await.failed();
+    let task = server.wait_task(task.uid()).await.failed();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -1041,7 +1041,7 @@ async fn test_summarized_index_update() {
     index.create(None).await;
     let (task, _status_code) = index.update(None).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -1079,7 +1079,7 @@ async fn test_summarized_index_update() {
     "###);
     let (task, _status_code) = index.update(Some("bones")).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -1225,9 +1225,9 @@ async fn test_summarized_batch_cancelation() {
     let index = server.unique_index();
     // to avoid being flaky we're only going to cancel an already finished batch :(
     let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
     let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {
@@ -1275,9 +1275,9 @@ async fn test_summarized_batch_deletion() {
     let index = server.unique_index();
     // to avoid being flaky we're only going to delete an already finished batch :(
     let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
     let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await;
-    let task = index.wait_task(task.uid()).await.succeeded();
+    let task = server.wait_task(task.uid()).await.succeeded();
     let (batch, _) = index.get_batch(task.batch_uid()).await;
     assert_json_snapshot!(batch,
         {

From eb6ad3ef9c4fab57d80bc127b29e25e18b4491c4 Mon Sep 17 00:00:00 2001
From: Martin Tzvetanov Grigorov
Date: Fri, 11 Jul 2025 10:24:25 +0300
Subject: [PATCH 10/12] Fix batch id detection
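
With a shared server the batch uid is no longer guaranteed to equal the
task uid, so look the batch up through the uid recorded on the finished
task. Illustration of the pattern (a sketch assembled from the hunks
below; `wait_task` and `batch_uid` are the test helpers already used
throughout this file):

    // Before: assumes the batch uid equals the task uid, which only
    // holds on a fresh, single-test server.
    server.wait_task(task.uid()).await.failed();
    let (batch, _) = server.get_batch(task.uid() as u32).await;

    // After: read the batch uid from the task once it has finished, so
    // the lookup stays correct next to unrelated concurrent tasks.
    let task = server.wait_task(task.uid()).await.failed();
    let (batch, _) = server.get_batch(task.batch_uid()).await;
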
"[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -1127,8 +1129,8 @@ async fn test_summarized_index_swap() { { "indexes": ["doggos", "cattos"] } ])) .await; - server.wait_task(task.uid()).await.failed(); - let (batch, _) = server.get_batch(task.uid() as u32).await; + let task = server.wait_task(task.uid()).await.failed(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1181,8 +1183,8 @@ async fn test_summarized_index_swap() { { "indexes": [doggos_index.uid, cattos_index.uid] } ])) .await; - server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_batch(task.uid() as u32).await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", From a39223822af690800ee0ce12806ceb41dce71f68 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 11:11:46 +0300 Subject: [PATCH 11/12] More tests fixes Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 48 ++++++++++++------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 7a21f1eca..bb8f3b6aa 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -396,7 +396,7 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -439,7 +439,7 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -532,7 +532,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -667,7 +667,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -781,7 +781,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -802,7 +802,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": 
"task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); @@ -819,7 +819,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -842,7 +842,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); } @@ -974,7 +974,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -995,7 +995,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); @@ -1012,7 +1012,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1035,7 +1035,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); @@ -1055,7 +1055,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1076,7 +1076,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." 
} "###); @@ -1093,7 +1093,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1116,7 +1116,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); } @@ -1140,7 +1140,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1170,7 +1170,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task." } "###); @@ -1195,7 +1195,7 @@ async fn test_summarized_index_swap() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1216,7 +1216,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); } @@ -1241,7 +1241,7 @@ async fn test_summarized_batch_cancelation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".details.originalFilter" => "?uids=X", - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1266,7 +1266,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `taskCancelation` that cannot be batched with any other task." 
} "###); } @@ -1336,7 +1336,7 @@ async fn test_summarized_dump_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1359,7 +1359,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `dumpCreation` that cannot be batched with any other task." } "###); } From e3daa907c5fe904951f47d10886cfc7b3cfcdf8c Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 11:14:39 +0300 Subject: [PATCH 12/12] Update redactions Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index bb8f3b6aa..9d6bee7c1 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -301,7 +301,7 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -346,7 +346,7 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -487,7 +487,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -577,7 +577,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks" + ".batchStrategy" => "batched all enqueued tasks" }, @r###" { @@ -624,7 +624,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -726,7 +726,7 @@ async fn test_summarized_settings_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks" + ".batchStrategy" => "batched all enqueued tasks" }, @r###" {