From 7380808b26a5fb30440fcc72a06fc58e0ca2ebf3 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Mon, 2 Jun 2025 16:39:21 +0300 Subject: [PATCH 01/21] tests: Faster batches:: IT tests Use shared server + unique indices where possible Related-to: https://github.com/meilisearch/meilisearch/issues/4840 Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 190 ++++++++++++++---------- 1 file changed, 108 insertions(+), 82 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index e775d1ea4..4613f71fc 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -8,8 +8,8 @@ use crate::json; #[actix_rt::test] async fn error_get_unexisting_batch_status() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _coder) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.get_batch(1).await; @@ -27,8 +27,8 @@ async fn error_get_unexisting_batch_status() { #[actix_rt::test] async fn get_batch_status() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (_response, code) = index.get_batch(0).await; @@ -37,8 +37,8 @@ async fn get_batch_status() { #[actix_rt::test] async fn list_batches() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; @@ -62,7 +62,7 @@ async fn list_batches_pagination_and_reverse() { let index = server.index(format!("test-{i}")); last_batch = Some(index.create(None).await.0.uid()); } - server.wait_task(last_batch.unwrap()).await; + server.wait_task(last_batch.unwrap()).await.succeeded(); let (response, code) = server.batches_filter("limit=3").await; assert_eq!(code, 200); @@ -139,8 +139,8 @@ async fn list_batches_with_star_filters() { #[actix_rt::test] async fn list_batches_status_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; @@ -161,8 +161,8 @@ async fn list_batches_status_filtered() { #[actix_rt::test] async fn list_batches_type_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _) = index.delete().await; @@ -183,8 +183,8 @@ async fn list_batches_type_filtered() { #[actix_rt::test] async fn list_batches_invalid_canceled_by_filter() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); @@ -195,8 +195,8 @@ async fn list_batches_invalid_canceled_by_filter() { #[actix_rt::test] async fn 
list_batches_status_and_type_filtered() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.update(Some("id")).await; @@ -219,7 +219,7 @@ async fn list_batches_status_and_type_filtered() { #[actix_rt::test] async fn list_batch_filter_error() { - let server = Server::new().await; + let server = Server::new_shared(); let (response, code) = server.batches_filter("lol=pied").await; assert_eq!(code, 400, "{}", response); @@ -268,14 +268,15 @@ async fn list_batch_filter_error() { #[actix_web::test] async fn test_summarized_document_addition_or_update() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", @@ -286,7 +287,7 @@ async fn test_summarized_document_addition_or_update() { }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "receivedDocuments": 1, @@ -320,6 +321,7 @@ async fn test_summarized_document_addition_or_update() { let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", @@ -330,7 +332,7 @@ async fn test_summarized_document_addition_or_update() { }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "receivedDocuments": 1, @@ -360,23 +362,25 @@ async fn test_summarized_document_addition_or_update() { #[actix_web::test] async fn test_summarized_delete_documents_by_batch() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "providedIds": 3, @@ -447,25 +451,27 @@ async fn test_summarized_delete_documents_by_batch() { #[actix_web::test] async fn test_summarized_delete_documents_by_filter() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", 
".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "providedIds": 0, @@ -583,11 +589,11 @@ async fn test_summarized_delete_documents_by_filter() { #[actix_web::test] async fn test_summarized_delete_document_by_id() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.delete_document(1).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -596,7 +602,8 @@ async fn test_summarized_delete_document_by_id() { ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}" }, @r###" { @@ -671,8 +678,8 @@ async fn test_summarized_delete_document_by_id() { #[actix_web::test] async fn test_summarized_settings_update() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); // here we should find my payload even in the failed batch. let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await; meili_snap::snapshot!(code, @"400 Bad Request"); @@ -687,20 +694,24 @@ async fn test_summarized_settings_update() { let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => "{\n\t\"test\": 1}", + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" + }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "displayedAttributes": [ @@ -731,30 +742,33 @@ async fn test_summarized_settings_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); } #[actix_web::test] async fn test_summarized_index_creation() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", 
".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}", + ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -773,7 +787,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `indexCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); @@ -819,8 +833,8 @@ async fn test_summarized_index_creation() { #[actix_web::test] async fn test_summarized_index_deletion() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (ret, _code) = index.delete().await; let batch = index.wait_task(ret.uid()).await.failed(); snapshot!(batch, @@ -828,7 +842,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "failed", "type": "indexDeletion", "canceledBy": null, @@ -836,7 +850,7 @@ async fn test_summarized_index_deletion() { "deletedDocuments": 0 }, "error": { - "message": "Index `test` not found.", + "message": "Index `[uuid]` not found.", "code": "index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" @@ -859,7 +873,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, @@ -928,24 +942,27 @@ async fn test_summarized_index_deletion() { #[actix_web::test] async fn test_summarized_index_update() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); // If the index doesn't exist yet, we should get errors with or without the primary key. 
let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(0).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"test\": 1}", + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -964,7 +981,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1089,26 +1106,28 @@ async fn test_summarized_index_update() { #[actix_web::test] async fn test_summarized_index_swap() { - let server = Server::new().await; + let server = Server::new_shared(); let (task, _status_code) = server .index_swap(json!([ { "indexes": ["doggos", "cattos"] } ])) .await; server.wait_task(task.uid()).await.failed(); - let (batch, _) = server.get_batch(0).await; + let (batch, _) = server.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchCreationComplete" => "task with id X of type `indexSwap` cannot be batched" }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "swaps": [ @@ -1134,31 +1153,35 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `indexSwap` cannot be batched" + "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" } "###); - server.index("doggos").create(None).await; - let (task, _status_code) = server.index("cattos").create(None).await; + let doggos_index = server.unique_index(); + doggos_index.create(None).await; + let cattos_index = server.unique_index(); + let (task, _status_code) = cattos_index.create(None).await; server .index_swap(json!([ - { "indexes": ["doggos", "cattos"] } + { "indexes": [doggos_index.uid, cattos_index.uid] } ])) .await; server.wait_task(task.uid()).await.succeeded(); let (batch, _) = server.get_batch(1).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => "{\n\t\"doggos\": 1}" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -1184,12 +1207,12 @@ async fn test_summarized_index_swap() { #[actix_web::test] async fn test_summarized_batch_cancelation() { - let server = Server::new().await; - let index = server.index("doggos"); + let server = 
Server::new_shared(); + let index = server.unique_index(); // to avoid being flaky we're only going to cancel an already finished batch :( let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (task, _status_code) = server.cancel_tasks("uids=0").await; + let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, @@ -1231,31 +1254,33 @@ async fn test_summarized_batch_cancelation() { #[actix_web::test] async fn test_summarized_batch_deletion() { - let server = Server::new().await; - let index = server.index("doggos"); + let server = Server::new_shared(); + let index = server.unique_index(); // to avoid being flaky we're only going to delete an already finished batch :( let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (task, _status_code) = server.delete_tasks("uids=0").await; + let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".details.originalFilter" => "?uids=X" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "matchedTasks": 1, "deletedTasks": 1, - "originalFilter": "?uids=0" + "originalFilter": "?uids=X" }, "stats": { "totalNbTasks": 1, @@ -1278,12 +1303,13 @@ async fn test_summarized_batch_deletion() { #[actix_web::test] async fn test_summarized_dump_creation() { - let server = Server::new().await; + let server = Server::new_shared(); let (task, _status_code) = server.create_dump().await; - server.wait_task(task.uid()).await; - let (batch, _) = server.get_batch(0).await; + server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".details.dumpUid" => "[dumpUid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", @@ -1294,7 +1320,7 @@ async fn test_summarized_dump_creation() { }, @r###" { - "uid": 0, + "uid": "[uid]", "progress": null, "details": { "dumpUid": "[dumpUid]" From cb15e5c67e1c6fed85f3d6c85875b9bb34e75093 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 09:13:56 +0300 Subject: [PATCH 02/21] WIP: More snapshot updates Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 107 +++++++++++------------- 1 file changed, 48 insertions(+), 59 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 4613f71fc..bb926af70 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -12,10 +12,10 @@ async fn error_get_unexisting_batch_status() { let index = server.unique_index(); let (task, _coder) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (response, code) = index.get_batch(1).await; + let (response, code) = index.get_batch(task.uid() as u32).await; let expected_response = 
json!({ - "message": "Batch `1` not found.", + "message": format!("Batch `{}` not found.", task.uid()), "code": "batch_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#batch_not_found" @@ -147,15 +147,15 @@ async fn list_batches_status_filtered() { index.wait_task(task.uid()).await.failed(); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = index.filtered_batches(&[], &["succeeded", "failed"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } @@ -168,16 +168,16 @@ async fn list_batches_type_filtered() { let (task, _) = index.delete().await; index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); let (response, code) = - index.filtered_batches(&["indexCreation", "IndexDeletion"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + index.filtered_batches(&["indexCreation", "indexDeletion"], &[], &[]).await; + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); } @@ -189,7 +189,7 @@ async fn list_batches_invalid_canceled_by_filter() { index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&[], &[], &["0"]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 0); } @@ -203,7 +203,7 @@ async fn list_batches_status_and_type_filtered() { index.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &["failed"], &[]).await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 0); let (response, code) = index @@ -213,7 +213,7 @@ async fn list_batches_status_and_type_filtered() { &[], ) .await; - assert_eq!(code, 200, "{}", response); + assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } @@ -222,7 +222,7 @@ async fn list_batch_filter_error() { let server = Server::new_shared(); let (response, code) = server.batches_filter("lol=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Unknown parameter `lol`: expected one of `limit`, `from`, `reverse`, `batchUids`, `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`", @@ -233,7 +233,7 @@ async fn list_batch_filter_error() { "#); let (response, code) = 
server.batches_filter("uids=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer", @@ -244,7 +244,7 @@ async fn list_batch_filter_error() { "#); let (response, code) = server.batches_filter("from=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Invalid value in parameter `from`: could not parse `pied` as a positive integer", @@ -255,7 +255,7 @@ async fn list_batch_filter_error() { "#); let (response, code) = server.batches_filter("beforeStartedAt=pied").await; - assert_eq!(code, 400, "{}", response); + assert_eq!(code, 400, "{response}"); meili_snap::snapshot!(meili_snap::json_string!(response), @r#" { "message": "Invalid value in parameter `beforeStartedAt`: `pied` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.", @@ -283,7 +283,8 @@ async fn test_summarized_document_addition_or_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"""{"test": 1}"""# }, @r###" { @@ -301,9 +302,7 @@ async fn test_summarized_document_addition_or_update() { "types": { "documentAdditionOrUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]", "writeChannelCongestion": "[writeChannelCongestion]", "internalDatabaseSizes": "[internalDatabaseSizes]" @@ -376,7 +375,7 @@ async fn test_summarized_delete_documents_by_batch() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}" + ".stats.indexUids" => r#"""{"test": 1}"""# }, @r###" { @@ -394,9 +393,7 @@ async fn test_summarized_delete_documents_by_batch() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -467,7 +464,8 @@ async fn test_summarized_delete_documents_by_filter() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}" + ".stats.indexUids" => r#"""{"test": 1}"""#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid`" }, @r###" { @@ -486,15 +484,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid`" } "###); @@ -603,7 +599,7 @@ async fn test_summarized_delete_document_by_id() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}" + ".stats.indexUids" => r#"""{"test": 1}"""# }, @r###" { @@ -621,9 +617,7 @@ async fn test_summarized_delete_document_by_id() { "types": { 
"documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -705,7 +699,7 @@ async fn test_summarized_settings_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", - ".stats.indexUids" => "{\n\t\"test\": 1}", + ".stats.indexUids" => r#"""{"test": 1}"""#, ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @@ -734,9 +728,7 @@ async fn test_summarized_settings_update() { "types": { "settingsUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -763,7 +755,7 @@ async fn test_summarized_index_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}", + ".stats.indexUids" => r#"""{"test": 1}"""#, ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" @@ -779,9 +771,7 @@ async fn test_summarized_index_creation() { "types": { "indexCreation": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -896,7 +886,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "succeeded", "type": "indexDeletion", "canceledBy": null, @@ -919,7 +909,7 @@ async fn test_summarized_index_deletion() { { "uid": "[uid]", "batchUid": "[batch_uid]", - "indexUid": "test", + "indexUid": "[uuid]", "status": "failed", "type": "indexDeletion", "canceledBy": null, @@ -927,7 +917,7 @@ async fn test_summarized_index_deletion() { "deletedDocuments": 0 }, "error": { - "message": "Index `test` not found.", + "message": "Index `[uuid]` not found.", "code": "index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" @@ -947,7 +937,7 @@ async fn test_summarized_index_update() { // If the index doesn't exist yet, we should get errors with or without the primary key. 
let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -957,7 +947,7 @@ async fn test_summarized_index_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"test\": 1}", + ".stats.indexUids" => r#"{"test": 1}"#, ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" @@ -973,9 +963,7 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": {"test": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -1177,7 +1165,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => "{\n\t\"doggos\": 1}" + ".stats.indexUids" => r#"""{"doggos": 1}"""# }, @r###" { @@ -1192,9 +1180,7 @@ async fn test_summarized_index_swap() { "types": { "indexCreation": 1 }, - "indexUids": { - "doggos": 1 - }, + "indexUids": {"doggos": 1}, "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -1214,19 +1200,21 @@ async fn test_summarized_batch_cancelation() { index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "matchedTasks": 1, @@ -1247,7 +1235,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type `taskCancelation` cannot be batched" + "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" } "###); } @@ -1316,7 +1304,8 @@ async fn test_summarized_dump_creation() { ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".batchCreationComplete" => "task with id X of type `dumpCreation` cannot be batched" }, @r###" { @@ -1339,7 +1328,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 0 of type `dumpCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" } "###); } From 48460678dfa2a62b4fe8676a0dc0474da90f1d74 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 10:50:22 +0300 Subject: [PATCH 03/21] More assertion fixes Signed-off-by: Martin Tzvetanov Grigorov --- 
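Note (reviewer aid, not part of the commit message): this patch converges the
snapshot redactions on "[uuid]" index names and "X" task-id placeholders, since
`unique_index()` on the shared server appears to generate a fresh,
non-deterministic uid per test (hence the "[uuid]" redactions). The lookup
pattern the tests settle on at this point in the series is, roughly (sketch
assembled from the hunks below; patch 05 later replaces the cast with a
`task.batch_uid()` helper):

    let (task, _code) = index.create(None).await;
    index.wait_task(task.uid()).await.succeeded();
    // uid-relative: the batch id comes from the task, never a hardcoded 0
    let (batch, _) = index.get_batch(task.uid() as u32).await;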
crates/meilisearch/tests/batches/mod.rs | 195 +++++++++++++----------- 1 file changed, 105 insertions(+), 90 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index bb926af70..ce5ad41b6 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -12,10 +12,10 @@ async fn error_get_unexisting_batch_status() { let index = server.unique_index(); let (task, _coder) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (response, code) = index.get_batch(task.uid() as u32).await; + let (response, code) = index.get_batch(u32::MAX).await; let expected_response = json!({ - "message": format!("Batch `{}` not found.", task.uid()), + "message": format!("Batch `{}` not found.", u32::MAX), "code": "batch_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#batch_not_found" @@ -31,7 +31,7 @@ async fn get_batch_status() { let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (_response, code) = index.get_batch(0).await; + let (_response, code) = index.get_batch(task.uid() as u32).await; assert_eq!(code, 200); } @@ -284,7 +284,8 @@ async fn test_summarized_document_addition_or_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", - ".stats.indexUids" => r#"""{"test": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -302,7 +303,7 @@ async fn test_summarized_document_addition_or_update() { "types": { "documentAdditionOrUpdate": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]", "writeChannelCongestion": "[writeChannelCongestion]", "internalDatabaseSizes": "[internalDatabaseSizes]" @@ -310,14 +311,14 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -327,7 +328,9 @@ async fn test_summarized_document_addition_or_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -345,16 +348,14 @@ async fn test_summarized_document_addition_or_update() { "types": { "documentAdditionOrUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]", "writeChannelCongestion": "[writeChannelCongestion]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks 
for index `[uuid]`" } "###); } @@ -363,7 +364,10 @@ async fn test_summarized_document_addition_or_update() { async fn test_summarized_delete_documents_by_batch() { let server = Server::new_shared(); let index = server.unique_index(); - let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await; + let task_uid_1 = (u32::MAX - 1) as u64; + let task_uid_2 = (u32::MAX - 2) as u64; + let task_uid_3 = (u32::MAX - 3) as u64; + let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await; index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, @@ -375,7 +379,8 @@ async fn test_summarized_delete_documents_by_batch() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -393,33 +398,35 @@ async fn test_summarized_delete_documents_by_batch() { "types": { "documentDeletion": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); index.create(None).await; let (task, _status_code) = index.delete_batch(vec![42]).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(2).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, }, @r###" { - "uid": 2, + "uid": "[uid]", "progress": null, "details": { "providedIds": 1, @@ -433,9 +440,7 @@ async fn test_summarized_delete_documents_by_batch() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -464,8 +469,8 @@ async fn test_summarized_delete_documents_by_filter() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid`" + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -484,13 +489,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid`" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); @@ -498,20 +503,23 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ 
"filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(2).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { - "uid": 2, + "uid": "[uid]", "progress": null, "details": { "providedIds": 0, @@ -526,15 +534,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); @@ -542,20 +548,23 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(4).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { - "uid": 4, + "uid": "[uid]", "progress": null, "details": { "providedIds": 0, @@ -570,15 +579,13 @@ async fn test_summarized_delete_documents_by_filter() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); } @@ -599,7 +606,8 @@ async fn test_summarized_delete_document_by_id() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -617,33 +625,36 @@ async fn test_summarized_delete_document_by_id() { "types": { "documentDeletion": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); index.create(None).await; let (task, _status_code) = index.delete_document(42).await; index.wait_task(task.uid()).await.succeeded(); - let 
(batch, _) = index.get_batch(2).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]" + ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { - "uid": 2, + "uid": "[uid]", "progress": null, "details": { "providedIds": 1, @@ -657,15 +668,13 @@ async fn test_summarized_delete_document_by_id() { "types": { "documentDeletion": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" } "###); } @@ -699,7 +708,7 @@ async fn test_summarized_settings_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", - ".stats.indexUids" => r#"""{"test": 1}"""#, + ".stats.indexUids" => r#"{"[uuid]": 1}"#, ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @@ -728,7 +737,7 @@ async fn test_summarized_settings_update() { "types": { "settingsUpdate": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -755,7 +764,7 @@ async fn test_summarized_index_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"test": 1}"""#, + ".stats.indexUids" => r#"{"[uuid]": 1}"#, ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" @@ -771,7 +780,7 @@ async fn test_summarized_index_creation() { "types": { "indexCreation": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -783,19 +792,22 @@ async fn test_summarized_index_creation() { let (task, _status_code) = index.create(Some("doggos")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "primaryKey": "doggos" @@ -808,15 +820,13 @@ async fn test_summarized_index_creation() { "types": { "indexCreation": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type 
`indexCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } @@ -947,7 +957,7 @@ async fn test_summarized_index_update() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"{"test": 1}"#, + ".stats.indexUids" => r#"{"[uuid]": 1}"#, ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" @@ -963,7 +973,7 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": {"test": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", @@ -975,19 +985,22 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(1).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 1, + "uid": "[uid]", "progress": null, "details": { "primaryKey": "bones" @@ -1000,15 +1013,13 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1017,19 +1028,22 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(3).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => "[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 3, + "uid": "[uid]", "progress": null, "details": {}, "stats": { @@ -1040,33 +1054,34 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 3 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(4).await; + let (batch, _) = index.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { + ".uid" => 
"[uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", - ".stats.writeChannelCongestion" => "[writeChannelCongestion]" + ".stats.writeChannelCongestion" => "[writeChannelCongestion]", + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" }, @r###" { - "uid": 4, + "uid": "[uid]", "progress": null, "details": { "primaryKey": "bones" @@ -1079,15 +1094,13 @@ async fn test_summarized_index_update() { "types": { "indexUpdate": 1 }, - "indexUids": { - "test": 1 - }, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 4 of type `indexUpdate` cannot be batched" + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); } @@ -1155,7 +1168,7 @@ async fn test_summarized_index_swap() { ])) .await; server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_batch(1).await; + let (batch, _) = server.get_batch(task.uid() as u32).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1165,7 +1178,8 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".stats.indexUids" => r#"""{"doggos": 1}"""# + ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" }, @r###" { @@ -1180,13 +1194,13 @@ async fn test_summarized_index_swap() { "types": { "indexCreation": 1 }, - "indexUids": {"doggos": 1}, + "indexUids": "{\"[uuid]\": 1}", "progressTrace": "[progressTrace]" }, "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id 1 of type `indexCreation` cannot be batched" + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } @@ -1210,7 +1224,8 @@ async fn test_summarized_batch_cancelation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched" + ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched", + ".details.originalFilter" => "?uids=X", }, @r###" { @@ -1219,7 +1234,7 @@ async fn test_summarized_batch_cancelation() { "details": { "matchedTasks": 1, "canceledTasks": 0, - "originalFilter": "?uids=0" + "originalFilter": "?uids=X" }, "stats": { "totalNbTasks": 1, From 2691999bd3d0f1929108ad6db769f1dc7e7b7e63 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 11:15:27 +0300 Subject: [PATCH 04/21] Add a helper method for getting the latest batch Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 2 +- crates/meilisearch/tests/common/server.rs | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index ce5ad41b6..82403fe3b 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1309,7 +1309,7 @@ async fn test_summarized_dump_creation() { let server = Server::new_shared(); let (task, _status_code) = server.create_dump().await; 
server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_batch(task.uid() as u32).await; + let (batch, _) = server.get_latest_batch().await; assert_json_snapshot!(batch, { ".uid" => "[uid]", diff --git a/crates/meilisearch/tests/common/server.rs b/crates/meilisearch/tests/common/server.rs index 431972983..787cafc9f 100644 --- a/crates/meilisearch/tests/common/server.rs +++ b/crates/meilisearch/tests/common/server.rs @@ -429,6 +429,19 @@ impl Server { self.service.get(url).await } + // https://www.meilisearch.com/docs/reference/api/batches#get-batches states: + // "Batches are always returned in descending order of uid. This means that by default, + // the most recently created batch objects appear first." + pub async fn get_latest_batch(&self) -> (Option, StatusCode) { + let url = "/batches?limit=1&offset=0"; + let (value, code) = self.service.get(url).await; + value + .get("results") + .and_then(|results| results.as_array()) + .and_then(|array| array.first()) + .map_or((None, code), |latest| (Some(Value(latest.clone())), code)) + } + pub async fn get_features(&self) -> (Value, StatusCode) { self.service.get("/experimental-features").await } From 139ec8c7827c3694be24cb9090edc4b4056cd15c Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 3 Jun 2025 15:23:14 +0300 Subject: [PATCH 05/21] Add task.batch_uid() helper method Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 38 ++++++++++++------------- crates/meilisearch/tests/common/mod.rs | 9 ++++++ 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 82403fe3b..e6801f269 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -31,7 +31,7 @@ async fn get_batch_status() { let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (_response, code) = index.get_batch(task.uid() as u32).await; + let (_response, code) = index.get_batch(task.batch_uid()).await; assert_eq!(code, 200); } @@ -273,7 +273,7 @@ async fn test_summarized_document_addition_or_update() { let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -318,7 +318,7 @@ async fn test_summarized_document_addition_or_update() { let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -369,7 +369,7 @@ async fn test_summarized_delete_documents_by_batch() { let task_uid_3 = (u32::MAX - 3) as u64; let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -411,7 +411,7 @@ async fn test_summarized_delete_documents_by_batch() { index.create(None).await; let (task, _status_code) = 
index.delete_batch(vec![42]).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -459,7 +459,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -503,7 +503,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -548,7 +548,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -596,7 +596,7 @@ async fn test_summarized_delete_document_by_id() { let index = server.unique_index(); let (task, _status_code) = index.delete_document(1).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -638,7 +638,7 @@ async fn test_summarized_delete_document_by_id() { index.create(None).await; let (task, _status_code) = index.delete_document(42).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -697,7 +697,7 @@ async fn test_summarized_settings_update() { let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -754,7 +754,7 @@ async fn test_summarized_index_creation() { let index = server.unique_index(); let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -792,7 +792,7 @@ async fn test_summarized_index_creation() { let (task, _status_code) = index.create(Some("doggos")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -947,7 +947,7 @@ async fn test_summarized_index_update() { // If the index doesn't exist yet, we should get errors with or without the primary key. 
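// The failure asserted below is asynchronous: the update call itself answers 202 Accepted and the error only surfaces on the task once it has been processed, hence the wait-then-assert pattern used throughout these tests:
//     let (task, _status_code) = index.update(None).await; // 202 Accepted
//     index.wait_task(task.uid()).await.failed();          // error lands here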
let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -985,7 +985,7 @@ async fn test_summarized_index_update() { let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.failed(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1028,7 +1028,7 @@ let (task, _status_code) = index.update(None).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1066,7 +1066,7 @@ let (task, _status_code) = index.update(Some("bones")).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1214,7 +1214,7 @@ async fn test_summarized_batch_cancelation() { index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1264,7 +1264,7 @@ async fn test_summarized_batch_deletion() { index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; index.wait_task(task.uid()).await.succeeded(); - let (batch, _) = index.get_batch(task.uid() as u32).await; + let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", diff --git a/crates/meilisearch/tests/common/mod.rs b/crates/meilisearch/tests/common/mod.rs index 373f89f78..d1da616ad 100644 --- a/crates/meilisearch/tests/common/mod.rs +++ b/crates/meilisearch/tests/common/mod.rs @@ -38,6 +38,15 @@ impl Value { self["uid"].as_u64().is_some() || self["taskUid"].as_u64().is_some() } + #[track_caller] + pub fn batch_uid(&self) -> u32 { + if let Some(batch_uid) = self["batchUid"].as_u64() { + batch_uid as u32 + } else { + panic!("Didn't find `batchUid` in: {self}"); + } + } + /// Return `true` if the `status` field is set to `succeeded`. /// Panic if the `status` field doesn't exist.
#[track_caller] From 9e31d6ceff910fd8d6eba731fd665e69de4544c2 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Thu, 5 Jun 2025 11:11:54 +0300 Subject: [PATCH 06/21] Add batch_uid to all successful and failed tasks too Signed-off-by: Martin Tzvetanov Grigorov --- crates/index-scheduler/src/queue/tasks.rs | 6 +++++- crates/meilisearch/tests/batches/mod.rs | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/crates/index-scheduler/src/queue/tasks.rs b/crates/index-scheduler/src/queue/tasks.rs index 74192232e..92789b93f 100644 --- a/crates/index-scheduler/src/queue/tasks.rs +++ b/crates/index-scheduler/src/queue/tasks.rs @@ -530,7 +530,11 @@ impl Queue { ..task } } else { - task + if task.status == Status::Succeeded || task.status == Status::Failed { + Task { batch_uid: Some(batch.uid), ..task } + } else { + task + } } }) .collect(), diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index e6801f269..d5374a144 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1,7 +1,7 @@ mod errors; use meili_snap::insta::assert_json_snapshot; -use meili_snap::snapshot; +use meili_snap::{json_string, snapshot}; use crate::common::Server; use crate::json; @@ -119,21 +119,21 @@ async fn list_batches_with_star_filters() { let (response, code) = index.service.get("/batches?types=*,documentAdditionOrUpdate&statuses=*").await; - assert_eq!(code, 200, "{:?}", response); + assert_eq!(code, 200, "{response:?}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); let (response, code) = index .service .get("/batches?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test") .await; - assert_eq!(code, 200, "{:?}", response); + assert_eq!(code, 200, "{response:?}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); let (response, code) = index .service .get("/batches?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test,*") .await; - assert_eq!(code, 200, "{:?}", response); + assert_eq!(code, 200, "{response:?}"); assert_eq!(response["results"].as_array().unwrap().len(), 2); } @@ -223,7 +223,7 @@ async fn list_batch_filter_error() { let (response, code) = server.batches_filter("lol=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Unknown parameter `lol`: expected one of `limit`, `from`, `reverse`, `batchUids`, `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`", "code": "bad_request", let (response, code) = server.batches_filter("uids=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer", "code": "invalid_task_uids", let (response, code) = server.batches_filter("from=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Invalid value in parameter `from`: could not parse `pied` as a positive integer", "code": "invalid_task_from", @@ -256,7 +256,7 @@ async fn
list_batch_filter_error() { let (response, code) = server.batches_filter("beforeStartedAt=pied").await; assert_eq!(code, 400, "{response}"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r#" + snapshot!(json_string!(response), @r#" { "message": "Invalid value in parameter `beforeStartedAt`: `pied` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.", "code": "invalid_task_before_started_at", @@ -685,8 +685,8 @@ async fn test_summarized_settings_update() { let index = server.unique_index(); // here we should find my payload even in the failed batch. let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await; - meili_snap::snapshot!(code, @"400 Bad Request"); - meili_snap::snapshot!(meili_snap::json_string!(response), @r###" + snapshot!(code, @"400 Bad Request"); + snapshot!(json_string!(response), @r###" { "message": "Invalid value at `.rankingRules[0]`: `custom` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules.", "code": "invalid_settings_ranking_rules", From 074744b8a6989df8c0021bd5b9396afc510d8c5f Mon Sep 17 00:00:00 2001 From: Louis Dureuil Date: Tue, 8 Jul 2025 10:54:39 +0200 Subject: [PATCH 07/21] Ignore yet-another flaky test --- crates/meilisearch/tests/search/multi/proxy.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/meilisearch/tests/search/multi/proxy.rs b/crates/meilisearch/tests/search/multi/proxy.rs index 55736d058..c537f5ae8 100644 --- a/crates/meilisearch/tests/search/multi/proxy.rs +++ b/crates/meilisearch/tests/search/multi/proxy.rs @@ -1224,6 +1224,7 @@ async fn error_bad_request_facets_by_index_facet() { } #[actix_rt::test] +#[ignore] async fn error_remote_does_not_answer() { let ms0 = Server::new().await; let ms1 = Server::new().await; From 5342df26feadb02b0a14669f1ffca5ce499c6220 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Sun, 22 Jun 2025 14:50:11 +0300 Subject: [PATCH 08/21] tests: Use Server::wait_task() instead of Index::wait_task() The code is mostly duplicated. Server::wait_task() has better handling for errors and more retries. 
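For reference, the removed Index::wait_task (see the deletion hunk in common/index.rs below) polled `GET /tasks/{uid}` until the task reached a terminal status, along these lines (simplified shape, for context only):

    let url = format!("/tasks/{}", update_id);
    for _ in 0..100 {
        let (response, status_code) = self.service.get(&url).await;
        assert_eq!(200, status_code, "response: {}", response);
        if response["status"] == "succeeded" || response["status"] == "failed" {
            return response;
        }
        // wait 0.5 second before polling again
        sleep(Duration::from_millis(500)).await;
    }
    panic!("Timeout waiting for update id");

Server::wait_task keeps the same polling shape but, as noted above, handles errors better and retries more, so call sites only need to change their receiver from `index` to `server`.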
Signed-off-by: Martin Tzvetanov Grigorov --- .../meilisearch/tests/auth/authorization.rs | 34 ++++----- crates/meilisearch/tests/auth/tenant_token.rs | 6 +- .../tests/auth/tenant_token_multi_search.rs | 32 ++++---- crates/meilisearch/tests/batches/mod.rs | 74 +++++++++---------- crates/meilisearch/tests/common/index.rs | 19 ----- crates/meilisearch/tests/common/mod.rs | 20 ++--- crates/meilisearch/tests/dumps/mod.rs | 6 +- crates/meilisearch/tests/similar/errors.rs | 26 +++---- crates/meilisearch/tests/snapshot/mod.rs | 8 +- crates/meilisearch/tests/stats/mod.rs | 18 ++--- .../tests/vector/binary_quantized.rs | 6 +- crates/meilisearch/tests/vector/mod.rs | 46 ++++++------ crates/meilisearch/tests/vector/ollama.rs | 2 +- crates/meilisearch/tests/vector/openai.rs | 20 ++--- crates/meilisearch/tests/vector/rest.rs | 8 +- 15 files changed, 153 insertions(+), 172 deletions(-) diff --git a/crates/meilisearch/tests/auth/authorization.rs b/crates/meilisearch/tests/auth/authorization.rs index 277911fb8..20815d987 100644 --- a/crates/meilisearch/tests/auth/authorization.rs +++ b/crates/meilisearch/tests/auth/authorization.rs @@ -304,7 +304,7 @@ async fn access_authorized_stats_restricted_index() { let (response, code) = index.create(Some("product_id")).await; assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; // create key with access on `products` index only. let content = json!({ @@ -344,7 +344,7 @@ async fn access_authorized_stats_no_index_restriction() { let (response, code) = index.create(Some("product_id")).await; assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; // create key with access on all indexes. let content = json!({ @@ -384,7 +384,7 @@ async fn list_authorized_indexes_restricted_index() { let (response, code) = index.create(Some("product_id")).await; assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; // create key with access on `products` index only. let content = json!({ @@ -425,7 +425,7 @@ async fn list_authorized_indexes_no_index_restriction() { let (response, code) = index.create(Some("product_id")).await; assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; // create key with access on all indexes. let content = json!({ @@ -589,8 +589,8 @@ async fn raise_error_non_authorized_index_patterns() { // refer to products_2 with modified api key. let product_2_index = server.index("products_2"); - product_1_index.wait_task(task1_id).await; - product_2_index.wait_task(task2_id).await; + server.wait_task(task1_id).await; + server.wait_task(task2_id).await; let (response, code) = product_1_index.get_task(task1_id).await; assert_eq!(200, code, "{:?}", &response); @@ -650,7 +650,7 @@ async fn list_authorized_tasks_restricted_index() { let (response, code) = index.create(Some("product_id")).await; assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; // create key with access on `products` index only.
let content = json!({ @@ -690,7 +690,7 @@ async fn list_authorized_tasks_no_index_restriction() { let (response, code) = index.create(Some("product_id")).await; assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; // create key with access on all indexes. let content = json!({ @@ -757,7 +757,7 @@ async fn error_creating_index_without_action() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - let response = index.wait_task(task_id).await; + let response = server.wait_task(task_id).await; assert_eq!(response["status"], "failed"); assert_eq!(response["error"], expected_error.clone()); @@ -768,7 +768,7 @@ async fn error_creating_index_without_action() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - let response = index.wait_task(task_id).await; + let response = server.wait_task(task_id).await; assert_eq!(response["status"], "failed"); assert_eq!(response["error"], expected_error.clone()); @@ -778,7 +778,7 @@ async fn error_creating_index_without_action() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - let response = index.wait_task(task_id).await; + let response = server.wait_task(task_id).await; assert_eq!(response["status"], "failed"); assert_eq!(response["error"], expected_error.clone()); @@ -830,7 +830,7 @@ async fn lazy_create_index() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; let (response, code) = index.get_task(task_id).await; assert_eq!(200, code, "{:?}", &response); @@ -844,7 +844,7 @@ async fn lazy_create_index() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; let (response, code) = index.get_task(task_id).await; assert_eq!(200, code, "{:?}", &response); @@ -856,7 +856,7 @@ async fn lazy_create_index() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; let (response, code) = index.get_task(task_id).await; assert_eq!(200, code, "{:?}", &response); @@ -911,7 +911,7 @@ async fn lazy_create_index_from_pattern() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; let (response, code) = index.get_task(task_id).await; assert_eq!(200, code, "{:?}", &response); @@ -929,7 +929,7 @@ async fn lazy_create_index_from_pattern() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; let (response, code) = index.get_task(task_id).await; assert_eq!(200, code, "{:?}", &response); @@ -949,7 +949,7 @@ async fn lazy_create_index_from_pattern() { assert_eq!(202, code, "{:?}", &response); let task_id = response["taskUid"].as_u64().unwrap(); - index.wait_task(task_id).await; + server.wait_task(task_id).await; let (response, code) = index.get_task(task_id).await; assert_eq!(200, code, "{:?}", &response); diff --git a/crates/meilisearch/tests/auth/tenant_token.rs b/crates/meilisearch/tests/auth/tenant_token.rs index a3f89e70b..0259c7d32 100644 --- 
a/crates/meilisearch/tests/auth/tenant_token.rs +++ b/crates/meilisearch/tests/auth/tenant_token.rs @@ -100,11 +100,11 @@ macro_rules! compute_authorized_search { let index = server.index("sales"); let documents = DOCUMENTS.clone(); let (task1,_status_code) = index.add_documents(documents, None).await; - index.wait_task(task1.uid()).await.succeeded(); + server.wait_task(task1.uid()).await.succeeded(); let (task2,_status_code) = index .update_settings(json!({"filterableAttributes": ["color"]})) .await; - index.wait_task(task2.uid()).await.succeeded(); + server.wait_task(task2.uid()).await.succeeded(); drop(index); for key_content in ACCEPTED_KEYS.iter() { @@ -147,7 +147,7 @@ macro_rules! compute_forbidden_search { let index = server.index("sales"); let documents = DOCUMENTS.clone(); let (task, _status_code) = index.add_documents(documents, None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); drop(index); for key_content in $parent_keys.iter() { diff --git a/crates/meilisearch/tests/auth/tenant_token_multi_search.rs b/crates/meilisearch/tests/auth/tenant_token_multi_search.rs index 9059299f3..5fd8d29e2 100644 --- a/crates/meilisearch/tests/auth/tenant_token_multi_search.rs +++ b/crates/meilisearch/tests/auth/tenant_token_multi_search.rs @@ -268,21 +268,21 @@ macro_rules! compute_authorized_single_search { let index = server.index("sales"); let documents = DOCUMENTS.clone(); let (add_task,_status_code) = index.add_documents(documents, None).await; - index.wait_task(add_task.uid()).await.succeeded(); + server.wait_task(add_task.uid()).await.succeeded(); let (update_task,_status_code) = index .update_settings(json!({"filterableAttributes": ["color"]})) .await; - index.wait_task(update_task.uid()).await.succeeded(); + server.wait_task(update_task.uid()).await.succeeded(); drop(index); let index = server.index("products"); let documents = NESTED_DOCUMENTS.clone(); let (add_task2,_status_code) = index.add_documents(documents, None).await; - index.wait_task(add_task2.uid()).await.succeeded(); + server.wait_task(add_task2.uid()).await.succeeded(); let (update_task2,_status_code) = index .update_settings(json!({"filterableAttributes": ["doggos"]})) .await; - index.wait_task(update_task2.uid()).await.succeeded(); + server.wait_task(update_task2.uid()).await.succeeded(); drop(index); @@ -339,21 +339,21 @@ macro_rules! compute_authorized_multiple_search { let index = server.index("sales"); let documents = DOCUMENTS.clone(); let (task,_status_code) = index.add_documents(documents, None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task,_status_code) = index .update_settings(json!({"filterableAttributes": ["color"]})) .await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); drop(index); let index = server.index("products"); let documents = NESTED_DOCUMENTS.clone(); let (task,_status_code) = index.add_documents(documents, None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task,_status_code) = index .update_settings(json!({"filterableAttributes": ["doggos"]})) .await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); drop(index); @@ -423,21 +423,21 @@ macro_rules! 
compute_forbidden_single_search { let index = server.index("sales"); let documents = DOCUMENTS.clone(); let (task,_status_code) = index.add_documents(documents, None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task,_status_code) = index .update_settings(json!({"filterableAttributes": ["color"]})) .await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); drop(index); let index = server.index("products"); let documents = NESTED_DOCUMENTS.clone(); let (task,_status_code) = index.add_documents(documents, None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task,_status_code) = index .update_settings(json!({"filterableAttributes": ["doggos"]})) .await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); drop(index); assert_eq!($parent_keys.len(), $failed_query_indexes.len(), "keys != query_indexes"); @@ -499,21 +499,21 @@ macro_rules! compute_forbidden_multiple_search { let index = server.index("sales"); let documents = DOCUMENTS.clone(); let (task,_status_code) = index.add_documents(documents, None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task,_status_code) = index .update_settings(json!({"filterableAttributes": ["color"]})) .await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); drop(index); let index = server.index("products"); let documents = NESTED_DOCUMENTS.clone(); let (task,_status_code) = index.add_documents(documents, None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task,_status_code) = index .update_settings(json!({"filterableAttributes": ["doggos"]})) .await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); drop(index); assert_eq!($parent_keys.len(), $failed_query_indexes.len(), "keys != query_indexes"); diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 50c9bdafd..a409aba03 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -11,7 +11,7 @@ async fn error_get_unexisting_batch_status() { let server = Server::new().await; let index = server.index("test"); let (task, _coder) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.get_batch(1).await; let expected_response = json!({ @@ -30,7 +30,7 @@ async fn get_batch_status() { let server = Server::new().await; let index = server.index("test"); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (_response, code) = index.get_batch(0).await; assert_eq!(code, 200); } @@ -40,9 +40,9 @@ async fn list_batches() { let server = Server::new().await; let index = server.index("test"); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.list_batches().await; assert_eq!(code, 200); assert_eq!( @@ -96,10 +96,10 @@ async 
fn list_batches_with_star_filters() { let server = Server::new().await; let index = server.index("test"); let (task, _code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let index = server.index("test"); let (task, _code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.service.get("/batches?indexUids=test").await; assert_eq!(code, 200); @@ -142,9 +142,9 @@ async fn list_batches_status_filtered() { let server = Server::new().await; let index = server.index("test"); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; assert_eq!(code, 200, "{}", response); @@ -164,9 +164,9 @@ async fn list_batches_type_filtered() { let server = Server::new().await; let index = server.index("test"); let (task, _) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _) = index.delete().await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; assert_eq!(code, 200, "{}", response); assert_eq!(response["results"].as_array().unwrap().len(), 1); @@ -186,7 +186,7 @@ async fn list_batches_invalid_canceled_by_filter() { let server = Server::new().await; let index = server.index("test"); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&[], &[], &["0"]).await; assert_eq!(code, 200, "{}", response); @@ -198,9 +198,9 @@ async fn list_batches_status_and_type_filtered() { let server = Server::new().await; let index = server.index("test"); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.update(Some("id")).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &["failed"], &[]).await; assert_eq!(code, 200, "{}", response); @@ -272,7 +272,7 @@ async fn test_summarized_document_addition_or_update() { let index = server.index("test"); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(0).await; assert_json_snapshot!(batch, { @@ -316,7 +316,7 @@ async fn test_summarized_document_addition_or_update() { let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, { @@ -363,7 +363,7 @@ async fn test_summarized_delete_documents_by_batch() { let server = Server::new().await; let index = server.index("test"); let (task, 
_status_code) = index.delete_batch(vec![1, 2, 3]).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(0).await; assert_json_snapshot!(batch, { @@ -404,7 +404,7 @@ async fn test_summarized_delete_documents_by_batch() { index.create(None).await; let (task, _status_code) = index.delete_batch(vec![42]).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(2).await; assert_json_snapshot!(batch, { @@ -452,7 +452,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(0).await; assert_json_snapshot!(batch, { @@ -495,7 +495,7 @@ async fn test_summarized_delete_documents_by_filter() { index.create(None).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(2).await; assert_json_snapshot!(batch, { @@ -539,7 +539,7 @@ async fn test_summarized_delete_documents_by_filter() { index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(4).await; assert_json_snapshot!(batch, { @@ -586,7 +586,7 @@ async fn test_summarized_delete_document_by_id() { let server = Server::new().await; let index = server.index("test"); let (task, _status_code) = index.delete_document(1).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(0).await; assert_json_snapshot!(batch, { @@ -628,7 +628,7 @@ async fn test_summarized_delete_document_by_id() { index.create(None).await; let (task, _status_code) = index.delete_document(42).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(2).await; assert_json_snapshot!(batch, { @@ -686,7 +686,7 @@ async fn test_summarized_settings_update() { "###); let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(0).await; assert_json_snapshot!(batch, { @@ -741,7 +741,7 @@ async fn test_summarized_index_creation() { let server = Server::new().await; let index = server.index("test"); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(0).await; assert_json_snapshot!(batch, { @@ -778,7 +778,7 @@ async fn test_summarized_index_creation() { "###); let (task, _status_code) = index.create(Some("doggos")).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, { @@ -822,7 +822,7 @@ async fn 
test_summarized_index_deletion() { let server = Server::new().await; let index = server.index("test"); let (ret, _code) = index.delete().await; - let batch = index.wait_task(ret.uid()).await.failed(); + let batch = server.wait_task(ret.uid()).await.failed(); snapshot!(batch, @r###" { @@ -853,7 +853,7 @@ async fn test_summarized_index_deletion() { // both batches may get autobatched and the deleted documents count will be wrong. let (ret, _code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; - let batch = index.wait_task(ret.uid()).await.succeeded(); + let batch = server.wait_task(ret.uid()).await.succeeded(); snapshot!(batch, @r###" { @@ -876,7 +876,7 @@ async fn test_summarized_index_deletion() { "###); let (ret, _code) = index.delete().await; - let batch = index.wait_task(ret.uid()).await.succeeded(); + let batch = server.wait_task(ret.uid()).await.succeeded(); snapshot!(batch, @r###" { @@ -899,7 +899,7 @@ async fn test_summarized_index_deletion() { // What happens when you delete an index that doesn't exist. let (ret, _code) = index.delete().await; - let batch = index.wait_task(ret.uid()).await.failed(); + let batch = server.wait_task(ret.uid()).await.failed(); snapshot!(batch, @r###" { @@ -932,7 +932,7 @@ async fn test_summarized_index_update() { let index = server.index("test"); // If the index doesn't exist yet, we should get errors with or without the primary key. let (task, _status_code) = index.update(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(0).await; assert_json_snapshot!(batch, { @@ -969,7 +969,7 @@ async fn test_summarized_index_update() { "###); let (task, _status_code) = index.update(Some("bones")).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, { @@ -1011,7 +1011,7 @@ index.create(None).await; let (task, _status_code) = index.update(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(3).await; assert_json_snapshot!(batch, { @@ -1048,7 +1048,7 @@ "###); let (task, _status_code) = index.update(Some("bones")).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(4).await; assert_json_snapshot!(batch, { @@ -1188,9 +1188,9 @@ async fn test_summarized_batch_cancelation() { let index = server.index("doggos"); // to avoid being flaky we're only going to cancel an already finished batch :( let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks("uids=0").await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, { @@ -1235,9 +1235,9 @@ async fn test_summarized_batch_deletion() { let index = server.index("doggos"); // to avoid being flaky we're only going to delete an already finished batch :( let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.delete_tasks("uids=0").await; -
index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(1).await; assert_json_snapshot!(batch, { diff --git a/crates/meilisearch/tests/common/index.rs b/crates/meilisearch/tests/common/index.rs index e324d2ff5..4da96403c 100644 --- a/crates/meilisearch/tests/common/index.rs +++ b/crates/meilisearch/tests/common/index.rs @@ -1,10 +1,8 @@ use std::fmt::Write; use std::marker::PhantomData; use std::panic::{catch_unwind, resume_unwind, UnwindSafe}; -use std::time::Duration; use actix_web::http::StatusCode; -use tokio::time::sleep; use urlencoding::encode as urlencode; use super::encoder::Encoder; @@ -364,23 +362,6 @@ impl Index<'_, State> { self.service.delete(url).await } - pub async fn wait_task(&self, update_id: u64) -> Value { - // try several times to get status, or panic to not wait forever - let url = format!("/tasks/{}", update_id); - for _ in 0..100 { - let (response, status_code) = self.service.get(&url).await; - assert_eq!(200, status_code, "response: {}", response); - - if response["status"] == "succeeded" || response["status"] == "failed" { - return response; - } - - // wait 0.5 second. - sleep(Duration::from_millis(500)).await; - } - panic!("Timeout waiting for update id"); - } - pub async fn get_task(&self, update_id: u64) -> (Value, StatusCode) { let url = format!("/tasks/{}", update_id); self.service.get(url).await diff --git a/crates/meilisearch/tests/common/mod.rs b/crates/meilisearch/tests/common/mod.rs index 1a73a7532..d023d464e 100644 --- a/crates/meilisearch/tests/common/mod.rs +++ b/crates/meilisearch/tests/common/mod.rs @@ -181,7 +181,7 @@ pub async fn shared_empty_index() -> &'static Index<'static, Shared> { let server = Server::new_shared(); let index = server._index("EMPTY_INDEX").to_shared(); let (response, _code) = index._create(None).await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); index }) .await @@ -229,13 +229,13 @@ pub async fn shared_index_with_documents() -> &'static Index<'static, Shared> { let index = server._index("SHARED_DOCUMENTS").to_shared(); let documents = DOCUMENTS.clone(); let (response, _code) = index._add_documents(documents, None).await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (response, _code) = index ._update_settings( json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}), ) .await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); index }).await } @@ -272,13 +272,13 @@ pub async fn shared_index_with_score_documents() -> &'static Index<'static, Shar let index = server._index("SHARED_SCORE_DOCUMENTS").to_shared(); let documents = SCORE_DOCUMENTS.clone(); let (response, _code) = index._add_documents(documents, None).await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (response, _code) = index ._update_settings( json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}), ) .await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); index }).await } @@ -349,13 +349,13 @@ pub async fn shared_index_with_nested_documents() -> &'static Index<'static, Sha let index = server._index("SHARED_NESTED_DOCUMENTS").to_shared(); let documents = NESTED_DOCUMENTS.clone(); let (response, _code) = 
index._add_documents(documents, None).await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (response, _code) = index ._update_settings( json!({"filterableAttributes": ["father", "doggos", "cattos"], "sortableAttributes": ["doggos"]}), ) .await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); index }).await } @@ -449,7 +449,7 @@ pub async fn shared_index_with_test_set() -> &'static Index<'static, Shared> { ) .await; assert_eq!(code, 202); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); index }) .await @@ -496,14 +496,14 @@ pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared let server = Server::new_shared(); let index = server._index("SHARED_GEO_DOCUMENTS").to_shared(); let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (response, _code) = index ._update_settings( json!({"filterableAttributes": ["_geo"], "sortableAttributes": ["_geo"]}), ) .await; - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); index }) .await diff --git a/crates/meilisearch/tests/dumps/mod.rs b/crates/meilisearch/tests/dumps/mod.rs index 9b111186d..f1bac5297 100644 --- a/crates/meilisearch/tests/dumps/mod.rs +++ b/crates/meilisearch/tests/dumps/mod.rs @@ -2366,7 +2366,7 @@ async fn generate_and_import_dump_containing_vectors() { )) .await; snapshot!(code, @"202 Accepted"); - let response = index.wait_task(response.uid()).await; + let response = server.wait_task(response.uid()).await; snapshot!(response); let (response, code) = index .add_documents( @@ -2381,12 +2381,12 @@ async fn generate_and_import_dump_containing_vectors() { ) .await; snapshot!(code, @"202 Accepted"); - let response = index.wait_task(response.uid()).await; + let response = server.wait_task(response.uid()).await; snapshot!(response); let (response, code) = server.create_dump().await; snapshot!(code, @"202 Accepted"); - let response = index.wait_task(response.uid()).await; + let response = server.wait_task(response.uid()).await; snapshot!(response["status"], @r###""succeeded""###); // ========= We made a dump, now we should clear the DB and try to import our dump diff --git a/crates/meilisearch/tests/similar/errors.rs b/crates/meilisearch/tests/similar/errors.rs index fa4118fe3..925db985a 100644 --- a/crates/meilisearch/tests/similar/errors.rs +++ b/crates/meilisearch/tests/similar/errors.rs @@ -298,7 +298,7 @@ async fn similar_bad_filter() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let (response, code) = index.similar_post(json!({ "id": 287947, "filter": true, "embedder": "manual" })).await; @@ -335,7 +335,7 @@ async fn filter_invalid_syntax_object() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); index .similar(json!({"id": 287947, "filter": "title & Glass", "embedder": "manual"}), |response, code| { @@ -373,7 +373,7 @@ async fn filter_invalid_syntax_array() { let documents = 
DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); index .similar(json!({"id": 287947, "filter": ["title & Glass"], "embedder": "manual"}), |response, code| { @@ -411,7 +411,7 @@ async fn filter_invalid_syntax_string() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let expected_response = json!({ "message": "Found unexpected characters at the end of the filter: `XOR title = Glass`. You probably forgot an `OR` or an `AND` rule.\n15:32 title = Glass XOR title = Glass", @@ -451,7 +451,7 @@ async fn filter_invalid_attribute_array() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); index .similar( @@ -492,7 +492,7 @@ async fn filter_invalid_attribute_string() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); index .similar( @@ -533,7 +533,7 @@ async fn filter_reserved_geo_attribute_array() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let expected_response = json!({ "message": "`_geo` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:13 _geo = Glass", @@ -573,7 +573,7 @@ async fn filter_reserved_geo_attribute_string() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let expected_response = json!({ "message": "`_geo` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:13 _geo = Glass", @@ -613,7 +613,7 @@ async fn filter_reserved_attribute_array() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let expected_response = json!({ "message": "`_geoDistance` is a reserved keyword and thus can't be used as a filter expression. 
Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:21 _geoDistance = Glass", @@ -653,7 +653,7 @@ async fn filter_reserved_attribute_string() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let expected_response = json!({ "message": "`_geoDistance` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:21 _geoDistance = Glass", @@ -693,7 +693,7 @@ async fn filter_reserved_geo_point_array() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let expected_response = json!({ "message": "`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:18 _geoPoint = Glass", @@ -733,7 +733,7 @@ async fn filter_reserved_geo_point_string() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let expected_response = json!({ "message": "`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. 
Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.\n1:18 _geoPoint = Glass", @@ -825,7 +825,7 @@ async fn similar_bad_embedder() { let documents = DOCUMENTS.clone(); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await; + server.wait_task(value.uid()).await; let expected_response = json!({ "message": "Cannot find embedder with name `auto`.", diff --git a/crates/meilisearch/tests/snapshot/mod.rs b/crates/meilisearch/tests/snapshot/mod.rs index a8f93f467..987c9cc33 100644 --- a/crates/meilisearch/tests/snapshot/mod.rs +++ b/crates/meilisearch/tests/snapshot/mod.rs @@ -56,7 +56,7 @@ async fn perform_snapshot() { let (task, code) = server.index("test1").create(Some("prim")).await; meili_snap::snapshot!(code, @"202 Accepted"); - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); // wait for the _next task_ to process, aka the snapshot that should be enqueued at some point @@ -131,10 +131,10 @@ async fn perform_on_demand_snapshot() { index.load_test_set().await; let (task, _status_code) = server.index("doggo").create(Some("bone")).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.index("doggo").create(Some("bone")).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (task, code) = server.create_snapshot().await; snapshot!(code, @"202 Accepted"); @@ -147,7 +147,7 @@ async fn perform_on_demand_snapshot() { "enqueuedAt": "[date]" } "###); - let task = index.wait_task(task.uid()).await; + let task = server.wait_task(task.uid()).await; snapshot!(json_string!(task, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###" { "uid": 4, diff --git a/crates/meilisearch/tests/stats/mod.rs b/crates/meilisearch/tests/stats/mod.rs index f44812014..84422bbaa 100644 --- a/crates/meilisearch/tests/stats/mod.rs +++ b/crates/meilisearch/tests/stats/mod.rs @@ -32,7 +32,7 @@ async fn stats() { let (task, code) = index.create(Some("id")).await; assert_eq!(code, 202); - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = server.stats().await; @@ -58,7 +58,7 @@ async fn stats() { assert_eq!(code, 202, "{response}"); assert_eq!(response["taskUid"], 1); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let timestamp = OffsetDateTime::now_utc(); let (response, code) = server.stats().await; @@ -107,7 +107,7 @@ async fn add_remove_embeddings() { let (response, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (stats, _code) = index.stats().await; snapshot!(json_string!(stats, { @@ -135,7 +135,7 @@ async fn add_remove_embeddings() { let (response, code) = index.update_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (stats, _code) = index.stats().await; snapshot!(json_string!(stats, { @@ -163,7 +163,7 @@ async fn add_remove_embeddings() { let (response, code) = 
index.update_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (stats, _code) = index.stats().await; snapshot!(json_string!(stats, { @@ -192,7 +192,7 @@ async fn add_remove_embeddings() { let (response, code) = index.update_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (stats, _code) = index.stats().await; snapshot!(json_string!(stats, { @@ -245,7 +245,7 @@ async fn add_remove_embedded_documents() { let (response, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (stats, _code) = index.stats().await; snapshot!(json_string!(stats, { @@ -269,7 +269,7 @@ async fn add_remove_embedded_documents() { // delete one embedded document, remaining 1 embedded documents for 3 embeddings in total let (response, code) = index.delete_document(0).await; snapshot!(code, @"202 Accepted"); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (stats, _code) = index.stats().await; snapshot!(json_string!(stats, { @@ -305,7 +305,7 @@ async fn update_embedder_settings() { let (response, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(response.uid()).await.succeeded(); + server.wait_task(response.uid()).await.succeeded(); let (stats, _code) = index.stats().await; snapshot!(json_string!(stats, { diff --git a/crates/meilisearch/tests/vector/binary_quantized.rs b/crates/meilisearch/tests/vector/binary_quantized.rs index 89d32cc50..6fcfa3563 100644 --- a/crates/meilisearch/tests/vector/binary_quantized.rs +++ b/crates/meilisearch/tests/vector/binary_quantized.rs @@ -88,7 +88,7 @@ async fn binary_quantize_before_sending_documents() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); // Make sure the documents are binary quantized let (documents, _code) = index @@ -161,7 +161,7 @@ async fn binary_quantize_after_sending_documents() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let (response, code) = index .update_settings(json!({ @@ -305,7 +305,7 @@ async fn binary_quantize_clear_documents() { server.wait_task(response.uid()).await.succeeded(); let (value, _code) = index.clear_all_documents().await; - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); // Make sure the documents DB has been cleared let (documents, _code) = index diff --git a/crates/meilisearch/tests/vector/mod.rs b/crates/meilisearch/tests/vector/mod.rs index 98555dfac..ca2ecc998 100644 --- a/crates/meilisearch/tests/vector/mod.rs +++ b/crates/meilisearch/tests/vector/mod.rs @@ -42,7 +42,7 @@ async fn add_remove_user_provided() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let (documents, _code) = index 
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() }) @@ -95,7 +95,7 @@ async fn add_remove_user_provided() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let (documents, _code) = index .get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() }) @@ -138,7 +138,7 @@ async fn add_remove_user_provided() { let (value, code) = index.delete_document(0).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); let (documents, _code) = index .get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() }) @@ -187,7 +187,7 @@ async fn user_provide_mismatched_embedding_dimension() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -218,7 +218,7 @@ async fn user_provide_mismatched_embedding_dimension() { ]); let (response, code) = index.add_documents(new_document, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(response.uid()).await; + let task = server.wait_task(response.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -270,7 +270,7 @@ async fn generate_default_user_provided_documents(server: &Server) -> Index { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); index } @@ -285,7 +285,7 @@ async fn user_provided_embeddings_error() { json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [0, 0, 0] }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -315,7 +315,7 @@ async fn user_provided_embeddings_error() { let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": {}}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -346,7 +346,7 @@ async fn user_provided_embeddings_error() { json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": "yes please" }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -375,7 +375,7 @@ async fn user_provided_embeddings_error() { let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": true, "regenerate": true }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -404,7 +404,7 @@ async fn user_provided_embeddings_error() { let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [true], 
"regenerate": true }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -433,7 +433,7 @@ async fn user_provided_embeddings_error() { let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [[true]], "regenerate": false }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -462,20 +462,20 @@ async fn user_provided_embeddings_error() { let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "embeddings": [23, 0.1, -12], "regenerate": true }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task["status"], @r###""succeeded""###); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task["status"], @r###""succeeded""###); let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [0.1, [0.2, 0.3]] }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -504,7 +504,7 @@ async fn user_provided_embeddings_error() { let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [[0.1, 0.2], 0.3] }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -533,7 +533,7 @@ async fn user_provided_embeddings_error() { let documents = json!({"id": 0, "name": "kefir", "_vectors": { "manual": { "regenerate": false, "embeddings": [[0.1, true], 0.3] }}}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -574,7 +574,7 @@ async fn user_provided_vectors_error() { let documents = json!([{"id": 40, "name": "kefir"}, {"id": 41, "name": "intel"}, {"id": 42, "name": "max"}, {"id": 43, "name": "venus"}, {"id": 44, "name": "eva"}]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -604,7 +604,7 @@ async fn user_provided_vectors_error() { let documents = json!({"id": 42, "name": "kefir", "_vector": { "manaul": [0, 0, 0] }}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, 
@r###" { "uid": "[uid]", @@ -634,7 +634,7 @@ async fn user_provided_vectors_error() { let documents = json!({"id": 42, "name": "kefir", "_vectors": { "manaul": [0, 0, 0] }}); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -667,7 +667,7 @@ async fn clear_documents() { let index = generate_default_user_provided_documents(&server).await; let (value, _code) = index.clear_all_documents().await; - index.wait_task(value.uid()).await.succeeded(); + server.wait_task(value.uid()).await.succeeded(); // Make sure the documents DB has been cleared let (documents, _code) = index @@ -723,7 +723,7 @@ async fn add_remove_one_vector_4588() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, name: "document-added"); let documents = json!([ @@ -731,7 +731,7 @@ async fn add_remove_one_vector_4588() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, name: "document-deleted"); let (documents, _code) = index diff --git a/crates/meilisearch/tests/vector/ollama.rs b/crates/meilisearch/tests/vector/ollama.rs index eb80758df..27232df11 100644 --- a/crates/meilisearch/tests/vector/ollama.rs +++ b/crates/meilisearch/tests/vector/ollama.rs @@ -117,7 +117,7 @@ async fn test_both_apis() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", diff --git a/crates/meilisearch/tests/vector/openai.rs b/crates/meilisearch/tests/vector/openai.rs index 4ae8cb041..19b13228a 100644 --- a/crates/meilisearch/tests/vector/openai.rs +++ b/crates/meilisearch/tests/vector/openai.rs @@ -370,7 +370,7 @@ async fn it_works() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -601,7 +601,7 @@ async fn tokenize_long_text() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -657,7 +657,7 @@ async fn bad_api_key() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { @@ -805,7 +805,7 @@ async fn bad_model() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { @@ -883,7 +883,7 @@ async fn bad_dimensions() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; 
snapshot!(task, @r###" { @@ -992,7 +992,7 @@ async fn smaller_dimensions() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -1224,7 +1224,7 @@ async fn small_embedding_model() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -1455,7 +1455,7 @@ async fn legacy_embedding_model() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -1687,7 +1687,7 @@ async fn it_still_works() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -1916,7 +1916,7 @@ async fn timeout() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", diff --git a/crates/meilisearch/tests/vector/rest.rs b/crates/meilisearch/tests/vector/rest.rs index e03563bcc..768d03eb9 100644 --- a/crates/meilisearch/tests/vector/rest.rs +++ b/crates/meilisearch/tests/vector/rest.rs @@ -1099,7 +1099,7 @@ async fn add_vector_and_user_provided() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -1616,7 +1616,7 @@ async fn server_returns_multiple() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -1722,7 +1722,7 @@ async fn server_single_input_returns_in_array() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", @@ -1828,7 +1828,7 @@ async fn server_raw() { ]); let (value, code) = index.add_documents(documents, None).await; snapshot!(code, @"202 Accepted"); - let task = index.wait_task(value.uid()).await; + let task = server.wait_task(value.uid()).await; snapshot!(task, @r###" { "uid": "[uid]", From 13ea29e511d70cbe690b53c6274d819f2c8f09a8 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Tue, 24 Jun 2025 00:07:58 +0300 Subject: [PATCH 09/21] Fix some search+replace issues. 
Make Server::wait_task() available for Index:: methods Signed-off-by: Martin Tzvetanov Grigorov --- .../meilisearch/tests/auth/authorization.rs | 18 +++--- crates/meilisearch/tests/common/index.rs | 18 +++--- crates/meilisearch/tests/common/server.rs | 15 +++-- .../meilisearch/tests/index/create_index.rs | 6 +- crates/meilisearch/tests/index/get_index.rs | 9 +-- .../meilisearch/tests/search/multi/proxy.rs | 60 +++++++++---------- 6 files changed, 69 insertions(+), 57 deletions(-) diff --git a/crates/meilisearch/tests/auth/authorization.rs b/crates/meilisearch/tests/auth/authorization.rs index 20815d987..607ecf67a 100644 --- a/crates/meilisearch/tests/auth/authorization.rs +++ b/crates/meilisearch/tests/auth/authorization.rs @@ -507,10 +507,10 @@ async fn access_authorized_index_patterns() { server.use_api_key(MASTER_KEY); - // refer to products_1 with modified api key. + // refer to products_1 with a modified api key. let index_1 = server.index("products_1"); - index_1.wait_task(task_id).await; + server.wait_task(task_id).await; let (response, code) = index_1.get_task(task_id).await; assert_eq!(200, code, "{:?}", &response); @@ -578,19 +578,19 @@ async fn raise_error_non_authorized_index_patterns() { assert_eq!(202, code, "{:?}", &response); let task2_id = response["taskUid"].as_u64().unwrap(); - // Adding document to test index. Should Fail with 403 -- invalid_api_key + // Adding a document to test index. Should Fail with 403 -- invalid_api_key let (response, code) = test_index.add_documents(documents, None).await; assert_eq!(403, code, "{:?}", &response); server.use_api_key(MASTER_KEY); - // refer to products_1 with modified api key. + // refer to products_1 with a modified api key. let product_1_index = server.index("products_1"); - // refer to products_2 with modified api key. - let product_2_index = server.index("products_2"); + // refer to products_2 with a modified api key. 
+ // let product_2_index = server.index("products_2"); - product_1_server.wait_task(task1_id).await; - product_2_server.wait_task(task2_id).await; + server.wait_task(task1_id).await; + server.wait_task(task2_id).await; let (response, code) = product_1_index.get_task(task1_id).await; assert_eq!(200, code, "{:?}", &response); @@ -603,7 +603,7 @@ #[actix_rt::test] async fn pattern_indexes() { - // Create server with master key + // Create a server with master key let mut server = Server::new_auth().await; server.use_admin_key(MASTER_KEY).await; diff --git a/crates/meilisearch/tests/common/index.rs b/crates/meilisearch/tests/common/index.rs index 4da96403c..61bfced72 100644 --- a/crates/meilisearch/tests/common/index.rs +++ b/crates/meilisearch/tests/common/index.rs @@ -7,7 +7,7 @@ use urlencoding::encode as urlencode; use super::encoder::Encoder; use super::service::Service; -use super::{Owned, Shared, Value}; +use super::{Owned, Server, Shared, Value}; use crate::json; pub struct Index<'a, State = Owned> { @@ -42,9 +42,9 @@ impl<'a> Index<'a, Owned> { ) .await; assert_eq!(code, 202); - let update_id = response["taskUid"].as_i64().unwrap(); - self.wait_task(update_id as u64).await; - update_id as u64 + let update_id = response["taskUid"].as_u64().unwrap(); + self.wait_task(update_id).await; + update_id } pub async fn load_test_set_ndjson(&self) -> u64 { @@ -58,9 +58,9 @@ impl<'a> Index<'a, Owned> { ) .await; assert_eq!(code, 202); - let update_id = response["taskUid"].as_i64().unwrap(); - self.wait_task(update_id as u64).await; - update_id as u64 + let update_id = response["taskUid"].as_u64().unwrap(); + self.wait_task(update_id).await; + update_id } pub async fn create(&self, primary_key: Option<&str>) -> (Value, StatusCode) { @@ -362,6 +362,10 @@ impl<State> Index<'_, State> { self.service.delete(url).await } + async fn wait_task(&self, update_id: u64) -> Value { + Server::<State>::_wait_task(async |url| self.service.get(url).await, update_id).await + } + pub async fn get_task(&self, update_id: u64) -> (Value, StatusCode) { let url = format!("/tasks/{}", update_id); self.service.get(url).await diff --git a/crates/meilisearch/tests/common/server.rs b/crates/meilisearch/tests/common/server.rs index 4367650c5..73b1033f2 100644 --- a/crates/meilisearch/tests/common/server.rs +++ b/crates/meilisearch/tests/common/server.rs @@ -407,13 +407,20 @@ impl Server { } pub async fn wait_task(&self, update_id: u64) -> Value { + Server::<State>::_wait_task(async |url| self.service.get(url).await, update_id).await + } + + pub(super) async fn _wait_task<F>(request_fn: F, update_id: u64) -> Value + where + F: AsyncFnOnce(String) -> (Value, StatusCode) + Copy, + { // try several times to get status, or panic to not wait forever - let url = format!("/tasks/{}", update_id); - let max_attempts = 400; // 200 seconds total, 0.5s per attempt + let url = format!("/tasks/{update_id}"); + let max_attempts = 400; // 200 seconds in total, 0.5secs per attempt for i in 0..max_attempts { - let (response, status_code) = self.service.get(&url).await; - assert_eq!(200, status_code, "response: {}", response); + let (response, status_code) = request_fn(url.clone()).await; + assert_eq!(200, status_code, "response: {response}"); if response["status"] == "succeeded" || response["status"] == "failed" { return response; diff --git a/crates/meilisearch/tests/index/create_index.rs b/crates/meilisearch/tests/index/create_index.rs index dc178919e..625d7afd0 100644 ---
a/crates/meilisearch/tests/index/create_index.rs +++ b/crates/meilisearch/tests/index/create_index.rs @@ -161,9 +161,9 @@ async fn test_create_multiple_indexes() { let (task2, _) = index2.create(None).await; let (task3, _) = index3.create(None).await; - index1.wait_task(task1.uid()).await.succeeded(); - index2.wait_task(task2.uid()).await.succeeded(); - index3.wait_task(task3.uid()).await.succeeded(); + server.wait_task(task1.uid()).await.succeeded(); + server.wait_task(task2.uid()).await.succeeded(); + server.wait_task(task3.uid()).await.succeeded(); assert_eq!(index1.get().await.1, 200); assert_eq!(index2.get().await.1, 200); diff --git a/crates/meilisearch/tests/index/get_index.rs b/crates/meilisearch/tests/index/get_index.rs index ece479513..82f086f08 100644 --- a/crates/meilisearch/tests/index/get_index.rs +++ b/crates/meilisearch/tests/index/get_index.rs @@ -60,8 +60,8 @@ async fn list_multiple_indexes() { let index_with_key = server.unique_index(); let (response_with_key, _status_code) = index_with_key.create(Some("key")).await; - index_without_key.wait_task(response_without_key.uid()).await.succeeded(); - index_with_key.wait_task(response_with_key.uid()).await.succeeded(); + server.wait_task(response_without_key.uid()).await.succeeded(); + server.wait_task(response_with_key.uid()).await.succeeded(); let (response, code) = server.list_indexes(None, Some(1000)).await; assert_eq!(code, 200); @@ -81,8 +81,9 @@ async fn get_and_paginate_indexes() { let server = Server::new().await; const NB_INDEXES: usize = 50; for i in 0..NB_INDEXES { - server.index(format!("test_{i:02}")).create(None).await; - server.index(format!("test_{i:02}")).wait_task(i as u64).await; + let (task, code) = server.index(format!("test_{i:02}")).create(None).await; + assert_eq!(code, 202); + server.wait_task(task.uid()).await; } // basic diff --git a/crates/meilisearch/tests/search/multi/proxy.rs b/crates/meilisearch/tests/search/multi/proxy.rs index c537f5ae8..311f69d9e 100644 --- a/crates/meilisearch/tests/search/multi/proxy.rs +++ b/crates/meilisearch/tests/search/multi/proxy.rs @@ -158,11 +158,11 @@ async fn remote_sharding() { let index1 = ms1.index("test"); let index2 = ms2.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index2.add_documents(json!(documents[3..5]), None).await; - index2.wait_task(task.uid()).await.succeeded(); + ms2.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -454,9 +454,9 @@ async fn error_unregistered_remote() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -572,9 +572,9 @@ async fn error_no_weighted_score() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - 
index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -705,9 +705,9 @@ async fn error_bad_response() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -842,9 +842,9 @@ async fn error_bad_request() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -972,10 +972,10 @@ async fn error_bad_request_facets_by_index() { let index0 = ms0.index("test0"); let index1 = ms1.index("test1"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -1113,13 +1113,13 @@ async fn error_bad_request_facets_by_index_facet() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index0.update_settings_filterable_attributes(json!(["id"])).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -1263,9 +1263,9 @@ async fn error_remote_does_not_answer() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -1464,9 +1464,9 @@ async fn error_remote_404() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - 
index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -1659,9 +1659,9 @@ async fn error_remote_sharding_auth() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers ms1.clear_api_key(); @@ -1819,9 +1819,9 @@ async fn remote_sharding_auth() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers ms1.clear_api_key(); @@ -1974,9 +1974,9 @@ async fn error_remote_500() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -2153,9 +2153,9 @@ async fn error_remote_500_once() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); @@ -2336,9 +2336,9 @@ async fn error_remote_timeout() { let index0 = ms0.index("test"); let index1 = ms1.index("test"); let (task, _status_code) = index0.add_documents(json!(documents[0..2]), None).await; - index0.wait_task(task.uid()).await.succeeded(); + ms0.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index1.add_documents(json!(documents[2..3]), None).await; - index1.wait_task(task.uid()).await.succeeded(); + ms1.wait_task(task.uid()).await.succeeded(); // wrap servers let ms0 = Arc::new(ms0); From ae912c4c3f2d0065f2251299e0c34cf588fc2e20 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Thu, 10 Jul 2025 14:28:57 +0300 Subject: [PATCH 10/21] Pass the Server as an extra parameter when the Index needs to wait for a task Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/common/index.rs | 24 ++++++++----------- crates/meilisearch/tests/common/server.rs | 9 +------ .../tests/documents/add_documents.rs | 4 ++-- .../tests/documents/delete_documents.rs | 2 +- crates/meilisearch/tests/documents/errors.rs | 6 ++--- .../tests/documents/get_documents.rs | 2 +- .../meilisearch/tests/index/delete_index.rs | 2 +- .../meilisearch/tests/index/update_index.rs | 4 ++-- crates/meilisearch/tests/snapshot/mod.rs | 4 ++-- 9 files changed, 23 insertions(+), 34 
deletions(-) diff --git a/crates/meilisearch/tests/common/index.rs b/crates/meilisearch/tests/common/index.rs index 61bfced72..76e72a1e5 100644 --- a/crates/meilisearch/tests/common/index.rs +++ b/crates/meilisearch/tests/common/index.rs @@ -31,7 +31,7 @@ impl<'a> Index<'a, Owned> { Index { uid: self.uid.clone(), service: self.service, encoder, marker: PhantomData } } - pub async fn load_test_set(&self) -> u64 { + pub async fn load_test_set<State>(&self, waiter: &Server<State>) -> u64 { let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref())); let (response, code) = self .service @@ -43,11 +43,11 @@ impl<'a> Index<'a, Owned> { .await; assert_eq!(code, 202); let update_id = response["taskUid"].as_u64().unwrap(); - self.wait_task(update_id).await; + waiter.wait_task(update_id).await; update_id } - pub async fn load_test_set_ndjson(&self) -> u64 { + pub async fn load_test_set_ndjson<State>(&self, waiter: &Server<State>) -> u64 { let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref())); let (response, code) = self .service @@ -59,7 +59,7 @@ impl<'a> Index<'a, Owned> { .await; assert_eq!(code, 202); let update_id = response["taskUid"].as_u64().unwrap(); - self.wait_task(update_id).await; + waiter.wait_task(update_id).await; update_id } @@ -265,10 +265,10 @@ impl Index<'_, Shared> { /// You cannot modify the content of a shared index, thus the delete_document_by_filter call /// must fail. If the task successfully enqueue itself, we'll wait for the task to finishes, /// and if it succeed the function will panic. - pub async fn delete_document_by_filter_fail(&self, body: Value) -> (Value, StatusCode) { + pub async fn delete_document_by_filter_fail<State>(&self, body: Value, waiter: &Server<State>) -> (Value, StatusCode) { let (mut task, code) = self._delete_document_by_filter(body).await; if code.is_success() { - task = self.wait_task(task.uid()).await; + task = waiter.wait_task(task.uid()).await; if task.is_success() { panic!( "`delete_document_by_filter_fail` succeeded: {}", @@ -279,10 +279,10 @@ impl Index<'_, Shared> { (task, code) } - pub async fn delete_index_fail(&self) -> (Value, StatusCode) { + pub async fn delete_index_fail<State>(&self, waiter: &Server<State>) -> (Value, StatusCode) { let (mut task, code) = self._delete().await; if code.is_success() { - task = self.wait_task(task.uid()).await; + task = waiter.wait_task(task.uid()).await; if task.is_success() { panic!( "`delete_index_fail` succeeded: {}", @@ -293,10 +293,10 @@ impl Index<'_, Shared> { (task, code) } - pub async fn update_index_fail(&self, primary_key: Option<&str>) -> (Value, StatusCode) { + pub async fn update_index_fail<State>(&self, primary_key: Option<&str>, waiter: &Server<State>) -> (Value, StatusCode) { let (mut task, code) = self._update(primary_key).await; if code.is_success() { - task = self.wait_task(task.uid()).await; + task = waiter.wait_task(task.uid()).await; if task.is_success() { panic!( "`update_index_fail` succeeded: {}", @@ -362,10 +362,6 @@ impl<State> Index<'_, State> { self.service.delete(url).await } - async fn wait_task(&self, update_id: u64) -> Value { - Server::<State>::_wait_task(async |url| self.service.get(url).await, update_id).await - } - pub async fn get_task(&self, update_id: u64) -> (Value, StatusCode) { let url = format!("/tasks/{}", update_id); self.service.get(url).await diff --git a/crates/meilisearch/tests/common/server.rs b/crates/meilisearch/tests/common/server.rs index 73b1033f2..89c5a3aaa 100644 --- a/crates/meilisearch/tests/common/server.rs +++ b/crates/meilisearch/tests/common/server.rs @@ -407,19 +407,12 @@ impl Server { }
pub async fn wait_task(&self, update_id: u64) -> Value { - Server::<State>::_wait_task(async |url| self.service.get(url).await, update_id).await - } - - pub(super) async fn _wait_task<F>(request_fn: F, update_id: u64) -> Value - where - F: AsyncFnOnce(String) -> (Value, StatusCode) + Copy, - { // try several times to get status, or panic to not wait forever let url = format!("/tasks/{update_id}"); let max_attempts = 400; // 200 seconds in total, 0.5secs per attempt for i in 0..max_attempts { - let (response, status_code) = request_fn(url.clone()).await; + let (response, status_code) = self.service.get(url.clone()).await; assert_eq!(200, status_code, "response: {response}"); if response["status"] == "succeeded" || response["status"] == "failed" { diff --git a/crates/meilisearch/tests/documents/add_documents.rs b/crates/meilisearch/tests/documents/add_documents.rs index b69d289e1..5f66c107d 100644 --- a/crates/meilisearch/tests/documents/add_documents.rs +++ b/crates/meilisearch/tests/documents/add_documents.rs @@ -1318,7 +1318,7 @@ async fn add_no_documents() { async fn add_larger_dataset() { let server = Server::new_shared(); let index = server.unique_index(); - let update_id = index.load_test_set().await; + let update_id = index.load_test_set(server).await; let (response, code) = index.get_task(update_id).await; assert_eq!(code, 200); assert_eq!(response["status"], "succeeded"); @@ -1333,7 +1333,7 @@ // x-ndjson add large test let index = server.unique_index(); - let update_id = index.load_test_set_ndjson().await; + let update_id = index.load_test_set_ndjson(server).await; let (response, code) = index.get_task(update_id).await; assert_eq!(code, 200); assert_eq!(response["status"], "succeeded"); diff --git a/crates/meilisearch/tests/documents/delete_documents.rs b/crates/meilisearch/tests/documents/delete_documents.rs index 9c367cb51..cc2d19b1c 100644 --- a/crates/meilisearch/tests/documents/delete_documents.rs +++ b/crates/meilisearch/tests/documents/delete_documents.rs @@ -7,7 +7,7 @@ use crate::json; async fn delete_one_document_unexisting_index() { let server = Server::new_shared(); let index = shared_does_not_exists_index().await; - let (task, code) = index.delete_document_by_filter_fail(json!({"filter": "a = b"})).await; + let (task, code) = index.delete_document_by_filter_fail(json!({"filter": "a = b"}), server).await; assert_eq!(code, 202); server.wait_task(task.uid()).await.failed(); diff --git a/crates/meilisearch/tests/documents/errors.rs b/crates/meilisearch/tests/documents/errors.rs index 506be97d5..ed1aec7e5 100644 --- a/crates/meilisearch/tests/documents/errors.rs +++ b/crates/meilisearch/tests/documents/errors.rs @@ -559,7 +559,7 @@ async fn delete_document_by_filter() { let index = shared_does_not_exists_index().await; // index does not exists let (response, _code) = - index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"})).await; + index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"}), server).await; snapshot!(response, @r###" { "uid": "[uid]", @@ -589,7 +589,7 @@ // no filterable are set let index = shared_empty_index().await; let (response, _code) = - index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"})).await; + index.delete_document_by_filter_fail(json!({ "filter": "doggo = bernese"}), server).await; snapshot!(response, @r###" { "uid": "[uid]", @@ -619,7 +619,7 @@ // not filterable while there is a
filterable attribute let index = shared_index_with_documents().await; let (response, code) = - index.delete_document_by_filter_fail(json!({ "filter": "catto = jorts"})).await; + index.delete_document_by_filter_fail(json!({ "filter": "catto = jorts"}), server).await; snapshot!(code, @"202 Accepted"); let response = server.wait_task(response.uid()).await.failed(); snapshot!(response, @r###" diff --git a/crates/meilisearch/tests/documents/get_documents.rs b/crates/meilisearch/tests/documents/get_documents.rs index 63dc224c2..44eb181df 100644 --- a/crates/meilisearch/tests/documents/get_documents.rs +++ b/crates/meilisearch/tests/documents/get_documents.rs @@ -334,7 +334,7 @@ async fn get_document_s_nested_attributes_to_retrieve() { async fn get_documents_displayed_attributes_is_ignored() { let server = Server::new_shared(); let index = server.unique_index(); - index.load_test_set().await; + index.load_test_set(server).await; index.update_settings(json!({"displayedAttributes": ["gender"]})).await; let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await; diff --git a/crates/meilisearch/tests/index/delete_index.rs b/crates/meilisearch/tests/index/delete_index.rs index 085b47294..08d3642cb 100644 --- a/crates/meilisearch/tests/index/delete_index.rs +++ b/crates/meilisearch/tests/index/delete_index.rs @@ -26,7 +26,7 @@ async fn create_and_delete_index() { async fn error_delete_unexisting_index() { let server = Server::new_shared(); let index = shared_does_not_exists_index().await; - let (task, code) = index.delete_index_fail().await; + let (task, code) = index.delete_index_fail(server).await; assert_eq!(code, 202); server.wait_task(task.uid()).await.failed(); diff --git a/crates/meilisearch/tests/index/update_index.rs b/crates/meilisearch/tests/index/update_index.rs index 1c781c386..262324bcf 100644 --- a/crates/meilisearch/tests/index/update_index.rs +++ b/crates/meilisearch/tests/index/update_index.rs @@ -72,7 +72,7 @@ async fn error_update_existing_primary_key() { let server = Server::new_shared(); let index = shared_index_with_documents().await; - let (update_task, code) = index.update_index_fail(Some("primary")).await; + let (update_task, code) = index.update_index_fail(Some("primary"), server).await; assert_eq!(code, 202); let response = server.wait_task(update_task.uid()).await.failed(); @@ -91,7 +91,7 @@ async fn error_update_existing_primary_key() { async fn error_update_unexisting_index() { let server = Server::new_shared(); let index = shared_does_not_exists_index().await; - let (task, code) = index.update_index_fail(Some("my-primary-key")).await; + let (task, code) = index.update_index_fail(Some("my-primary-key"), server).await; assert_eq!(code, 202); diff --git a/crates/meilisearch/tests/snapshot/mod.rs b/crates/meilisearch/tests/snapshot/mod.rs index 987c9cc33..32946b06e 100644 --- a/crates/meilisearch/tests/snapshot/mod.rs +++ b/crates/meilisearch/tests/snapshot/mod.rs @@ -51,7 +51,7 @@ async fn perform_snapshot() { })) .await; - index.load_test_set().await; + index.load_test_set(&server).await; let (task, code) = server.index("test1").create(Some("prim")).await; meili_snap::snapshot!(code, @"202 Accepted"); @@ -128,7 +128,7 @@ async fn perform_on_demand_snapshot() { })) .await; - index.load_test_set().await; + index.load_test_set(&server).await; let (task, _status_code) = server.index("doggo").create(Some("bone")).await; server.wait_task(task.uid()).await.succeeded(); From e7a60555d60e27645922102b7897ccc880cd3567 Mon Sep 17 00:00:00 2001 From: Martin 
Tzvetanov Grigorov Date: Thu, 10 Jul 2025 14:35:40 +0300 Subject: [PATCH 11/21] Formatting Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/common/index.rs | 12 ++++++++++-- .../meilisearch/tests/documents/delete_documents.rs | 3 ++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/crates/meilisearch/tests/common/index.rs b/crates/meilisearch/tests/common/index.rs index 76e72a1e5..b4ae151f3 100644 --- a/crates/meilisearch/tests/common/index.rs +++ b/crates/meilisearch/tests/common/index.rs @@ -265,7 +265,11 @@ impl Index<'_, Shared> { /// You cannot modify the content of a shared index, thus the delete_document_by_filter call /// must fail. If the task successfully enqueue itself, we'll wait for the task to finishes, /// and if it succeed the function will panic. - pub async fn delete_document_by_filter_fail<State>(&self, body: Value, waiter: &Server<State>) -> (Value, StatusCode) { + pub async fn delete_document_by_filter_fail<State>( + &self, + body: Value, + waiter: &Server<State>, + ) -> (Value, StatusCode) { let (mut task, code) = self._delete_document_by_filter(body).await; if code.is_success() { task = waiter.wait_task(task.uid()).await; @@ -293,7 +297,11 @@ impl Index<'_, Shared> { (task, code) } - pub async fn update_index_fail<State>(&self, primary_key: Option<&str>, waiter: &Server<State>) -> (Value, StatusCode) { + pub async fn update_index_fail<State>( + &self, + primary_key: Option<&str>, + waiter: &Server<State>, + ) -> (Value, StatusCode) { let (mut task, code) = self._update(primary_key).await; if code.is_success() { task = waiter.wait_task(task.uid()).await; diff --git a/crates/meilisearch/tests/documents/delete_documents.rs b/crates/meilisearch/tests/documents/delete_documents.rs index cc2d19b1c..ffe4ff443 100644 --- a/crates/meilisearch/tests/documents/delete_documents.rs +++ b/crates/meilisearch/tests/documents/delete_documents.rs @@ -7,7 +7,8 @@ use crate::json; async fn delete_one_document_unexisting_index() { let server = Server::new_shared(); let index = shared_does_not_exists_index().await; - let (task, code) = index.delete_document_by_filter_fail(json!({"filter": "a = b"}), server).await; + let (task, code) = + index.delete_document_by_filter_fail(json!({"filter": "a = b"}), server).await; assert_eq!(code, 202); server.wait_task(task.uid()).await.failed(); From 126aefc2073b0b24c64a9444be7cfa0802ce2b89 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Thu, 10 Jul 2025 16:39:58 +0300 Subject: [PATCH 12/21] Fix more tests Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 129 ++++++++++++---------- crates/meilisearch/tests/common/server.rs | 13 --- 2 files changed, 70 insertions(+), 72 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index d5374a144..5b67dc50c 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1,11 +1,30 @@ mod errors; +use insta::internals::{Content, ContentPath}; +use once_cell::sync::Lazy; +use regex::Regex; use meili_snap::insta::assert_json_snapshot; use meili_snap::{json_string, snapshot}; use crate::common::Server; use crate::json; + +static TASK_WITH_ID_RE: Lazy<Regex> = Lazy::new(|| { + Regex::new(r"task with id (\d+) of type") + .unwrap() +}); + +fn task_with_id_redaction(value: Content, _path: ContentPath) -> Content { + match value { + Content::String(s) => { + let replaced = TASK_WITH_ID_RE.replace_all(&s, "task with id X of type"); + Content::String(replaced.to_string()) + } + _ => value.clone(), + } +}
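// A minimal usage sketch for the redaction pair defined above (here `batch` stands in
// for any batch JSON fetched by the tests that follow): plugging `task_with_id_redaction`
// into insta as a dynamic redaction makes messages such as
// "task with id 42 of type `indexCreation`" snapshot stably as
// "task with id X of type `indexCreation`", independent of which uid the shared server
// happened to assign to the task:
//
//     assert_json_snapshot!(batch, {
//         ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction),
//     });
//
// Non-string values are passed through unchanged by the `_ => value.clone()` arm.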
#[actix_rt::test] async fn error_get_unexisting_batch_status() { let server = Server::new_shared(); @@ -30,7 +49,7 @@ async fn get_batch_status() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (_response, code) = index.get_batch(task.batch_uid()).await; assert_eq!(code, 200); } @@ -272,7 +291,7 @@ async fn test_summarized_document_addition_or_update() { let index = server.unique_index(); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -285,7 +304,6 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -311,13 +329,13 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -330,7 +348,6 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -355,7 +372,7 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); } @@ -368,7 +385,7 @@ async fn test_summarized_delete_documents_by_batch() { let task_uid_2 = (u32::MAX - 2) as u64; let task_uid_3 = (u32::MAX - 3) as u64; let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -380,7 +397,6 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -404,13 +420,13 @@ async fn test_summarized_delete_documents_by_batch() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); index.create(None).await; let (task, _status_code) = 
index.delete_batch(vec![42]).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -458,7 +474,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -470,7 +486,6 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -495,14 +510,14 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); index.create(None).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -515,7 +530,6 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -540,14 +554,14 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -595,7 +609,7 @@ async fn test_summarized_delete_document_by_id() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.delete_document(1).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -607,7 +621,6 @@ async fn test_summarized_delete_document_by_id() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -631,13 +644,13 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); index.create(None).await; let (task, 
_status_code) = index.delete_document(42).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -650,7 +663,6 @@ async fn test_summarized_delete_document_by_id() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" }, @r###" { @@ -674,7 +686,7 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); } @@ -696,7 +708,7 @@ async fn test_summarized_settings_update() { "###); let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -709,8 +721,6 @@ async fn test_summarized_settings_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" - }, @r###" { @@ -743,7 +753,7 @@ async fn test_summarized_settings_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchStrategy": "batched all enqueued tasks" } "###); } @@ -753,7 +763,7 @@ async fn test_summarized_index_creation() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -765,7 +775,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -786,12 +796,12 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." 
} "###); let (task, _status_code) = index.create(Some("doggos")).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -803,7 +813,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -826,7 +836,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); } @@ -946,7 +956,7 @@ async fn test_summarized_index_update() { let index = server.unique_index(); // If the index doesn't exist yet, we should get errors with or without the primary key. let (task, _status_code) = index.update(None).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -958,7 +968,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -979,12 +989,12 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); let (task, _status_code) = index.update(Some("bones")).await; - index.wait_task(task.uid()).await.failed(); + let task = index.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -996,7 +1006,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1019,15 +1029,15 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); - // And run the same two tests once the index do exists. + // And run the same two tests once the index does exist. 
index.create(None).await; let (task, _status_code) = index.update(None).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1039,7 +1049,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1060,12 +1070,12 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); let (task, _status_code) = index.update(Some("bones")).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1077,7 +1087,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexUpdate` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1100,7 +1110,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); } @@ -1124,7 +1134,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `indexSwap` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1154,7 +1164,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task." } "###); @@ -1179,7 +1189,7 @@ async fn test_summarized_index_swap() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "task with id X of type `indexCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1200,11 +1210,12 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." 
} "###); } + #[actix_web::test] async fn test_summarized_batch_cancelation() { let server = Server::new_shared(); @@ -1213,7 +1224,7 @@ async fn test_summarized_batch_cancelation() { let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1224,8 +1235,8 @@ async fn test_summarized_batch_cancelation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `taskCancelation` cannot be batched", ".details.originalFilter" => "?uids=X", + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1250,7 +1261,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `taskCancelation` that cannot be batched with any other task." } "###); } @@ -1263,7 +1274,7 @@ async fn test_summarized_batch_deletion() { let (task, _status_code) = index.create(None).await; index.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; - index.wait_task(task.uid()).await.succeeded(); + let task = index.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1308,8 +1319,8 @@ async fn test_summarized_batch_deletion() { async fn test_summarized_dump_creation() { let server = Server::new_shared(); let (task, _status_code) = server.create_dump().await; - server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_latest_batch().await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1320,7 +1331,7 @@ async fn test_summarized_dump_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => "task with id X of type `dumpCreation` cannot be batched" + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1343,7 +1354,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `dumpCreation` that cannot be batched with any other task." } "###); } diff --git a/crates/meilisearch/tests/common/server.rs b/crates/meilisearch/tests/common/server.rs index 787cafc9f..431972983 100644 --- a/crates/meilisearch/tests/common/server.rs +++ b/crates/meilisearch/tests/common/server.rs @@ -429,19 +429,6 @@ impl Server { self.service.get(url).await } - // https://www.meilisearch.com/docs/reference/api/batches#get-batches states: - // "Batches are always returned in descending order of uid. 
This means that by default, - // the most recently created batch objects appear first." - pub async fn get_latest_batch(&self) -> (Option<Value>, StatusCode) { - let url = "/batches?limit=1&offset=0"; - let (value, code) = self.service.get(url).await; - value - .get("results") - .and_then(|results| results.as_array()) - .and_then(|array| array.first()) - .map_or((None, code), |latest| (Some(Value(latest.clone())), code)) - } - pub async fn get_features(&self) -> (Value, StatusCode) { self.service.get("/experimental-features").await } From 9f89881b0df27b5e155a3ec2e1c63831d0573174 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 10:11:58 +0300 Subject: [PATCH 13/21] More tests fixes Signed-off-by: Martin Tzvetanov Grigorov --- crates/index-scheduler/src/queue/tasks.rs | 7 +- crates/meili-snap/src/lib.rs | 2 +- crates/meilisearch/tests/batches/mod.rs | 79 ++++++++++++----------- 3 files changed, 43 insertions(+), 45 deletions(-) diff --git a/crates/index-scheduler/src/queue/tasks.rs b/crates/index-scheduler/src/queue/tasks.rs index 92789b93f..74192232e 100644 --- a/crates/index-scheduler/src/queue/tasks.rs +++ b/crates/index-scheduler/src/queue/tasks.rs @@ -530,12 +530,7 @@ impl Queue { ..task } } else { - dbg!(&task); - if task.status == Status::Succeeded || task.status == Status::Failed { - Task { batch_uid: Some(batch.uid), ..task } - } else { - task - } + task } }) .collect(), diff --git a/crates/meili-snap/src/lib.rs b/crates/meili-snap/src/lib.rs index 1641a6335..a59732f04 100644 --- a/crates/meili-snap/src/lib.rs +++ b/crates/meili-snap/src/lib.rs @@ -55,7 +55,7 @@ pub fn default_snapshot_settings_for_test<'a>( settings.add_dynamic_redaction(".error.message", |content, _content_path| match &content { Content::String(s) => { - let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "$before[uuid]$after"); + let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "[uuid]"); Content::String(uuid_replaced.to_string()) } _ => content, diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 5b67dc50c..e68eb3fc6 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -1,19 +1,16 @@ mod errors; use insta::internals::{Content, ContentPath}; -use once_cell::sync::Lazy; -use regex::Regex; use meili_snap::insta::assert_json_snapshot; use meili_snap::{json_string, snapshot}; +use once_cell::sync::Lazy; +use regex::Regex; use crate::common::Server; use crate::json; - -static TASK_WITH_ID_RE: Lazy<Regex> = Lazy::new(|| { - Regex::new(r"task with id (\d+) of type") - .unwrap() -}); +static TASK_WITH_ID_RE: Lazy<Regex> = + Lazy::new(|| Regex::new(r"task with id (\d+) of type").unwrap()); fn task_with_id_redaction(value: Content, _path: ContentPath) -> Content { match value { @@ -304,6 +301,7 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -329,7 +327,7 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -372,7 +370,7 @@ async fn test_summarized_document_addition_or_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy":
"batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); } @@ -397,6 +395,7 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -420,7 +419,7 @@ async fn test_summarized_delete_documents_by_batch() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -486,6 +485,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -510,7 +510,7 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -530,6 +530,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -554,7 +555,7 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -574,7 +575,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks for index `[uuid]`" + ".batchCreationComplete" => "batched all enqueued tasks" }, @r###" { @@ -599,7 +600,7 @@ async fn test_summarized_delete_documents_by_filter() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "batched all enqueued tasks for index `[uuid]`" + "batchCreationComplete": "batched all enqueued tasks" } "###); } @@ -621,6 +622,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -644,7 +646,7 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); @@ -663,6 +665,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -686,7 +689,7 @@ async fn test_summarized_delete_document_by_id() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": 
"batched all enqueued tasks" } "###); } @@ -721,6 +724,7 @@ async fn test_summarized_settings_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks" }, @r###" { @@ -753,7 +757,7 @@ async fn test_summarized_settings_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "batched all enqueued tasks" + "batchCreationComplete": "batched all enqueued tasks" } "###); } @@ -775,7 +779,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -796,7 +800,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); @@ -813,7 +817,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -836,7 +840,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } @@ -968,7 +972,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -989,7 +993,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1006,7 +1010,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1029,7 +1033,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." 
+ "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1049,7 +1053,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1070,7 +1074,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); @@ -1087,7 +1091,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1110,7 +1114,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" } "###); } @@ -1134,7 +1138,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1164,7 +1168,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" } "###); @@ -1189,7 +1193,7 @@ async fn test_summarized_index_swap() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1210,12 +1214,11 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." 
+ "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" } "###); } - #[actix_web::test] async fn test_summarized_batch_cancelation() { let server = Server::new_shared(); @@ -1236,7 +1239,7 @@ async fn test_summarized_batch_cancelation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".details.originalFilter" => "?uids=X", - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1261,7 +1264,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `taskCancelation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" } "###); } @@ -1331,7 +1334,7 @@ async fn test_summarized_dump_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), + ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1354,7 +1357,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchStrategy": "created batch containing only task with id X of type `dumpCreation` that cannot be batched with any other task." + "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" } "###); } From 3bef4f4413ccdaa5c769c6d95c5211032d46e22b Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 10:16:25 +0300 Subject: [PATCH 14/21] Use Server::wait_task() instead of Index::wait_task() Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 74 ++++++++++++------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index e68eb3fc6..f763a8fc0 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -27,7 +27,7 @@ async fn error_get_unexisting_batch_status() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _coder) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.get_batch(u32::MAX).await; let expected_response = json!({ @@ -46,7 +46,7 @@ async fn get_batch_status() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (_response, code) = index.get_batch(task.batch_uid()).await; assert_eq!(code, 200); } @@ -56,9 +56,9 @@ async fn list_batches() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.list_batches().await; assert_eq!(code, 200); assert_eq!( @@ -112,10 
+112,10 @@ async fn list_batches_with_star_filters() { let server = Server::new().await; let index = server.index("test"); let (task, _code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let index = server.index("test"); let (task, _code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.service.get("/batches?indexUids=test").await; assert_eq!(code, 200); @@ -158,9 +158,9 @@ async fn list_batches_status_filtered() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.failed(); + server.wait_task(task.uid()).await.failed(); let (response, code) = index.filtered_batches(&[], &["succeeded"], &[]).await; assert_eq!(code, 200, "{response}"); @@ -180,9 +180,9 @@ async fn list_batches_type_filtered() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _) = index.delete().await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &[], &[]).await; assert_eq!(code, 200, "{response}"); assert_eq!(response["results"].as_array().unwrap().len(), 1); @@ -202,7 +202,7 @@ async fn list_batches_invalid_canceled_by_filter() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&[], &[], &["0"]).await; assert_eq!(code, 200, "{response}"); @@ -214,9 +214,9 @@ async fn list_batches_status_and_type_filtered() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = index.update(Some("id")).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (response, code) = index.filtered_batches(&["indexCreation"], &["failed"], &[]).await; assert_eq!(code, 200, "{response}"); @@ -288,7 +288,7 @@ async fn test_summarized_document_addition_or_update() { let index = server.unique_index(); let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -333,7 +333,7 @@ async fn test_summarized_document_addition_or_update() { let (task, _status_code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -383,7 +383,7 @@ async fn 
test_summarized_delete_documents_by_batch() { let task_uid_2 = (u32::MAX - 2) as u64; let task_uid_3 = (u32::MAX - 3) as u64; let (task, _status_code) = index.delete_batch(vec![task_uid_1, task_uid_2, task_uid_3]).await; - let task = index.wait_task(task.uid()).await.failed(); + let task = server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -425,7 +425,7 @@ async fn test_summarized_delete_documents_by_batch() { index.create(None).await; let (task, _status_code) = index.delete_batch(vec![42]).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -473,7 +473,7 @@ async fn test_summarized_delete_documents_by_filter() { let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - let task = index.wait_task(task.uid()).await.failed(); + let task = server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -517,7 +517,7 @@ async fn test_summarized_delete_documents_by_filter() { index.create(None).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - let task = index.wait_task(task.uid()).await.failed(); + let task = server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -562,7 +562,7 @@ async fn test_summarized_delete_documents_by_filter() { index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await; let (task, _status_code) = index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -610,7 +610,7 @@ async fn test_summarized_delete_document_by_id() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.delete_document(1).await; - let task = index.wait_task(task.uid()).await.failed(); + let task = server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -652,7 +652,7 @@ async fn test_summarized_delete_document_by_id() { index.create(None).await; let (task, _status_code) = index.delete_document(42).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -711,7 +711,7 @@ async fn test_summarized_settings_update() { "###); let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -767,7 +767,7 @@ async fn test_summarized_index_creation() { let server = Server::new_shared(); let index = server.unique_index(); let (task, _status_code) = index.create(None).await; - let task = index.wait_task(task.uid()).await.succeeded(); + 
let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -805,7 +805,7 @@ async fn test_summarized_index_creation() { "###); let (task, _status_code) = index.create(Some("doggos")).await; - let task = index.wait_task(task.uid()).await.failed(); + let task = server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -850,7 +850,7 @@ async fn test_summarized_index_deletion() { let server = Server::new_shared(); let index = server.unique_index(); let (ret, _code) = index.delete().await; - let batch = index.wait_task(ret.uid()).await.failed(); + let batch = server.wait_task(ret.uid()).await.failed(); snapshot!(batch, @r###" { @@ -881,7 +881,7 @@ async fn test_summarized_index_deletion() { // both batches may get autobatched and the deleted documents count will be wrong. let (ret, _code) = index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await; - let batch = index.wait_task(ret.uid()).await.succeeded(); + let batch = server.wait_task(ret.uid()).await.succeeded(); snapshot!(batch, @r###" { @@ -904,7 +904,7 @@ async fn test_summarized_index_deletion() { "###); let (ret, _code) = index.delete().await; - let batch = index.wait_task(ret.uid()).await.succeeded(); + let batch = server.wait_task(ret.uid()).await.succeeded(); snapshot!(batch, @r###" { @@ -927,7 +927,7 @@ async fn test_summarized_index_deletion() { // What happens when you delete an index that doesn't exists. let (ret, _code) = index.delete().await; - let batch = index.wait_task(ret.uid()).await.failed(); + let batch = server.wait_task(ret.uid()).await.failed(); snapshot!(batch, @r###" { @@ -960,7 +960,7 @@ async fn test_summarized_index_update() { let index = server.unique_index(); // If the index doesn't exist yet, we should get errors with or without the primary key. 
let (task, _status_code) = index.update(None).await; - let task = index.wait_task(task.uid()).await.failed(); + let task = server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -998,7 +998,7 @@ async fn test_summarized_index_update() { "###); let (task, _status_code) = index.update(Some("bones")).await; - let task = index.wait_task(task.uid()).await.failed(); + let task = server.wait_task(task.uid()).await.failed(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1041,7 +1041,7 @@ async fn test_summarized_index_update() { index.create(None).await; let (task, _status_code) = index.update(None).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1079,7 +1079,7 @@ async fn test_summarized_index_update() { "###); let (task, _status_code) = index.update(Some("bones")).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1225,9 +1225,9 @@ async fn test_summarized_batch_cancelation() { let index = server.unique_index(); // to avoid being flaky we're only going to cancel an already finished batch :( let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.cancel_tasks(format!("uids={}", task.uid()).as_str()).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { @@ -1275,9 +1275,9 @@ async fn test_summarized_batch_deletion() { let index = server.unique_index(); // to avoid being flaky we're only going to delete an already finished batch :( let (task, _status_code) = index.create(None).await; - index.wait_task(task.uid()).await.succeeded(); + server.wait_task(task.uid()).await.succeeded(); let (task, _status_code) = server.delete_tasks(format!("uids={}", task.uid()).as_str()).await; - let task = index.wait_task(task.uid()).await.succeeded(); + let task = server.wait_task(task.uid()).await.succeeded(); let (batch, _) = index.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { From eb6ad3ef9c4fab57d80bc127b29e25e18b4491c4 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 10:24:25 +0300 Subject: [PATCH 15/21] Fix batch id detection Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index f763a8fc0..268147d02 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -346,6 +346,7 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -438,6 +439,7 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.writeChannelCongestion" => 
"[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, + ".batchCreationComplete" => "batched all enqueued tasks", }, @r###" { @@ -1127,8 +1129,8 @@ async fn test_summarized_index_swap() { { "indexes": ["doggos", "cattos"] } ])) .await; - server.wait_task(task.uid()).await.failed(); - let (batch, _) = server.get_batch(task.uid() as u32).await; + let task = server.wait_task(task.uid()).await.failed(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", @@ -1181,8 +1183,8 @@ async fn test_summarized_index_swap() { { "indexes": [doggos_index.uid, cattos_index.uid] } ])) .await; - server.wait_task(task.uid()).await.succeeded(); - let (batch, _) = server.get_batch(task.uid() as u32).await; + let task = server.wait_task(task.uid()).await.succeeded(); + let (batch, _) = server.get_batch(task.batch_uid()).await; assert_json_snapshot!(batch, { ".uid" => "[uid]", From a39223822af690800ee0ce12806ceb41dce71f68 Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 11:11:46 +0300 Subject: [PATCH 16/21] More tests fixes Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 48 ++++++++++++------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index 7a21f1eca..bb8f3b6aa 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -396,7 +396,7 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -439,7 +439,7 @@ async fn test_summarized_delete_documents_by_batch() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -532,7 +532,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -667,7 +667,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -781,7 +781,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -802,7 +802,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": 
"task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); @@ -819,7 +819,7 @@ async fn test_summarized_index_creation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -842,7 +842,7 @@ async fn test_summarized_index_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); } @@ -974,7 +974,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -995,7 +995,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); @@ -1012,7 +1012,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1035,7 +1035,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); @@ -1055,7 +1055,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1076,7 +1076,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." 
} "###); @@ -1093,7 +1093,7 @@ async fn test_summarized_index_update() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1116,7 +1116,7 @@ async fn test_summarized_index_update() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexUpdate` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexUpdate` that cannot be batched with any other task." } "###); } @@ -1140,7 +1140,7 @@ async fn test_summarized_index_swap() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1170,7 +1170,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexSwap` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexSwap` that cannot be batched with any other task." } "###); @@ -1195,7 +1195,7 @@ async fn test_summarized_index_swap() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1216,7 +1216,7 @@ async fn test_summarized_index_swap() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `indexCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `indexCreation` that cannot be batched with any other task." } "###); } @@ -1241,7 +1241,7 @@ async fn test_summarized_batch_cancelation() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".details.originalFilter" => "?uids=X", - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1266,7 +1266,7 @@ async fn test_summarized_batch_cancelation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `taskCancelation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `taskCancelation` that cannot be batched with any other task." 
} "###); } @@ -1336,7 +1336,7 @@ async fn test_summarized_dump_creation() { ".finishedAt" => "[date]", ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", - ".batchCreationComplete" => insta::dynamic_redaction(task_with_id_redaction), + ".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction), }, @r###" { @@ -1359,7 +1359,7 @@ async fn test_summarized_dump_creation() { "duration": "[duration]", "startedAt": "[date]", "finishedAt": "[date]", - "batchCreationComplete": "task with id X of type `dumpCreation` cannot be batched" + "batchStrategy": "created batch containing only task with id X of type `dumpCreation` that cannot be batched with any other task." } "###); } From e3daa907c5fe904951f47d10886cfc7b3cfcdf8c Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Fri, 11 Jul 2025 11:14:39 +0300 Subject: [PATCH 17/21] Update redactions Signed-off-by: Martin Tzvetanov Grigorov --- crates/meilisearch/tests/batches/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/meilisearch/tests/batches/mod.rs b/crates/meilisearch/tests/batches/mod.rs index bb8f3b6aa..9d6bee7c1 100644 --- a/crates/meilisearch/tests/batches/mod.rs +++ b/crates/meilisearch/tests/batches/mod.rs @@ -301,7 +301,7 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -346,7 +346,7 @@ async fn test_summarized_document_addition_or_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -487,7 +487,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -577,7 +577,7 @@ async fn test_summarized_delete_documents_by_filter() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks" + ".batchStrategy" => "batched all enqueued tasks" }, @r###" { @@ -624,7 +624,7 @@ async fn test_summarized_delete_document_by_id() { ".stats.progressTrace" => "[progressTrace]", ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks", + ".batchStrategy" => "batched all enqueued tasks", }, @r###" { @@ -726,7 +726,7 @@ async fn test_summarized_settings_update() { ".stats.writeChannelCongestion" => "[writeChannelCongestion]", ".stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".stats.indexUids" => r#"{"[uuid]": 1}"#, - ".batchCreationComplete" => "batched all enqueued tasks" + ".batchStrategy" => "batched all enqueued tasks" }, @r###" { From d072edaa49394c8affe73f4413637824bf2ebc97 Mon Sep 17 00:00:00 2001 From: curquiza Date: Sun, 13 Jul 2025 
12:26:56 +0200 Subject: [PATCH 18/21] Fix Rails CI --- .github/workflows/sdks-tests.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/sdks-tests.yml b/.github/workflows/sdks-tests.yml index edabec0a7..c83864e7f 100644 --- a/.github/workflows/sdks-tests.yml +++ b/.github/workflows/sdks-tests.yml @@ -344,15 +344,21 @@ jobs: MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }} ports: - '7700:7700' + env: + RAILS_VERSION: '7.0' steps: - uses: actions/checkout@v3 - with: - repository: meilisearch/meilisearch-rails - - name: Set up Ruby 3 + - name: Install SQLite dependencies + run: sudo apt-get update && sudo apt-get install -y libsqlite3-dev + - name: Set up Ruby uses: ruby/setup-ruby@v1 with: ruby-version: 3 bundler-cache: true + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.12.0 + with: + mongodb-version: 8.0 - name: Run tests run: bundle exec rspec From f813eb7ca49da9330cb74dbcb82283c4ab5a7bd3 Mon Sep 17 00:00:00 2001 From: curquiza Date: Sun, 13 Jul 2025 12:35:54 +0200 Subject: [PATCH 19/21] Fix --- .github/workflows/sdks-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/sdks-tests.yml b/.github/workflows/sdks-tests.yml index c83864e7f..dc4d51068 100644 --- a/.github/workflows/sdks-tests.yml +++ b/.github/workflows/sdks-tests.yml @@ -348,6 +348,8 @@ jobs: RAILS_VERSION: '7.0' steps: - uses: actions/checkout@v3 + with: + repository: meilisearch/meilisearch-rails - name: Install SQLite dependencies run: sudo apt-get update && sudo apt-get install -y libsqlite3-dev - name: Set up Ruby From 191ea340ed7ba4deea3538e7a716b7c6052c1689 Mon Sep 17 00:00:00 2001 From: Thomas Gerbet Date: Wed, 23 Apr 2025 11:50:36 +0200 Subject: [PATCH 20/21] Sign container image using Cosign in keyless mode Cosign keyless mode makes possible to sign the container image using the OIDC Identity Tokens provided by GitHub Actions [0][1]. The signature is published to the registry storing the image and to the public Rekor transparency log instance [2]. Cosign keyless mode has already been adopted by some major projects like Kubernetes [3]. The image signature can be manually verified using: ``` $ cosign verify \ --certificate-oidc-issuer='https://token.actions.githubusercontent.com' \ --certificate-identity-regexp='^https://github.com/meilisearch/meilisearch/.github/workflows/publish-docker-images.yaml' \ ``` See #2179. Note that a similar approach can be used to sign the release binaries. 
[0] https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect [1] https://docs.sigstore.dev/cosign/signing/signing_with_containers/ [2] https://docs.sigstore.dev/rekor/overview [3] https://kubernetes.io/docs/tasks/administer-cluster/verify-signed-artifacts/#verifying-image-signatures --- .github/workflows/publish-docker-images.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/workflows/publish-docker-images.yml b/.github/workflows/publish-docker-images.yml index 74384e670..6d2ce2248 100644 --- a/.github/workflows/publish-docker-images.yml +++ b/.github/workflows/publish-docker-images.yml @@ -16,6 +16,8 @@ on: jobs: docker: runs-on: docker + permissions: + id-token: write # This is needed to use Cosign in keyless mode steps: - uses: actions/checkout@v3 @@ -62,6 +64,9 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 + - name: Install cosign + uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # tag=v3.8.2 + - name: Login to Docker Hub uses: docker/login-action@v3 with: @@ -85,6 +90,7 @@ jobs: - name: Build and push uses: docker/build-push-action@v6 + id: build-and-push with: push: true platforms: linux/amd64,linux/arm64 @@ -94,6 +100,17 @@ jobs: COMMIT_DATE=${{ steps.build-metadata.outputs.date }} GIT_TAG=${{ github.ref_name }} + - name: Sign the images with GitHub OIDC Token + env: + DIGEST: ${{ steps.build-and-push.outputs.digest }} + TAGS: ${{ steps.meta.outputs.tags }} + run: | + images="" + for tag in ${TAGS}; do + images+="${tag}@${DIGEST} " + done + cosign sign --yes ${images} + # /!\ Don't touch this without checking with Cloud team - name: Send CI information to Cloud team # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event) From c1a5a545b6f1b4d4f3b46e1faf83e717c32b7ee1 Mon Sep 17 00:00:00 2001 From: curquiza Date: Thu, 31 Jul 2025 15:23:45 +0200 Subject: [PATCH 21/21] Adapt Go CI to recent change in the Go repo --- .github/workflows/sdks-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sdks-tests.yml b/.github/workflows/sdks-tests.yml index dc4d51068..0bcc1d7a7 100644 --- a/.github/workflows/sdks-tests.yml +++ b/.github/workflows/sdks-tests.yml @@ -114,7 +114,7 @@ jobs: dep ensure fi - name: Run integration tests - run: go test -v ./... + run: go test --race -v ./integration meilisearch-java-tests: needs: define-docker-image
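
Note on `task_with_id_redaction`, introduced in PATCH 13 and reused by the snapshot redactions in PATCH 16 and 17: the visible hunk context shows its signature and the `TASK_WITH_ID_RE` regex (`task with id (\d+) of type`), but the function body itself falls outside the diff context. Judging from the redacted snapshots ("task with id X of type ..."), it normalizes the concrete task id to a stable `X`, which is what lets these assertions keep passing on a shared server where task ids are no longer deterministic. A minimal sketch of such a helper, assuming the body simply rewrites the matched id (the exact committed implementation is not shown in these hunks):

```rust
use insta::internals::{Content, ContentPath};
use once_cell::sync::Lazy;
use regex::Regex;

static TASK_WITH_ID_RE: Lazy<Regex> =
    Lazy::new(|| Regex::new(r"task with id (\d+) of type").unwrap());

// Rewrite "task with id 1234 of type ..." to "task with id X of type ..." so
// that `batchStrategy` / `batchCreationComplete` messages stay deterministic
// across test runs.
fn task_with_id_redaction(value: Content, _path: ContentPath) -> Content {
    match value {
        Content::String(s) => {
            let redacted = TASK_WITH_ID_RE.replace_all(&s, "task with id X of type");
            Content::String(redacted.into_owned())
        }
        _ => value,
    }
}
```

The patches wire it into individual snapshot fields via `insta::dynamic_redaction`, e.g. `".batchStrategy" => insta::dynamic_redaction(task_with_id_redaction)`, rather than hard-coding the whole message as a static redaction string, so the rest of the message still participates in the snapshot comparison.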