Revert to unique server + named index for some tests

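Tests that assert concrete task uids need those uids to be stable, which the
shared test server cannot guarantee: tasks enqueued by other tests interleave
with its own. Those tests go back to a dedicated server with explicitly named
indexes. A minimal sketch of the two patterns, using only the Server helpers
that appear in this diff:

    // Shared server: cheap to obtain, but task uids are not deterministic and
    // unique_index() yields a uuid-based name (redacted as "[uuid]" in snapshots).
    let server = Server::new_shared();
    let index = server.unique_index();

    // Dedicated server: one instance per test, so task uids start at 0 and
    // index names can be fixed strings such as "test".
    let server = Server::new().await;
    let index = server.index("test");
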
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
Author: Martin Tzvetanov Grigorov
Date: 2025-06-02 11:44:21 +03:00
parent 5a7cfc57fd
commit 8d8fcb9846
GPG Key ID: 3194FD8C1AE300EF


@@ -1,7 +1,6 @@
 mod errors;
 mod webhook;
-use insta::assert_json_snapshot;
 use meili_snap::{json_string, snapshot};
 use time::format_description::well_known::Rfc3339;
 use time::OffsetDateTime;
@@ -18,7 +17,7 @@ async fn error_get_unexisting_task_status() {
     let (response, code) = index.get_task(u32::MAX as u64).await;

     let expected_response = json!({
-        "message": "Task `1` not found.",
+        "message": "Task `4294967295` not found.",
         "code": "task_not_found",
         "type": "invalid_request",
         "link": "https://docs.meilisearch.com/errors#task_not_found"
@@ -64,11 +63,12 @@ async fn list_tasks() {

 #[actix_rt::test]
 async fn list_tasks_pagination_and_reverse() {
-    let server = Server::new_shared();
+    // do not use a shared server here, as we want to assert tasks ids and we need them to be stable
+    let server = Server::new().await;
     // First of all we want to create a lot of tasks very quickly. The fastest way is to delete a lot of unexisting indexes
     let mut last_task = None;
-    for _ in 0..10 {
-        let index = server.unique_index();
+    for i in 0..10 {
+        let index = server.index(format!("test-{i}"));
         last_task = Some(index.create(None).await.0.uid());
     }
     server.wait_task(last_task.unwrap()).await.succeeded();
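Because the dedicated server starts with an empty task queue, the ten index-creation tasks above get sequential uids that the pagination/reverse assertions can rely on. A hedged sketch of that property, reusing only helpers visible in this diff:

    // On a freshly started server the first enqueued task gets uid 0,
    // the next uid 1, and so on.
    let server = Server::new().await;
    let first = server.index("test-0").create(None).await.0.uid(); // 0
    let second = server.index("test-1").create(None).await.0.uid(); // 1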
@@ -102,8 +102,9 @@

 #[actix_rt::test]
 async fn list_tasks_with_star_filters() {
-    let server = Server::new_shared();
-    let index = server.unique_index();
+    let server = Server::new().await;
+    // Do not use a unique index here, as we want to test the `indexUids=*` filter.
+    let index = server.index("test");
     let (task, _code) = index.create(None).await;
     index.wait_task(task.uid()).await.succeeded();
     index
@ -132,7 +133,10 @@ async fn list_tasks_with_star_filters() {
let (response, code) = index let (response, code) = index
.service .service
.get(format!("/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids={}", index.uid)) .get(format!(
"/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids={}",
index.uid
))
.await; .await;
assert_eq!(code, 200, "{response:?}"); assert_eq!(code, 200, "{response:?}");
assert_eq!(response["results"].as_array().unwrap().len(), 2); assert_eq!(response["results"].as_array().unwrap().len(), 2);
@@ -278,6 +282,7 @@ async fn test_summarized_document_addition_or_update() {
     let (task, _status_code) =
         index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await;
     index.wait_task(task.uid()).await.succeeded();
+    let (task, _) = index.get_task(task.uid()).await;
     snapshot!(json_string!(task,
         { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
         @r###"
@@ -303,14 +308,14 @@ async fn test_summarized_document_addition_or_update() {
     let (task, _status_code) =
         index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
     index.wait_task(task.uid()).await.succeeded();
-    let (task, _) = index.get_task(1).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    let (task, _) = index.get_task(task.uid()).await;
+    snapshot!(json_string!(task,
+        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
         @r###"
     {
-      "uid": 1,
-      "batchUid": 1,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
       "status": "succeeded",
       "type": "documentAdditionOrUpdate",
       "canceledBy": null,
@@ -331,14 +336,20 @@
 async fn test_summarized_delete_documents_by_batch() {
     let server = Server::new_shared();
     let index = server.unique_index();
-    let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await;
+    let non_existing_task_id1 = u32::MAX as u64;
+    let non_existing_task_id2 = non_existing_task_id1 - 1;
+    let non_existing_task_id3 = non_existing_task_id1 - 2;
+    let (task, _status_code) = index
+        .delete_batch(vec![non_existing_task_id1, non_existing_task_id2, non_existing_task_id3])
+        .await;
     index.wait_task(task.uid()).await.failed();
+    let (task, _) = index.get_task(task.uid()).await;
     snapshot!(json_string!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
+        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
         @r###"
     {
-      "uid": 0,
-      "batchUid": 0,
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
       "indexUid": "[uuid]",
       "status": "failed",
       "type": "documentDeletion",
@@ -349,7 +360,7 @@ async fn test_summarized_delete_documents_by_batch() {
         "originalFilter": null
       },
       "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
         "code": "index_not_found",
         "type": "invalid_request",
         "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -366,11 +377,11 @@
     index.wait_task(del_task.uid()).await.succeeded();
     let (task, _) = index.get_task(del_task.uid()).await;
     snapshot!(json_string!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
+        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
         @r###"
     {
-      "uid": 2,
-      "batchUid": 2,
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
       "indexUid": "[uuid]",
       "status": "succeeded",
       "type": "documentDeletion",
@@ -975,12 +986,13 @@ async fn test_summarized_task_cancelation() {
     let index = server.unique_index();
     // to avoid being flaky we're only going to cancel an already finished task :(
     let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
-    let (task, _status_code) = server.cancel_tasks("uids=0").await;
+    let task_uid = task.uid();
+    index.wait_task(task_uid).await.succeeded();
+    let (task, _status_code) = server.cancel_tasks(format!("uids={task_uid}").as_str()).await;
     index.wait_task(task.uid()).await.succeeded();
     let (task, _) = index.get_task(task.uid()).await;
     snapshot!(json_string!(task,
-        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
+        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.originalFilter" => "[of]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
         @r###"
     {
       "uid": "[uid]",
@@ -992,7 +1004,7 @@ async fn test_summarized_task_cancelation() {
       "details": {
         "matchedTasks": 1,
         "canceledTasks": 0,
-        "originalFilter": "?uids=0"
+        "originalFilter": "[of]"
       },
       "error": null,
       "duration": "[duration]",