mirror of https://github.com/meilisearch/meilisearch.git, synced 2025-12-04 03:35:43 +00:00

Compare commits: sharding...embedder-s

49 Commits (SHA1):

00a0f74afa
71b5afa23b
bff49cae38
17abe14bd9
170ad87e44
8f96724adf
01e5b0effa
2ec9664878
10028515ac
63ccd19ab1
1b4d344e18
89c0cf9b12
3770e70581
e497008161
a15ebb283f
3f256a7959
b41af0d0f6
3ebff65ef3
1d02efeab9
53fc98d3b0
61b0f50d4d
0557a4dd2f
930d5a09a8
8b0c4291ae
c9efdf8c88
72736c0ea9
49317bbee4
af54c8381e
693fcd5752
733175359a
7c6162f0bf
d6ae39bf0f
e416bbc1de
2cfd363dc6
70aa78a2c2
96c81762ed
0b1f634afa
d3d5015854
f95f29c492
a50b69b868
3668f5f021
54fdf379bb
41b1cd5a73
5cb75d1f2a
52591761af
3b30b6a57a
cffbe3fcb6
8d8fcb9846
5a7cfc57fd
.github/workflows/db-change-missing.yml (vendored): 10 changed lines
@@ -4,22 +4,22 @@ on:
   pull_request:
     types: [opened, synchronize, reopened, labeled, unlabeled]
 
-env:
-  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
-
 jobs:
   check-labels:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Check db change labels
         id: check_labels
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
-          URL=/repos/meilisearch/meilisearch/pulls/${{ github.event.pull_request.number }}/labels
-          echo ${{ github.event.pull_request.number }}
-          echo $URL
-          LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/meilisearch/meilisearch/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
+          LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
           echo "Labels: $LABELS"
           if [[ ! "$LABELS" =~ "db change" && ! "$LABELS" =~ "no db change" ]]; then
             echo "::error::Pull request must contain either the 'db change' or 'no db change' label."
             exit 1
Cargo.lock (generated): 10 changed lines
@@ -611,15 +611,6 @@ dependencies = [
  "wyz",
 ]
 
-[[package]]
-name = "blake2"
-version = "0.10.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe"
-dependencies = [
- "digest",
-]
-
 [[package]]
 name = "block-buffer"
 version = "0.10.4"

@@ -3809,7 +3800,6 @@ dependencies = [
  "big_s",
  "bimap",
  "bincode",
- "blake2",
  "bstr",
  "bumpalo",
  "bumparaw-collections",
@@ -115,19 +115,6 @@ impl IndexScheduler {
 
         let indexer_config = self.index_mapper.indexer_config();
         let pool = &indexer_config.thread_pool;
-        let network = self.network();
-        let shards: Vec<&str> = network
-            .local
-            .as_deref()
-            .into_iter()
-            .chain(
-                network
-                    .remotes
-                    .keys()
-                    .map(|s| s.as_str())
-                    .filter(|s| Some(s) != network.local.as_deref().as_ref()),
-            )
-            .collect();
 
         progress.update_progress(DocumentOperationProgress::ComputingDocumentChanges);
         let (document_changes, operation_stats, primary_key) = indexer

@@ -139,7 +126,6 @@ impl IndexScheduler {
             &mut new_fields_ids_map,
             &|| must_stop_processing.get(),
             progress.clone(),
-            &shards,
         )
         .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?;
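The block removed above derives the shard list from the network topology: the local node first, then every remote that is not the local node. A minimal standalone sketch of the same iterator pattern, with a stand-in `Network` type and made-up node names (the real type lives in the index scheduler):

```rust
use std::collections::BTreeMap;

// Stand-in for the scheduler's network topology; field names mirror the diff.
struct Network {
    local: Option<String>,
    remotes: BTreeMap<String, ()>,
}

// Local node first, then every remote that isn't the local node itself.
fn shard_list(network: &Network) -> Vec<&str> {
    network
        .local
        .as_deref()
        .into_iter()
        .chain(
            network
                .remotes
                .keys()
                .map(|s| s.as_str())
                .filter(|s| Some(*s) != network.local.as_deref()),
        )
        .collect()
}

fn main() {
    let network = Network {
        local: Some("ms-0".to_string()),
        remotes: [("ms-0".to_string(), ()), ("ms-1".to_string(), ())].into_iter().collect(),
    };
    // "ms-0" appears once even though it is both local and a remote entry.
    assert_eq!(shard_list(&network), ["ms-0", "ms-1"]);
}
```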
@@ -1,5 +1,5 @@
 //! This file implements a queue of searches to process and the ability to control how many searches can be run in parallel.
-//! We need this because we don't want to process more search requests than we have cores.
+//! We need this because we don't want to process more search requests than the available CPU cores.
 //! That slows down everything and consumes RAM for no reason.
 //! The steps to do a search are to get the `SearchQueue` data structure and try to get a search permit.
 //! This can fail if the queue is full, and we need to drop your search request to register a new one.

@@ -8,7 +8,7 @@
 //!
 //! In order to do a search request you should try to get a search permit.
 //! Retrieve the `SearchQueue` structure from actix-web (`search_queue: Data<SearchQueue>`)
-//! and right before processing the search, calls the `SearchQueue::try_get_search_permit` method: `search_queue.try_get_search_permit().await?;`
+//! and right before processing the search, call the `SearchQueue::try_get_search_permit` method: `search_queue.try_get_search_permit().await?;`
 //!
 //! What is going to happen at this point is that you're going to send a oneshot::Sender over an async mpsc channel.
 //! Then, the queue/scheduler is going to either:

@@ -121,12 +121,12 @@ impl SearchQueue {
         let mut queue: Vec<oneshot::Sender<Permit>> = Default::default();
         let mut rng: StdRng = StdRng::from_entropy();
         let mut searches_running: usize = 0;
-        // By having a capacity of parallelism we ensures that every time a search finish it can release its RAM asap
+        // By having a capacity of parallelism we ensure that every time a search finish it can release its RAM asap
         let (sender, mut search_finished) = mpsc::channel(parallelism.into());
 
         loop {
             tokio::select! {
-                // biased select because we wants to free up space before trying to register new tasks
+                // biased select because we want to free up space before trying to register new tasks
                 biased;
                 _ = search_finished.recv() => {
                     searches_running = searches_running.saturating_sub(1);

@@ -148,11 +148,11 @@ impl SearchQueue {
 
                     if searches_running < usize::from(parallelism) && queue.is_empty() {
                         searches_running += 1;
-                        // if the search requests die it's not a hard error on our side
+                        // if the search requests die, it's not a hard error on our side
                         let _ = search_request.send(Permit { sender: sender.clone() });
                         continue;
                     } else if capacity == 0 {
-                        // in the very specific case where we have a capacity of zero
+                        // in the very specific case where we have a capacity of zero,
                        // we must refuse the request straight away without going through
                        // the queue stuff.
                        drop(search_request);

@@ -183,7 +183,7 @@ impl SearchQueue {
             .map_err(|_| MeilisearchHttpError::TooManySearchRequests(self.capacity))?;
 
         // If we've been for more than one minute to get a search permit, it's better to simply
-        // abort the search request than spending time processing something were the client
+        // abort the search request than spending time processing something where the client
         // most certainly exited or got a timeout a long time ago.
         // We may find a better solution in https://github.com/actix/actix-web/issues/3462.
         if now.elapsed() > self.time_to_abort {
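The doc-comment fixes above describe the permit flow: a handler asks the queue for a permit, and finished searches hand capacity back through a channel. A rough self-contained analogue of that flow using a plain `tokio` semaphore (the real `SearchQueue` additionally bounds the waiting queue and evicts a random waiter when it overflows, which a bare semaphore does not model):

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // Cap concurrency at the core count, as the module docs above describe.
    let parallelism = std::thread::available_parallelism().map(usize::from).unwrap_or(1);
    let permits = Arc::new(Semaphore::new(parallelism));

    let mut handles = Vec::new();
    for i in 0..8 {
        let permits = Arc::clone(&permits);
        handles.push(tokio::spawn(async move {
            // Rough equivalent of `try_get_search_permit().await?`.
            let _permit = permits.acquire_owned().await.expect("semaphore closed");
            println!("search {i} running");
            // Dropping `_permit` releases the slot, like a search finishing.
        }));
    }
    for handle in handles {
        handle.await.unwrap();
    }
}
```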
@@ -128,7 +128,8 @@ impl Display for Value {
             ".finishedAt" => "[date]",
             ".duration" => "[duration]",
             ".processingTimeMs" => "[duration]",
-            ".details.embedders.*.url" => "[url]"
+            ".details.embedders.*.url" => "[url]",
+            ".details.dumpUid" => "[dump_uid]",
         })
     )
 }

@@ -351,7 +352,7 @@ pub async fn shared_index_with_nested_documents() -> &'static Index<'static, Shared> {
     index.wait_task(response.uid()).await.succeeded();
     let (response, _code) = index
         ._update_settings(
-            json!({"filterableAttributes": ["father", "doggos"], "sortableAttributes": ["doggos"]}),
+            json!({"filterableAttributes": ["father", "doggos", "cattos"], "sortableAttributes": ["doggos"]}),
         )
         .await;
     index.wait_task(response.uid()).await.succeeded();

@@ -453,3 +454,57 @@ pub async fn shared_index_with_test_set() -> &'static Index<'static, Shared> {
         })
         .await
 }
+
+pub static GEO_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
+    json!([
+        {
+            "id": 1,
+            "name": "Taco Truck",
+            "address": "444 Salsa Street, Burritoville",
+            "type": "Mexican",
+            "rating": 9,
+            "_geo": {
+                "lat": 34.0522,
+                "lng": -118.2437
+            }
+        },
+        {
+            "id": 2,
+            "name": "La Bella Italia",
+            "address": "456 Elm Street, Townsville",
+            "type": "Italian",
+            "rating": 9,
+            "_geo": {
+                "lat": "45.4777599",
+                "lng": "9.1967508"
+            }
+        },
+        {
+            "id": 3,
+            "name": "Crêpe Truck",
+            "address": "2 Billig Avenue, Rouenville",
+            "type": "French",
+            "rating": 10
+        }
+    ])
+});
+
+pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared> {
+    static INDEX: OnceCell<Index<'static, Shared>> = OnceCell::const_new();
+    INDEX
+        .get_or_init(|| async {
+            let server = Server::new_shared();
+            let index = server._index("SHARED_GEO_DOCUMENTS").to_shared();
+            let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await;
+            index.wait_task(response.uid()).await.succeeded();
+
+            let (response, _code) = index
+                ._update_settings(
+                    json!({"filterableAttributes": ["_geo"], "sortableAttributes": ["_geo"]}),
+                )
+                .await;
+            index.wait_task(response.uid()).await.succeeded();
+            index
+        })
+        .await
+}
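The new `shared_index_with_geo_documents` fixture is consumed by the geo tests further down in this diff; a typical call site looks like the sketch below (the body is lifted from those tests, only the test name is invented):

```rust
#[actix_rt::test]
async fn uses_shared_geo_fixture() {
    // Created once per test binary and reused by every caller.
    let index = shared_index_with_geo_documents().await;

    index
        .search(
            json!({ "sort": ["_geoPoint(0.0, 0.0):asc"] }),
            |response, code| {
                assert_eq!(code, 200, "{response}");
            },
        )
        .await;
}
```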
@@ -399,18 +399,9 @@ impl<State> Server<State> {
     pub async fn wait_task(&self, update_id: u64) -> Value {
         // try several times to get status, or panic to not wait forever
         let url = format!("/tasks/{}", update_id);
-        // Increase timeout for vector-related tests
-        let max_attempts = if url.contains("/tasks/") {
-            if update_id > 1000 {
-                400 // 200 seconds for vector tests
-            } else {
-                100 // 50 seconds for other tests
-            }
-        } else {
-            100 // 50 seconds for other tests
-        };
+        let max_attempts = 400; // 200 seconds total, 0.5s per attempt
 
-        for _ in 0..max_attempts {
+        for i in 0..max_attempts {
             let (response, status_code) = self.service.get(&url).await;
             assert_eq!(200, status_code, "response: {}", response);

@@ -420,6 +411,10 @@ impl<State> Server<State> {
 
             // wait 0.5 second.
             sleep(Duration::from_millis(500)).await;
+
+            if i == max_attempts - 1 {
+                dbg!(response);
+            }
         }
         panic!("Timeout waiting for update id");
     }
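The rewritten `wait_task` drops the URL-based special cases for one flat budget: 400 polls at 0.5 s each, about 200 seconds, dumping the last response on the final attempt. The underlying pattern is a plain bounded poll, sketched in isolation below (the helper name and signature are ours, not the test suite's):

```rust
use std::time::Duration;

// Poll `is_done` every 500 ms, giving up after `max_attempts` tries.
async fn wait_until(mut is_done: impl FnMut() -> bool, max_attempts: usize) -> bool {
    for _ in 0..max_attempts {
        if is_done() {
            return true;
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    false
}

#[tokio::main]
async fn main() {
    let mut polls = 0;
    // Succeeds on the third poll; 400 mirrors the new `max_attempts` above.
    assert!(wait_until(|| { polls += 1; polls >= 3 }, 400).await);
}
```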
@@ -1,39 +1,35 @@
 use meili_snap::{json_string, snapshot};
 
-use crate::common::{GetAllDocumentsOptions, Server};
+use crate::common::{shared_does_not_exists_index, GetAllDocumentsOptions, Server};
 use crate::json;
 
 #[actix_rt::test]
 async fn delete_one_document_unexisting_index() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    let (task, code) = index.delete_document(0).await;
+    let index = shared_does_not_exists_index().await;
+    let (task, code) = index.delete_document_by_filter_fail(json!({"filter": "a = b"})).await;
     assert_eq!(code, 202);
-
-    let response = index.wait_task(task.uid()).await;
-
-    assert_eq!(response["status"], "failed");
+    index.wait_task(task.uid()).await.failed();
 }
 
 #[actix_rt::test]
 async fn delete_one_unexisting_document() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     index.create(None).await;
     let (response, code) = index.delete_document(0).await;
-    assert_eq!(code, 202, "{}", response);
-    let update = index.wait_task(response.uid()).await;
-    assert_eq!(update["status"], "succeeded");
+    assert_eq!(code, 202, "{response}");
+    index.wait_task(response.uid()).await.succeeded();
 }
 
 #[actix_rt::test]
 async fn delete_one_document() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) =
         index.add_documents(json!([{ "id": 0, "content": "foobar" }]), None).await;
     index.wait_task(task.uid()).await.succeeded();
-    let (task, status_code) = server.index("test").delete_document(0).await;
+    let (task, status_code) = index.delete_document(0).await;
     assert_eq!(status_code, 202);
     index.wait_task(task.uid()).await.succeeded();

@@ -43,20 +39,18 @@ async fn delete_one_document() {
 
 #[actix_rt::test]
 async fn clear_all_documents_unexisting_index() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, code) = index.clear_all_documents().await;
     assert_eq!(code, 202);
-
-    let response = index.wait_task(task.uid()).await;
-
-    assert_eq!(response["status"], "failed");
+    index.wait_task(task.uid()).await.failed();
 }
 
 #[actix_rt::test]
 async fn clear_all_documents() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) = index
         .add_documents(
             json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }]),

@@ -67,7 +61,7 @@ async fn clear_all_documents() {
     let (task, code) = index.clear_all_documents().await;
     assert_eq!(code, 202);
 
-    let _update = index.wait_task(task.uid()).await;
+    let _update = index.wait_task(task.uid()).await.succeeded();
     let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
     assert_eq!(code, 200);
     assert!(response["results"].as_array().unwrap().is_empty());

@@ -75,14 +69,14 @@ async fn clear_all_documents() {
 
 #[actix_rt::test]
 async fn clear_all_documents_empty_index() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, _status_code) = index.create(None).await;
     index.wait_task(task.uid()).await.succeeded();
     let (task, code) = index.clear_all_documents().await;
     assert_eq!(code, 202);
 
-    let _update = index.wait_task(task.uid()).await;
+    let _update = index.wait_task(task.uid()).await.succeeded();
     let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
     assert_eq!(code, 200);
     assert!(response["results"].as_array().unwrap().is_empty());

@@ -90,33 +84,31 @@ async fn clear_all_documents_empty_index() {
 
 #[actix_rt::test]
 async fn error_delete_batch_unexisting_index() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task, code) = index.delete_batch(vec![]).await;
     let expected_response = json!({
-        "message": "Index `test` not found.",
+        "message": format!("Index `{}` not found.", index.uid),
         "code": "index_not_found",
         "type": "invalid_request",
         "link": "https://docs.meilisearch.com/errors#index_not_found"
     });
     assert_eq!(code, 202);
-
-    let response = index.wait_task(task.uid()).await;
-
-    assert_eq!(response["status"], "failed");
+    let response = index.wait_task(task.uid()).await.failed();
     assert_eq!(response["error"], expected_response);
 }
 
 #[actix_rt::test]
 async fn delete_batch() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task,_status_code) = index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
     index.wait_task(task.uid()).await.succeeded();
     let (task, code) = index.delete_batch(vec![1, 0]).await;
     assert_eq!(code, 202);
 
-    let _update = index.wait_task(task.uid()).await;
+    let _update = index.wait_task(task.uid()).await.succeeded();
     let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
     assert_eq!(code, 200);
     assert_eq!(response["results"].as_array().unwrap().len(), 1);
@@ -125,14 +117,14 @@ async fn delete_batch() {
 
 #[actix_rt::test]
 async fn delete_no_document_batch() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     let (task,_status_code) = index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
     index.wait_task(task.uid()).await.succeeded();
-    let (_response, code) = index.delete_batch(vec![]).await;
-    assert_eq!(code, 202, "{}", _response);
+    let (response, code) = index.delete_batch(vec![]).await;
+    assert_eq!(code, 202, "{response}");
 
-    let _update = index.wait_task(_response.uid()).await;
+    let _update = index.wait_task(response.uid()).await.succeeded();
     let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
     assert_eq!(code, 200);
     assert_eq!(response["results"].as_array().unwrap().len(), 3);

@@ -140,8 +132,8 @@ async fn delete_no_document_batch() {
 
 #[actix_rt::test]
 async fn delete_document_by_filter() {
-    let server = Server::new().await;
-    let index = server.index("doggo");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     index.update_settings_filterable_attributes(json!(["color"])).await;
     let (task, _status_code) = index
         .add_documents(

@@ -178,22 +170,22 @@ async fn delete_document_by_filter() {
     let (response, code) =
         index.delete_document_by_filter(json!({ "filter": "color = blue"})).await;
     snapshot!(code, @"202 Accepted");
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]" }), @r###"
+    snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
     {
-      "taskUid": 2,
-      "indexUid": "doggo",
+      "taskUid": "[task_uid]",
+      "indexUid": "[uuid]",
       "status": "enqueued",
       "type": "documentDeletion",
       "enqueuedAt": "[date]"
     }
     "###);
 
-    let response = index.wait_task(response.uid()).await;
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
+    let response = index.wait_task(response.uid()).await.succeeded();
+    snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
     {
-      "uid": 2,
-      "batchUid": 2,
-      "indexUid": "doggo",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
       "status": "succeeded",
       "type": "documentDeletion",
       "canceledBy": null,

@@ -251,22 +243,22 @@ async fn delete_document_by_filter() {
     let (response, code) =
         index.delete_document_by_filter(json!({ "filter": "color NOT EXISTS"})).await;
     snapshot!(code, @"202 Accepted");
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
+    snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
     {
-      "taskUid": 3,
-      "indexUid": "doggo",
+      "taskUid": "[task_uid]",
+      "indexUid": "[uuid]",
       "status": "enqueued",
       "type": "documentDeletion",
       "enqueuedAt": "[date]"
     }
     "###);
 
-    let response = index.wait_task(response.uid()).await;
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
+    let response = index.wait_task(response.uid()).await.succeeded();
+    snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
     {
-      "uid": 3,
-      "batchUid": 3,
-      "indexUid": "doggo",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
       "status": "succeeded",
       "type": "documentDeletion",
       "canceledBy": null,

@@ -321,8 +313,8 @@ async fn delete_document_by_filter() {
 
 #[actix_rt::test]
 async fn delete_document_by_complex_filter() {
-    let server = Server::new().await;
-    let index = server.index("doggo");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     index.update_settings_filterable_attributes(json!(["color"])).await;
     let (task, _status_code) = index
         .add_documents(

@@ -343,22 +335,22 @@ async fn delete_document_by_complex_filter() {
         )
         .await;
     snapshot!(code, @"202 Accepted");
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]" }), @r###"
+    snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
     {
-      "taskUid": 2,
-      "indexUid": "doggo",
+      "taskUid": "[task_uid]",
+      "indexUid": "[uuid]",
       "status": "enqueued",
       "type": "documentDeletion",
       "enqueuedAt": "[date]"
     }
     "###);
 
-    let response = index.wait_task(response.uid()).await;
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
+    let response = index.wait_task(response.uid()).await.succeeded();
+    snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
     {
-      "uid": 2,
-      "batchUid": 2,
-      "indexUid": "doggo",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
       "status": "succeeded",
       "type": "documentDeletion",
       "canceledBy": null,

@@ -402,22 +394,22 @@ async fn delete_document_by_complex_filter() {
         .delete_document_by_filter(json!({ "filter": [["color = green", "color NOT EXISTS"]] }))
         .await;
     snapshot!(code, @"202 Accepted");
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
+    snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
     {
-      "taskUid": 3,
-      "indexUid": "doggo",
+      "taskUid": "[task_uid]",
+      "indexUid": "[uuid]",
       "status": "enqueued",
       "type": "documentDeletion",
       "enqueuedAt": "[date]"
     }
     "###);
 
-    let response = index.wait_task(response.uid()).await;
-    snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
+    let response = index.wait_task(response.uid()).await.succeeded();
+    snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
     {
-      "uid": 3,
-      "batchUid": 3,
-      "indexUid": "doggo",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
       "status": "succeeded",
       "type": "documentDeletion",
       "canceledBy": null,
@@ -623,88 +623,3 @@ async fn get_and_set_network() {
   }
   "###);
 }
-
-
-#[actix_rt::test]
-async fn index_with_shards() {
-    let server = Server::new().await;
-
-    let (response, code) = server.set_features(json!({"network": true})).await;
-    meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response["network"]), @r#"true"#);
-
-    // adding self
-    let (response, code) = server.set_network(json!({"self": "myself"})).await;
-    meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "self": "myself",
-      "remotes": {}
-    }
-    "###);
-
-    // adding remotes
-    let (response, code) = server
-        .set_network(json!({
-          "self": "myself",
-          "remotes": {
-            "myself": {
-              "url": "http://localhost:7700"
-            },
-            "thy": {
-              "url": "http://localhost:7701",
-              "searchApiKey": "foo"
-            }
-          }}))
-        .await;
-
-    meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "self": "myself",
-      "remotes": {
-        "myself": {
-          "url": "http://localhost:7700",
-          "searchApiKey": null
-        },
-        "thy": {
-          "url": "http://localhost:7701",
-          "searchApiKey": "foo"
-        }
-      }
-    }
-    "###);
-
-    // adding one remote
-    let (response, code) = server
-        .set_network(json!({"remotes": {
-          "them": {
-            "url": "http://localhost:7702",
-            "searchApiKey": "baz"
-          }
-        }}))
-        .await;
-
-    meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "self": "myself",
-      "remotes": {
-        "myself": {
-          "url": "http://localhost:7700",
-          "searchApiKey": null
-        },
-        "them": {
-          "url": "http://localhost:7702",
-          "searchApiKey": "baz"
-        },
-        "thy": {
-          "url": "http://localhost:7701",
-          "searchApiKey": "bar"
-        }
-      }
-    }
-    "###);
-
-}
@@ -4,23 +4,16 @@ use tempfile::TempDir;
 
 use super::test_settings_documents_indexing_swapping_and_search;
 use crate::{
-    common::{default_settings, shared_index_with_documents, Server, DOCUMENTS, NESTED_DOCUMENTS},
+    common::{
+        default_settings, shared_index_with_documents, shared_index_with_nested_documents, Server,
+        DOCUMENTS, NESTED_DOCUMENTS,
+    },
     json,
 };
 
 #[actix_rt::test]
 async fn search_with_filter_string_notation() {
-    let server = Server::new().await;
-    let index = server.index("test");
-
-    let (_, code) = index.update_settings(json!({"filterableAttributes": ["title"]})).await;
-    meili_snap::snapshot!(code, @"202 Accepted");
-
-    let documents = DOCUMENTS.clone();
-    let (task, code) = index.add_documents(documents, None).await;
-    meili_snap::snapshot!(code, @"202 Accepted");
-    let res = index.wait_task(task.uid()).await;
-    meili_snap::snapshot!(res["status"], @r###""succeeded""###);
+    let index = shared_index_with_documents().await;
 
     index
         .search(

@@ -28,44 +21,34 @@ async fn search_with_filter_string_notation() {
                 "filter": "title = Gläss"
             }),
             |response, code| {
-                assert_eq!(code, 200, "{}", response);
+                assert_eq!(code, 200, "{response}");
                 assert_eq!(response["hits"].as_array().unwrap().len(), 1);
             },
         )
         .await;
 
-    let index = server.index("nested");
+    let nested_index = shared_index_with_nested_documents().await;
 
-    let (_, code) =
-        index.update_settings(json!({"filterableAttributes": ["cattos", "doggos.age"]})).await;
-    meili_snap::snapshot!(code, @"202 Accepted");
-
-    let documents = NESTED_DOCUMENTS.clone();
-    let (task, code) = index.add_documents(documents, None).await;
-    meili_snap::snapshot!(code, @"202 Accepted");
-    let res = index.wait_task(task.uid()).await;
-    meili_snap::snapshot!(res["status"], @r###""succeeded""###);
-
-    index
+    nested_index
         .search(
             json!({
                 "filter": "cattos = pésti"
             }),
             |response, code| {
-                assert_eq!(code, 200, "{}", response);
+                assert_eq!(code, 200, "{response}");
                 assert_eq!(response["hits"].as_array().unwrap().len(), 1);
                 assert_eq!(response["hits"][0]["id"], json!(852));
             },
        )
        .await;
 
-    index
+    nested_index
         .search(
             json!({
                 "filter": "doggos.age > 5"
             }),
             |response, code| {
-                assert_eq!(code, 200, "{}", response);
+                assert_eq!(code, 200, "{response}");
                 assert_eq!(response["hits"].as_array().unwrap().len(), 2);
                 assert_eq!(response["hits"][0]["id"], json!(654));
                 assert_eq!(response["hits"][1]["id"], json!(951));

@@ -82,7 +65,7 @@ async fn search_with_filter_array_notation() {
             "filter": ["title = Gläss"]
         }))
         .await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
     assert_eq!(response["hits"].as_array().unwrap().len(), 1);
 
     let (response, code) = index

@@ -90,7 +73,7 @@ async fn search_with_filter_array_notation() {
             "filter": [["title = Gläss", "title = \"Shazam!\"", "title = \"Escape Room\""]]
         }))
         .await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
     assert_eq!(response["hits"].as_array().unwrap().len(), 3);
 }

@@ -116,7 +99,7 @@ async fn search_with_contains_filter() {
             "filter": "title CONTAINS cap"
         }))
         .await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
     assert_eq!(response["hits"].as_array().unwrap().len(), 2);
 }

@@ -269,16 +252,14 @@ async fn search_with_pattern_filter_settings() {
 
 #[actix_rt::test]
 async fn search_with_pattern_filter_settings_scenario_1() {
-    let temp = TempDir::new().unwrap();
-    let server = Server::new_with_options(Opt { ..default_settings(temp.path()) }).await.unwrap();
+    let server = Server::new_shared();
 
     eprintln!("Documents -> Settings -> test");
-    let index = server.index("test");
+    let index = server.unique_index();
 
     let (task, code) = index.add_documents(NESTED_DOCUMENTS.clone(), None).await;
-    assert_eq!(code, 202, "{}", task);
-    let response = index.wait_task(task.uid()).await;
-    snapshot!(response["status"], @r###""succeeded""###);
+    assert_eq!(code, 202, "{task}");
+    index.wait_task(task.uid()).await.succeeded();
 
     let (task, code) = index
         .update_settings(json!({"filterableAttributes": [{

@@ -289,9 +270,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
             }
         }]}))
         .await;
-    assert_eq!(code, 202, "{}", task);
-    let response = index.wait_task(task.uid()).await;
-    snapshot!(response["status"], @r###""succeeded""###);
+    assert_eq!(code, 202, "{task}");
+    index.wait_task(task.uid()).await.succeeded();
 
     // Check if the Equality filter works
     index

@@ -335,7 +315,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
     snapshot!(code, @"400 Bad Request");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
+      "message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
       "code": "invalid_search_filter",
       "type": "invalid_request",
      "link": "https://docs.meilisearch.com/errors#invalid_search_filter"

@@ -355,9 +335,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
             }
         }]}))
         .await;
-    assert_eq!(code, 202, "{}", task);
-    let response = index.wait_task(task.uid()).await;
-    snapshot!(response["status"], @r###""succeeded""###);
+    assert_eq!(code, 202, "{task}");
+    index.wait_task(task.uid()).await.succeeded();
 
     // Check if the Equality filter works
     index

@@ -467,9 +446,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
             }
         }]}))
         .await;
-    assert_eq!(code, 202, "{}", task);
-    let response = index.wait_task(task.uid()).await;
-    snapshot!(response["status"], @r###""succeeded""###);
+    assert_eq!(code, 202, "{task}");
+    index.wait_task(task.uid()).await.succeeded();
 
     // Check if the Equality filter returns an error
     index

@@ -481,7 +459,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
     snapshot!(code, @"400 Bad Request");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
+      "message": "Index `[uuid]`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
       "code": "invalid_search_filter",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_search_filter"

@@ -567,9 +545,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
             }
         }]}))
         .await;
-    assert_eq!(code, 202, "{}", task);
-    let response = index.wait_task(task.uid()).await;
-    snapshot!(response["status"], @r###""succeeded""###);
+    assert_eq!(code, 202, "{task}");
+    index.wait_task(task.uid()).await.succeeded();
 
     // Check if the Equality filter works
     index

@@ -613,7 +590,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
     snapshot!(code, @"400 Bad Request");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
+      "message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
       "code": "invalid_search_filter",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_search_filter"
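The hints in these error messages point at the rule-based `filterableAttributes` syntax: each rule matches attribute patterns and carries a `features.filter` object that toggles `equality` and `comparison` independently. The rule bodies are elided in this hunk, so the payload below is an inferred sketch of that shape, not a verbatim quote from the tests:

```rust
fn main() {
    // Inferred from the error hints above; verify against the settings docs.
    let settings = serde_json::json!({
        "filterableAttributes": [{
            "attributePatterns": ["doggos.age"],
            "features": {
                // The first hint suggests enabling `comparison` here so that
                // the `>` operator becomes legal on `doggos.age`.
                "filter": { "equality": true, "comparison": true }
            }
        }]
    });
    println!("{settings:#}");
}
```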
@@ -1,56 +1,14 @@
 use meili_snap::{json_string, snapshot};
 use meilisearch_types::milli::constants::RESERVED_GEO_FIELD_NAME;
-use once_cell::sync::Lazy;
 
-use crate::common::{Server, Value};
+use crate::common::shared_index_with_geo_documents;
 use crate::json;
 
 use super::test_settings_documents_indexing_swapping_and_search;
 
-static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
-    json!([
-        {
-            "id": 1,
-            "name": "Taco Truck",
-            "address": "444 Salsa Street, Burritoville",
-            "type": "Mexican",
-            "rating": 9,
-            "_geo": {
-                "lat": 34.0522,
-                "lng": -118.2437
-            }
-        },
-        {
-            "id": 2,
-            "name": "La Bella Italia",
-            "address": "456 Elm Street, Townsville",
-            "type": "Italian",
-            "rating": 9,
-            "_geo": {
-                "lat": "45.4777599",
-                "lng": "9.1967508"
-            }
-        },
-        {
-            "id": 3,
-            "name": "Crêpe Truck",
-            "address": "2 Billig Avenue, Rouenville",
-            "type": "French",
-            "rating": 10
-        }
-    ])
-});
-
 #[actix_rt::test]
 async fn geo_sort_with_geo_strings() {
-    let server = Server::new().await;
-    let index = server.index("test");
-
-    let documents = DOCUMENTS.clone();
-    index.update_settings_filterable_attributes(json!(["_geo"])).await;
-    index.update_settings_sortable_attributes(json!(["_geo"])).await;
-    let (task, _status_code) = index.add_documents(documents, None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    let index = shared_index_with_geo_documents().await;
 
     index
         .search(

@@ -59,7 +17,7 @@ async fn geo_sort_with_geo_strings() {
                 "sort": ["_geoPoint(0.0, 0.0):asc"]
             }),
             |response, code| {
-                assert_eq!(code, 200, "{}", response);
+                assert_eq!(code, 200, "{response}");
             },
         )
         .await;

@@ -67,14 +25,7 @@ async fn geo_sort_with_geo_strings() {
 
 #[actix_rt::test]
 async fn geo_bounding_box_with_string_and_number() {
-    let server = Server::new().await;
-    let index = server.index("test");
-
-    let documents = DOCUMENTS.clone();
-    index.update_settings_filterable_attributes(json!(["_geo"])).await;
-    index.update_settings_sortable_attributes(json!(["_geo"])).await;
-    let (ret, _code) = index.add_documents(documents, None).await;
-    index.wait_task(ret.uid()).await.succeeded();
+    let index = shared_index_with_geo_documents().await;
 
     index
         .search(

@@ -82,7 +33,7 @@ async fn geo_bounding_box_with_string_and_number() {
                 "filter": "_geoBoundingBox([89, 179], [-89, -179])",
             }),
             |response, code| {
-                assert_eq!(code, 200, "{}", response);
+                assert_eq!(code, 200, "{response}");
                 snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
                 {
                   "hits": [

@@ -124,14 +75,7 @@ async fn geo_bounding_box_with_string_and_number() {
 #[actix_rt::test]
 async fn bug_4640() {
     // https://github.com/meilisearch/meilisearch/issues/4640
-    let server = Server::new().await;
-    let index = server.index("test");
-
-    let documents = DOCUMENTS.clone();
-    index.add_documents(documents, None).await;
-    index.update_settings_filterable_attributes(json!(["_geo"])).await;
-    let (ret, _code) = index.update_settings_sortable_attributes(json!(["_geo"])).await;
-    index.wait_task(ret.uid()).await.succeeded();
+    let index = shared_index_with_geo_documents().await;
 
     // Sort the document with the second one first
     index

@@ -140,7 +84,7 @@ async fn bug_4640() {
                 "sort": ["_geoPoint(45.4777599, 9.1967508):asc"],
             }),
             |response, code| {
-                assert_eq!(code, 200, "{}", response);
+                assert_eq!(code, 200, "{response}");
                 snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
                 {
                   "hits": [

@@ -203,7 +147,7 @@ async fn geo_asc_with_words() {
         &json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
         &json!({"q": "jean"}),
         |response, code| {
-            assert_eq!(code, 200, "{}", response);
+            assert_eq!(code, 200, "{response}");
             snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
             {
               "hits": [

@@ -248,7 +192,7 @@ async fn geo_asc_with_words() {
         &json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
         &json!({"q": "bob"}),
         |response, code| {
-            assert_eq!(code, 200, "{}", response);
+            assert_eq!(code, 200, "{response}");
             snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
             {
               "hits": [

@@ -285,7 +229,7 @@ async fn geo_asc_with_words() {
         &json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
         &json!({"q": "intel"}),
         |response, code| {
-            assert_eq!(code, 200, "{}", response);
+            assert_eq!(code, 200, "{response}");
             snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
             {
               "hits": [

@@ -325,7 +269,7 @@ async fn geo_sort_with_words() {
         &json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "sort"], "sortableAttributes": [RESERVED_GEO_FIELD_NAME]}),
         &json!({"q": "jean", "sort": ["_geoPoint(0.0, 0.0):asc"]}),
         |response, code| {
-            assert_eq!(code, 200, "{}", response);
+            assert_eq!(code, 200, "{response}");
             snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
             {
               "hits": [
@@ -2,31 +2,31 @@ use meili_snap::snapshot;
 use once_cell::sync::Lazy;
 
 use crate::common::index::Index;
-use crate::common::{Server, Value};
+use crate::common::{Server, Shared, Value};
 use crate::json;
 
 async fn index_with_documents_user_provided<'a>(
-    server: &'a Server,
+    server: &'a Server<Shared>,
     documents: &Value,
 ) -> Index<'a> {
-    let index = server.index("test");
+    let index = server.unique_index();
 
     let (response, code) = index
         .update_settings(json!({ "embedders": {"default": {
            "source": "userProvided",
            "dimensions": 2}}} ))
         .await;
-    assert_eq!(202, code, "{:?}", response);
+    assert_eq!(202, code, "{response:?}");
     index.wait_task(response.uid()).await.succeeded();
 
     let (response, code) = index.add_documents(documents.clone(), None).await;
-    assert_eq!(202, code, "{:?}", response);
+    assert_eq!(202, code, "{response:?}");
     index.wait_task(response.uid()).await.succeeded();
     index
 }
 
-async fn index_with_documents_hf<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
-    let index = server.index("test");
+async fn index_with_documents_hf<'a>(server: &'a Server<Shared>, documents: &Value) -> Index<'a> {
+    let index = server.unique_index();

@@ -36,11 +36,11 @@ async fn index_with_documents_hf<'a>(server: &'a Server, documents: &Value) -> I
         "documentTemplate": "{{doc.title}}, {{doc.desc}}"
         }}} ))
         .await;
-    assert_eq!(202, code, "{:?}", response);
+    assert_eq!(202, code, "{response:?}");
     index.wait_task(response.uid()).await.succeeded();
 
     let (response, code) = index.add_documents(documents.clone(), None).await;
-    assert_eq!(202, code, "{:?}", response);
+    assert_eq!(202, code, "{response:?}");
     index.wait_task(response.uid()).await.succeeded();
     index
 }

@@ -139,8 +139,8 @@ static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
 
 #[actix_rt::test]
 async fn simple_search() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
 
     let (response, code) = index
         .search_post(

@@ -172,8 +172,8 @@ async fn simple_search() {
 
 #[actix_rt::test]
 async fn limit_offset() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
 
     let (response, code) = index
         .search_post(

@@ -185,8 +185,8 @@ async fn limit_offset() {
     snapshot!(response["semanticHitCount"], @"0");
     assert_eq!(response["hits"].as_array().unwrap().len(), 1);
 
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
 
     let (response, code) = index
         .search_post(

@@ -201,8 +201,8 @@ async fn limit_offset() {
 
 #[actix_rt::test]
 async fn simple_search_hf() {
-    let server = Server::new().await;
-    let index = index_with_documents_hf(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_hf(server, &SIMPLE_SEARCH_DOCUMENTS).await;
 
     let (response, code) = index
         .search_post(

@@ -253,8 +253,8 @@ async fn simple_search_hf() {
 
 #[actix_rt::test]
 async fn distribution_shift() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
 
     let search = json!({"q": "Captain", "vector": [1.0, 1.0], "showRankingScore": true, "hybrid": {"embedder": "default", "semanticRatio": 1.0}, "retrieveVectors": true});
     let (response, code) = index.search_post(search.clone()).await;

@@ -275,7 +275,7 @@ async fn distribution_shift() {
         .await;
 
     snapshot!(code, @"202 Accepted");
-    let response = server.wait_task(response.uid()).await;
+    let response = server.wait_task(response.uid()).await.succeeded();
     snapshot!(response["details"], @r#"{"embedders":{"default":{"distribution":{"mean":0.998,"sigma":0.01}}}}"#);
 
     let (response, code) = index.search_post(search).await;

@@ -285,8 +285,8 @@ async fn distribution_shift() {
 
 #[actix_rt::test]
 async fn highlighter() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
 
     let (response, code) = index
         .search_post(json!({"q": "Captain Marvel", "vector": [1.0, 1.0],

@@ -340,8 +340,8 @@ async fn highlighter() {
 
 #[actix_rt::test]
 async fn invalid_semantic_ratio() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
 
     let (response, code) = index
         .search_post(

@@ -412,8 +412,8 @@ async fn invalid_semantic_ratio() {
 
 #[actix_rt::test]
 async fn single_document() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SINGLE_DOCUMENT_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SINGLE_DOCUMENT_VEC).await;
 
     let (response, code) = index
         .search_post(

@@ -428,8 +428,8 @@ async fn single_document() {
 
 #[actix_rt::test]
 async fn query_combination() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
 
     // search without query and vector, but with hybrid => still placeholder
     let (response, code) = index

@@ -538,8 +538,8 @@ async fn query_combination() {
 // see <https://github.com/meilisearch/meilisearch/issues/5526>
 #[actix_rt::test]
 async fn distinct_is_applied() {
-    let server = Server::new().await;
-    let index = index_with_documents_user_provided(&server, &TEST_DISTINCT_DOCUMENTS).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_user_provided(server, &TEST_DISTINCT_DOCUMENTS).await;
 
     let (response, code) = index.update_settings(json!({ "distinctAttribute": "distinct" } )).await;
     assert_eq!(202, code, "{:?}", response);

@@ -581,8 +581,8 @@ async fn distinct_is_applied() {
 
 #[actix_rt::test]
 async fn retrieve_vectors() {
-    let server = Server::new().await;
-    let index = index_with_documents_hf(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
+    let server = Server::new_shared();
+    let index = index_with_documents_hf(server, &SIMPLE_SEARCH_DOCUMENTS).await;
 
     let (response, code) = index
         .search_post(

@@ -632,7 +632,7 @@ async fn retrieve_vectors() {
     let (response, code) = index
         .update_settings(json!({ "displayedAttributes": ["id", "title", "desc", "_vectors"]} ))
         .await;
-    assert_eq!(202, code, "{:?}", response);
+    assert_eq!(202, code, "{response:?}");
     index.wait_task(response.uid()).await.succeeded();
 
     let (response, code) = index

@@ -682,7 +682,7 @@ async fn retrieve_vectors() {
     // remove `_vectors` from displayed attributes
     let (response, code) =
         index.update_settings(json!({ "displayedAttributes": ["id", "title", "desc"]} )).await;
-    assert_eq!(202, code, "{:?}", response);
+    assert_eq!(202, code, "{response:?}");
     index.wait_task(response.uid()).await.succeeded();
 
     let (response, code) = index
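For reference, the hybrid request these tests exercise pairs a keyword query with a user-provided vector; the payload below is copied from the `distribution_shift` test above, so nothing in it is invented:

```rust
fn main() {
    // Same shape as the `search_post` payload in `distribution_shift`.
    let request = serde_json::json!({
        "q": "Captain",
        "vector": [1.0, 1.0],
        "showRankingScore": true,
        // semanticRatio 1.0 is fully semantic; 0.0 would be fully keyword.
        "hybrid": { "embedder": "default", "semanticRatio": 1.0 },
        "retrieveVectors": true
    });
    println!("{request:#}");
}
```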
@@ -2296,6 +2296,7 @@ async fn error_remote_500_once() {
 }
 
 #[actix_rt::test]
+#[ignore]
 async fn error_remote_timeout() {
     let ms0 = Server::new().await;
     let ms1 = Server::new().await;
@@ -6,8 +6,8 @@ use crate::common::Server;
 use crate::json;
 
 #[actix_rt::test]
-async fn get_settings_unexisting_index() {
-    let server = Server::new().await;
+async fn get_version() {
+    let server = Server::new_shared();
     let (response, code) = server.version().await;
     assert_eq!(code, 200);
     let version = response.as_object().unwrap();

@@ -18,7 +18,7 @@ async fn get_settings_unexisting_index() {
 
 #[actix_rt::test]
 async fn test_healthyness() {
-    let server = Server::new().await;
+    let server = Server::new_shared();
 
     let (response, status_code) = server.service.get("/health").await;
     assert_eq!(status_code, 200);

@@ -55,7 +55,7 @@ async fn stats() {
     ]);
 
     let (response, code) = index.add_documents(documents, None).await;
-    assert_eq!(code, 202, "{}", response);
+    assert_eq!(code, 202, "{response}");
     assert_eq!(response["taskUid"], 1);
 
     index.wait_task(response.uid()).await.succeeded();

@@ -78,8 +78,8 @@ async fn stats() {
 
 #[actix_rt::test]
 async fn add_remove_embeddings() {
-    let server = Server::new().await;
-    let index = server.index("doggo");
+    let server = Server::new_shared();
+    let index = server.unique_index();
 
     let (response, code) = index
         .update_settings(json!({

@@ -216,8 +216,8 @@ async fn add_remove_embeddings() {
 
 #[actix_rt::test]
 async fn add_remove_embedded_documents() {
-    let server = Server::new().await;
-    let index = server.index("doggo");
+    let server = Server::new_shared();
+    let index = server.unique_index();
 
     let (response, code) = index
         .update_settings(json!({

@@ -293,8 +293,8 @@ async fn add_remove_embedded_documents() {
 
 #[actix_rt::test]
 async fn update_embedder_settings() {
-    let server = Server::new().await;
-    let index = server.index("doggo");
+    let server = Server::new_shared();
+    let index = server.unique_index();
 
     // 2 embedded documents for 3 embeddings in total
     // but no embedders are added in the settings yet so we expect 0 embedded documents for 0 embeddings in total
@@ -1,8 +1,7 @@
mod errors;
mod webhook;

-use meili_snap::insta::assert_json_snapshot;
-use meili_snap::snapshot;
+use meili_snap::{json_string, snapshot};
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

@@ -11,14 +10,12 @@ use crate::json;

#[actix_rt::test]
async fn error_get_unexisting_task_status() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
-    let (response, code) = index.get_task(1).await;
+    let server = Server::new_shared();
+    let index = server.unique_index();
+    let (response, code) = index.get_task(u32::MAX as u64).await;

    let expected_response = json!({
-        "message": "Task `1` not found.",
+        "message": "Task `4294967295` not found.",
        "code": "task_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#task_not_found"
@@ -30,8 +27,8 @@ async fn error_get_unexisting_task_status() {

#[actix_rt::test]
async fn get_task_status() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    let (create_task, _status_code) = index.create(None).await;
    let (add_task, _status_code) = index
        .add_documents(
@@ -42,7 +39,7 @@ async fn get_task_status() {
            None,
        )
        .await;
-    index.wait_task(create_task.uid()).await.succeeded();
+    server.wait_task(create_task.uid()).await.succeeded();
    let (_response, code) = index.get_task(add_task.uid()).await;
    assert_eq!(code, 200);
    // TODO check response format, as per #48
@@ -50,10 +47,11 @@ async fn get_task_status() {

#[actix_rt::test]
async fn list_tasks() {
+    // Do not use a shared server because we want to assert stuff against the global list of tasks
    let server = Server::new().await;
    let index = server.index("test");
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    index
        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
        .await;
@@ -64,6 +62,7 @@ async fn list_tasks() {

#[actix_rt::test]
async fn list_tasks_pagination_and_reverse() {
+    // do not use a shared server here, as we want to assert tasks ids and we need them to be stable
    let server = Server::new().await;
    // First of all we want to create a lot of tasks very quickly. The fastest way is to delete a lot of unexisting indexes
    let mut last_task = None;
@@ -71,7 +70,7 @@ async fn list_tasks_pagination_and_reverse() {
        let index = server.index(format!("test-{i}"));
        last_task = Some(index.create(None).await.0.uid());
    }
-    server.wait_task(last_task.unwrap()).await;
+    server.wait_task(last_task.unwrap()).await.succeeded();

    let (response, code) = server.tasks_filter("limit=3").await;
    assert_eq!(code, 200);
@@ -103,13 +102,14 @@ async fn list_tasks_pagination_and_reverse() {
#[actix_rt::test]
async fn list_tasks_with_star_filters() {
    let server = Server::new().await;
+    // Do not use a unique index here, as we want to test the `indexUids=*` filter.
    let index = server.index("test");
    let (task, _code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    index
        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
        .await;
-    let (response, code) = index.service.get("/tasks?indexUids=test").await;
+    let (response, code) = index.service.get(format!("/tasks?indexUids={}", index.uid)).await;
    assert_eq!(code, 200);
    assert_eq!(response["results"].as_array().unwrap().len(), 2);

@@ -127,93 +127,102 @@ async fn list_tasks_with_star_filters() {

    let (response, code) =
        index.service.get("/tasks?types=*,documentAdditionOrUpdate&statuses=*").await;
-    assert_eq!(code, 200, "{:?}", response);
+    assert_eq!(code, 200, "{response:?}");
    assert_eq!(response["results"].as_array().unwrap().len(), 2);

    let (response, code) = index
        .service
-        .get("/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test")
+        .get(format!(
+            "/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids={}",
+            index.uid
+        ))
        .await;
-    assert_eq!(code, 200, "{:?}", response);
+    assert_eq!(code, 200, "{response:?}");
    assert_eq!(response["results"].as_array().unwrap().len(), 2);

    let (response, code) = index
        .service
        .get("/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test,*")
        .await;
-    assert_eq!(code, 200, "{:?}", response);
+    assert_eq!(code, 200, "{response:?}");
    assert_eq!(response["results"].as_array().unwrap().len(), 2);
}

#[actix_rt::test]
async fn list_tasks_status_filtered() {
+    // Do not use a shared server because we want to assert stuff against the global list of tasks
    let server = Server::new().await;
    let index = server.index("test");
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.failed();
+    server.wait_task(task.uid()).await.failed();

    let (response, code) = index.filtered_tasks(&[], &["succeeded"], &[]).await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 1);

    let (response, code) = index.filtered_tasks(&[], &["succeeded"], &[]).await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 1);

    let (response, code) = index.filtered_tasks(&[], &["succeeded", "failed"], &[]).await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 2);
}

#[actix_rt::test]
async fn list_tasks_type_filtered() {
+    // Do not use a shared server because we want to assert stuff against the global list of tasks
    let server = Server::new().await;
    let index = server.index("test");
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    index
        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
        .await;

    let (response, code) = index.filtered_tasks(&["indexCreation"], &[], &[]).await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 1);

    let (response, code) =
        index.filtered_tasks(&["indexCreation", "documentAdditionOrUpdate"], &[], &[]).await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 2);
}

#[actix_rt::test]
async fn list_tasks_invalid_canceled_by_filter() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
-    index
+    server.wait_task(task.uid()).await.succeeded();
+
+    let (task, _code) = index
        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
        .await;
+    server.wait_task(task.uid()).await.succeeded();

-    let (response, code) = index.filtered_tasks(&[], &[], &["0"]).await;
-    assert_eq!(code, 200, "{}", response);
+    let (response, code) =
+        index.filtered_tasks(&[], &[], &[format!("{}", task.uid()).as_str()]).await;
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 0);
}

#[actix_rt::test]
async fn list_tasks_status_and_type_filtered() {
+    // Do not use a shared server because we want to assert stuff against the global list of tasks
    let server = Server::new().await;
    let index = server.index("test");
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    index
        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
        .await;

    let (response, code) = index.filtered_tasks(&["indexCreation"], &["failed"], &[]).await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 0);

    let (response, code) = index
@@ -223,12 +232,12 @@ async fn list_tasks_status_and_type_filtered() {
        &[],
    )
    .await;
-    assert_eq!(code, 200, "{}", response);
+    assert_eq!(code, 200, "{response}");
    assert_eq!(response["results"].as_array().unwrap().len(), 2);
}

macro_rules! assert_valid_summarized_task {
-    ($response:expr, $task_type:literal, $index:literal) => {{
+    ($response:expr, $task_type:literal, $index:tt) => {{
        assert_eq!($response.as_object().unwrap().len(), 5);
        assert!($response["taskUid"].as_u64().is_some());
        assert_eq!($response["indexUid"], $index);
@@ -242,49 +251,49 @@ macro_rules! assert_valid_summarized_task {

#[actix_web::test]
async fn test_summarized_task_view() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
+    let index_uid = index.uid.clone();

    let (response, _) = index.create(None).await;
-    assert_valid_summarized_task!(response, "indexCreation", "test");
+    assert_valid_summarized_task!(response, "indexCreation", index_uid);

    let (response, _) = index.update(None).await;
-    assert_valid_summarized_task!(response, "indexUpdate", "test");
+    assert_valid_summarized_task!(response, "indexUpdate", index_uid);

    let (response, _) = index.update_settings(json!({})).await;
-    assert_valid_summarized_task!(response, "settingsUpdate", "test");
+    assert_valid_summarized_task!(response, "settingsUpdate", index_uid);

    let (response, _) = index.update_documents(json!([{"id": 1}]), None).await;
-    assert_valid_summarized_task!(response, "documentAdditionOrUpdate", "test");
+    assert_valid_summarized_task!(response, "documentAdditionOrUpdate", index_uid);

    let (response, _) = index.add_documents(json!([{"id": 1}]), None).await;
-    assert_valid_summarized_task!(response, "documentAdditionOrUpdate", "test");
+    assert_valid_summarized_task!(response, "documentAdditionOrUpdate", index_uid);

    let (response, _) = index.delete_document(1).await;
-    assert_valid_summarized_task!(response, "documentDeletion", "test");
+    assert_valid_summarized_task!(response, "documentDeletion", index_uid);

    let (response, _) = index.clear_all_documents().await;
-    assert_valid_summarized_task!(response, "documentDeletion", "test");
+    assert_valid_summarized_task!(response, "documentDeletion", index_uid);

    let (response, _) = index.delete().await;
-    assert_valid_summarized_task!(response, "indexDeletion", "test");
+    assert_valid_summarized_task!(response, "indexDeletion", index_uid);
}

#[actix_web::test]
async fn test_summarized_document_addition_or_update() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    let (task, _status_code) =
        index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await;
-    index.wait_task(task.uid()).await.succeeded();
-    let (task, _) = index.get_task(0).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    server.wait_task(task.uid()).await.succeeded();
+    let (task, _) = index.get_task(task.uid()).await;
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "documentAdditionOrUpdate",
      "canceledBy": null,
@@ -302,15 +311,14 @@ async fn test_summarized_document_addition_or_update() {

    let (task, _status_code) =
        index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
-    index.wait_task(task.uid()).await.succeeded();
-    let (task, _) = index.get_task(1).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    server.wait_task(task.uid()).await.succeeded();
+    let (task, _) = index.get_task(task.uid()).await;
+    snapshot!(task,
        @r###"
    {
-      "uid": 1,
-      "batchUid": 1,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "documentAdditionOrUpdate",
      "canceledBy": null,
@@ -329,18 +337,22 @@ async fn test_summarized_document_addition_or_update() {

#[actix_web::test]
async fn test_summarized_delete_documents_by_batch() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await;
-    index.wait_task(task.uid()).await.failed();
-    let (task, _) = index.get_task(0).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    let server = Server::new_shared();
+    let index = server.unique_index();
+    let non_existing_task_id1 = u32::MAX as u64;
+    let non_existing_task_id2 = non_existing_task_id1 - 1;
+    let non_existing_task_id3 = non_existing_task_id1 - 2;
+    let (task, _status_code) = index
+        .delete_batch(vec![non_existing_task_id1, non_existing_task_id2, non_existing_task_id3])
+        .await;
+    server.wait_task(task.uid()).await.failed();
+    let (task, _) = index.get_task(task.uid()).await;
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "documentDeletion",
      "canceledBy": null,
@@ -350,7 +362,7 @@ async fn test_summarized_delete_documents_by_batch() {
        "originalFilter": null
      },
      "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
        "code": "index_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -364,15 +376,14 @@ async fn test_summarized_delete_documents_by_batch() {

    index.create(None).await;
    let (del_task, _status_code) = index.delete_batch(vec![42]).await;
-    index.wait_task(del_task.uid()).await.succeeded();
+    server.wait_task(del_task.uid()).await.succeeded();
    let (task, _) = index.get_task(del_task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 2,
-      "batchUid": 2,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "documentDeletion",
      "canceledBy": null,
@@ -392,20 +403,19 @@ async fn test_summarized_delete_documents_by_batch() {

#[actix_web::test]
async fn test_summarized_delete_documents_by_filter() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();

    let (task, _status_code) =
        index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    index.wait_task(task.uid()).await.failed();
+    server.wait_task(task.uid()).await.failed();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "documentDeletion",
      "canceledBy": null,
@@ -415,7 +425,7 @@ async fn test_summarized_delete_documents_by_filter() {
        "originalFilter": "\"doggo = bernese\""
      },
      "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
        "code": "index_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -430,15 +440,14 @@ async fn test_summarized_delete_documents_by_filter() {
    index.create(None).await;
    let (task, _status_code) =
        index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    index.wait_task(task.uid()).await.failed();
+    server.wait_task(task.uid()).await.failed();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 2,
-      "batchUid": 2,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "documentDeletion",
      "canceledBy": null,
@@ -448,7 +457,7 @@ async fn test_summarized_delete_documents_by_filter() {
        "originalFilter": "\"doggo = bernese\""
      },
      "error": {
-        "message": "Index `test`: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
+        "message": "Index `[uuid]`: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
        "code": "invalid_document_filter",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
@@ -463,15 +472,14 @@ async fn test_summarized_delete_documents_by_filter() {
    index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
    let (task, _status_code) =
        index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 4,
-      "batchUid": 4,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "documentDeletion",
      "canceledBy": null,
@@ -491,18 +499,17 @@ async fn test_summarized_delete_documents_by_filter() {

#[actix_web::test]
async fn test_summarized_delete_document_by_id() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    let (task, _status_code) = index.delete_document(1).await;
-    index.wait_task(task.uid()).await.failed();
+    server.wait_task(task.uid()).await.failed();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "documentDeletion",
      "canceledBy": null,
@@ -512,7 +519,7 @@ async fn test_summarized_delete_document_by_id() {
        "originalFilter": null
      },
      "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
        "code": "index_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -526,15 +533,14 @@ async fn test_summarized_delete_document_by_id() {

    index.create(None).await;
    let (task, _status_code) = index.delete_document(42).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 2,
-      "batchUid": 2,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "documentDeletion",
      "canceledBy": null,
@@ -554,12 +560,12 @@ async fn test_summarized_delete_document_by_id() {

#[actix_web::test]
async fn test_summarized_settings_update() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    // here we should find my payload even in the failed task.
    let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await;
-    meili_snap::snapshot!(code, @"400 Bad Request");
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
    {
      "message": "Invalid value at `.rankingRules[0]`: `custom` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules.",
      "code": "invalid_settings_ranking_rules",
@@ -569,15 +575,14 @@ async fn test_summarized_settings_update() {
    "###);

    let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "settingsUpdate",
      "canceledBy": null,
@@ -605,18 +610,17 @@ async fn test_summarized_settings_update() {

#[actix_web::test]
async fn test_summarized_index_creation() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "indexCreation",
      "canceledBy": null,
@@ -632,15 +636,14 @@ async fn test_summarized_index_creation() {
    "###);

    let (task, _status_code) = index.create(Some("doggos")).await;
-    index.wait_task(task.uid()).await.failed();
+    server.wait_task(task.uid()).await.failed();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 1,
-      "batchUid": 1,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "indexCreation",
      "canceledBy": null,
@@ -648,7 +651,7 @@ async fn test_summarized_index_creation() {
        "primaryKey": "doggos"
      },
      "error": {
-        "message": "Index `test` already exists.",
+        "message": "Index `[uuid]` already exists.",
        "code": "index_already_exists",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_already_exists"
@@ -663,16 +666,16 @@ async fn test_summarized_index_creation() {

#[actix_web::test]
async fn test_summarized_index_deletion() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    let (ret, _code) = index.delete().await;
-    let task = index.wait_task(ret.uid()).await;
+    let task = server.wait_task(ret.uid()).await;
    snapshot!(task,
        @r###"
    {
      "uid": "[uid]",
      "batchUid": "[batch_uid]",
-      "indexUid": "test",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "indexDeletion",
      "canceledBy": null,
@@ -680,7 +683,7 @@ async fn test_summarized_index_deletion() {
        "deletedDocuments": 0
      },
      "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
        "code": "index_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -697,13 +700,13 @@ async fn test_summarized_index_deletion() {
    // both tasks may get autobatched and the deleted documents count will be wrong.
    let (ret, _code) =
        index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
-    let task = index.wait_task(ret.uid()).await;
+    let task = server.wait_task(ret.uid()).await;
    snapshot!(task,
        @r###"
    {
      "uid": "[uid]",
      "batchUid": "[batch_uid]",
-      "indexUid": "test",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "documentAdditionOrUpdate",
      "canceledBy": null,
@@ -720,13 +723,13 @@ async fn test_summarized_index_deletion() {
    "###);

    let (ret, _code) = index.delete().await;
-    let task = index.wait_task(ret.uid()).await;
+    let task = server.wait_task(ret.uid()).await;
    snapshot!(task,
        @r###"
    {
      "uid": "[uid]",
      "batchUid": "[batch_uid]",
-      "indexUid": "test",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "indexDeletion",
      "canceledBy": null,
@@ -743,13 +746,13 @@ async fn test_summarized_index_deletion() {

    // What happens when you delete an index that doesn't exists.
    let (ret, _code) = index.delete().await;
-    let task = index.wait_task(ret.uid()).await;
+    let task = server.wait_task(ret.uid()).await;
    snapshot!(task,
        @r###"
    {
      "uid": "[uid]",
      "batchUid": "[batch_uid]",
-      "indexUid": "test",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "indexDeletion",
      "canceledBy": null,
@@ -757,7 +760,7 @@ async fn test_summarized_index_deletion() {
        "deletedDocuments": 0
      },
      "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
        "code": "index_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -772,19 +775,18 @@ async fn test_summarized_index_deletion() {

#[actix_web::test]
async fn test_summarized_index_update() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    // If the index doesn't exist yet, we should get errors with or without the primary key.
    let (task, _status_code) = index.update(None).await;
-    index.wait_task(task.uid()).await.failed();
+    server.wait_task(task.uid()).await.failed();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "indexUpdate",
      "canceledBy": null,
@@ -792,7 +794,7 @@ async fn test_summarized_index_update() {
        "primaryKey": null
      },
      "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
        "code": "index_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -805,15 +807,14 @@ async fn test_summarized_index_update() {
    "###);

    let (task, _status_code) = index.update(Some("bones")).await;
-    index.wait_task(task.uid()).await.failed();
+    server.wait_task(task.uid()).await.failed();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 1,
-      "batchUid": 1,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "failed",
      "type": "indexUpdate",
      "canceledBy": null,
@@ -821,7 +822,7 @@ async fn test_summarized_index_update() {
        "primaryKey": "bones"
      },
      "error": {
-        "message": "Index `test` not found.",
+        "message": "Index `[uuid]` not found.",
        "code": "index_not_found",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -837,15 +838,14 @@ async fn test_summarized_index_update() {
    index.create(None).await;

    let (task, _status_code) = index.update(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 3,
-      "batchUid": 3,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "indexUpdate",
      "canceledBy": null,
@@ -861,15 +861,14 @@ async fn test_summarized_index_update() {
    "###);

    let (task, _status_code) = index.update(Some("bones")).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 4,
-      "batchUid": 4,
-      "indexUid": "test",
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
+      "indexUid": "[uuid]",
      "status": "succeeded",
      "type": "indexUpdate",
      "canceledBy": null,
@@ -887,7 +886,7 @@ async fn test_summarized_index_update() {

#[actix_web::test]
async fn test_summarized_index_swap() {
-    let server = Server::new().await;
+    let server = Server::new_shared();
    let (task, _status_code) = server
        .index_swap(json!([
            { "indexes": ["doggos", "cattos"] }
@@ -895,12 +894,11 @@ async fn test_summarized_index_swap() {
        .await;
    server.wait_task(task.uid()).await.failed();
    let (task, _) = server.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
      "indexUid": null,
      "status": "failed",
      "type": "indexSwap",
@@ -928,23 +926,25 @@ async fn test_summarized_index_swap() {
    }
    "###);

-    let (task, _code) = server.index("doggos").create(None).await;
+    let doggos_index = server.unique_index();
+    let (task, _code) = doggos_index.create(None).await;
    server.wait_task(task.uid()).await.succeeded();
-    let (task, _code) = server.index("cattos").create(None).await;
+    let cattos_index = server.unique_index();
+    let (task, _code) = cattos_index.create(None).await;
    server.wait_task(task.uid()).await.succeeded();
    let (task, _code) = server
        .index_swap(json!([
-            { "indexes": ["doggos", "cattos"] }
+            { "indexes": [doggos_index.uid, cattos_index.uid] }
        ]))
        .await;
    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = server.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(json_string!(task,
+        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.indexes[0]" => "doggos", ".**.indexes[1]" => "cattos", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
        @r###"
    {
-      "uid": 3,
-      "batchUid": 3,
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
      "indexUid": null,
      "status": "succeeded",
      "type": "indexSwap",
@@ -970,20 +970,21 @@ async fn test_summarized_index_swap() {

#[actix_web::test]
async fn test_summarized_task_cancelation() {
-    let server = Server::new().await;
-    let index = server.index("doggos");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    // to avoid being flaky we're only going to cancel an already finished task :(
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
-    let (task, _status_code) = server.cancel_tasks("uids=0").await;
-    index.wait_task(task.uid()).await.succeeded();
+    let task_uid = task.uid();
+    server.wait_task(task.uid()).await.succeeded();
+    let (task, _status_code) = server.cancel_tasks(format!("uids={task_uid}").as_str()).await;
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(json_string!(task,
+        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.originalFilter" => "[of]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
        @r###"
    {
-      "uid": 1,
-      "batchUid": 1,
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
      "indexUid": null,
      "status": "succeeded",
      "type": "taskCancelation",
@@ -991,7 +992,7 @@ async fn test_summarized_task_cancelation() {
      "details": {
        "matchedTasks": 1,
        "canceledTasks": 0,
-        "originalFilter": "?uids=0"
+        "originalFilter": "[of]"
      },
      "error": null,
      "duration": "[duration]",
@@ -1004,20 +1005,19 @@ async fn test_summarized_task_cancelation() {

#[actix_web::test]
async fn test_summarized_task_deletion() {
-    let server = Server::new().await;
-    let index = server.index("doggos");
+    let server = Server::new_shared();
+    let index = server.unique_index();
    // to avoid being flaky we're only going to delete an already finished task :(
    let (task, _status_code) = index.create(None).await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _status_code) = server.delete_tasks("uids=0").await;
-    index.wait_task(task.uid()).await.succeeded();
+    server.wait_task(task.uid()).await.succeeded();
    let (task, _) = index.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 1,
-      "batchUid": 1,
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
      "indexUid": null,
      "status": "succeeded",
      "type": "taskDeletion",
@@ -1038,22 +1038,22 @@ async fn test_summarized_task_deletion() {

#[actix_web::test]
async fn test_summarized_dump_creation() {
+    // Do not use a shared server because it takes too long to create a dump
    let server = Server::new().await;
    let (task, _status_code) = server.create_dump().await;
    server.wait_task(task.uid()).await;
    let (task, _) = server.get_task(task.uid()).await;
-    assert_json_snapshot!(task,
-        { ".details.dumpUid" => "[dumpUid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+    snapshot!(task,
        @r###"
    {
-      "uid": 0,
-      "batchUid": 0,
+      "uid": "[uid]",
+      "batchUid": "[batch_uid]",
      "indexUid": null,
      "status": "succeeded",
      "type": "dumpCreation",
      "canceledBy": null,
      "details": {
-        "dumpUid": "[dumpUid]"
+        "dumpUid": "[dump_uid]"
      },
      "error": null,
      "duration": "[duration]",
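
A second recurring change above replaces insta's `assert_json_snapshot!` with `meili_snap`'s `snapshot!`/`json_string!`, redacting volatile fields into stable placeholders so the snapshots no longer depend on absolute task uids or on a fixed index name. A sketch of the redaction style, reusing only selectors that appear in the hunks above (snapshot body abridged):

    // Each `".path" => "[placeholder]"` pair rewrites the matching JSON
    // field before the snapshot comparison runs.
    snapshot!(json_string!(task,
        { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]" }),
        @r###"
    {
      "uid": "[uid]",
      "batchUid": "[batch_uid]",
      "duration": "[duration]"
    }
    "###);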
@@ -111,7 +111,6 @@ utoipa = { version = "5.3.1", features = [
    "openapi_extensions",
] }
lru = "0.13.0"
-blake2 = "0.10.6"

[dev-dependencies]
mimalloc = { version = "0.1.43", default-features = false }
crates/milli/src/bin/embedder_settings.rs (new file, 423 lines)
@@ -0,0 +1,423 @@
use std::io::Write;

use milli::vector::settings::{
    EmbedderSource, EmbeddingSettings, FieldStatus, MetaEmbeddingSetting, NestingContext,
    ReindexOutcome,
};

pub trait Formatter {
    fn begin_document(&mut self);
    fn end_document(&mut self);

    fn begin_header(&mut self);
    fn put_source_header(&mut self, source: EmbedderSource);
    fn end_header(&mut self);

    fn begin_setting(
        &mut self,
        setting: MetaEmbeddingSetting,
        description: &'static str,
        kind: &'static str,
        reindex_outcome: ReindexOutcome,
        default_value: &'static str,
    );
    fn end_setting(&mut self, setting: MetaEmbeddingSetting);

    fn put_setting_status(
        &mut self,
        source: EmbedderSource,
        field_status_by_nesting_context: FieldStatusByNestingContext,
    );
}

pub struct GitHubMdFormatter<W> {
    w: W,
}

impl<W: Write> GitHubMdFormatter<W> {
    pub fn new(w: W) -> Self {
        Self { w }
    }
}

impl<W: Write> Formatter for GitHubMdFormatter<W> {
    fn begin_document(&mut self) {
        let s = r#"

<table>
<tbody>
"#;
        write!(self.w, "{s}").unwrap()
    }

    fn end_document(&mut self) {
        write!(
            self.w,
            r#"
</tbody>
</table>
"#
        )
        .unwrap()
    }

    fn begin_header(&mut self) {
        write!(
            self.w,
            r#"
<thead>
<tr>
<th>Setting</th>
<th>Description</th>
<th>Type</th>
<th>Default Value</th>
<th>Regenerate on Change</th>
<th colspan="6">Availability for source</th>
</tr>
<tr>
<th colspan="5"></th>
"#
        )
        .unwrap()
    }
    fn put_source_header(&mut self, source: EmbedderSource) {
        write!(
            self.w,
            r#"
<th>

{source}

</th>
"#
        )
        .unwrap()
    }
    fn end_header(&mut self) {
        write!(
            self.w,
            r#"
</tr>
</thead>
"#
        )
        .unwrap()
    }

    fn begin_setting(
        &mut self,
        setting: MetaEmbeddingSetting,
        description: &'static str,
        kind: &'static str,
        reindex_outcome: ReindexOutcome,
        default_value: &'static str,
    ) {
        let name = setting.name();
        let reindex_outcome = match reindex_outcome {
            ReindexOutcome::AlwaysReindex => "🏗️ Always",
            ReindexOutcome::NeverReindex => "🌱 Never",
            ReindexOutcome::ReindexSometimes(sometimes) => sometimes,
        };
        write!(
            self.w,
            r#"
<tr>
<td>

`{name}`

</td>
<td>

{description}

</td>
<td>

{kind}

</td>
<td>

{default_value}

</td>
<td>

{reindex_outcome}

</td>
"#
        )
        .unwrap()
    }

    fn end_setting(&mut self, _setting: MetaEmbeddingSetting) {
        write!(
            self.w,
            r#"


</tr>
"#
        )
        .unwrap()
    }

    fn put_setting_status(
        &mut self,
        _source: EmbedderSource,
        field_status_by_nesting_context: FieldStatusByNestingContext,
    ) {
        let field_status = match field_status_by_nesting_context {
            FieldStatusByNestingContext::Invariant(field_status) => {
                format_field_status(field_status).to_string()
            }
            FieldStatusByNestingContext::Variant(variant_field_status_by_nesting_context) => {
                format!(
                    r#"
- Usually, {}
- When used in `searchEmbedder` in a `composite` embedder, {}
- When used in `indexingEmbedder` in a `composite` embedder, {}
"#,
                    format_field_status(variant_field_status_by_nesting_context.not_nested),
                    format_field_status(variant_field_status_by_nesting_context.search),
                    format_field_status(variant_field_status_by_nesting_context.index)
                )
            }
        };
        write!(
            self.w,
            r#"
<td>

{field_status}

</td>
"#
        )
        .unwrap();
    }
}

fn format_field_status(field_status: FieldStatus) -> &'static str {
    match field_status {
        FieldStatus::Mandatory => "🔐 **Mandatory**",
        FieldStatus::Allowed => "✅ Allowed",
        FieldStatus::Disallowed => "🚫 Disallowed",
    }
}

pub struct GitHubMdAvailabilityFormatter<W>(pub GitHubMdFormatter<W>);
impl<W: Write> Formatter for GitHubMdAvailabilityFormatter<W> {
    fn begin_document(&mut self) {
        write!(self.0.w, "## Availability of the settings depending on the selected source\n\n")
            .unwrap();
        self.0.begin_document();
    }

    fn end_document(&mut self) {
        self.0.end_document();
    }

    fn begin_header(&mut self) {
        write!(
            self.0.w,
            r#"
<thead>
<tr>
<th>Setting</th>
"#
        )
        .unwrap()
    }

    fn put_source_header(&mut self, source: EmbedderSource) {
        self.0.put_source_header(source);
    }

    fn end_header(&mut self) {
        self.0.end_header();
    }

    fn begin_setting(
        &mut self,
        setting: MetaEmbeddingSetting,
        _description: &'static str,
        _kind: &'static str,
        _reindex_outcome: ReindexOutcome,
        _default_value: &'static str,
    ) {
        if setting == MetaEmbeddingSetting::Source {
            return;
        }
        let name = setting.name();
        write!(
            self.0.w,
            r#"
<tr>
<td>

`{name}`

</td>
"#
        )
        .unwrap()
    }

    fn end_setting(&mut self, setting: MetaEmbeddingSetting) {
        if setting == MetaEmbeddingSetting::Source {
            return;
        }
        self.0.end_setting(setting);
    }

    fn put_setting_status(
        &mut self,
        source: EmbedderSource,
        field_status_by_nesting_context: FieldStatusByNestingContext,
    ) {
        self.0.put_setting_status(source, field_status_by_nesting_context);
    }
}

pub struct GitHubMdBasicFormatter<W>(pub GitHubMdFormatter<W>);
impl<W: Write> Formatter for GitHubMdBasicFormatter<W> {
    fn begin_document(&mut self) {
        write!(self.0.w, "## List of the embedder settings\n\n").unwrap();
        self.0.begin_document();
    }

    fn end_document(&mut self) {
        self.0.end_document();
    }

    fn begin_header(&mut self) {
        write!(
            self.0.w,
            r#"
<thead>
<tr>
<th>Setting</th>
<th>Description</th>
<th>Type</th>
<th>Default Value</th>
<th>Regenerate on Change</th>
"#
        )
        .unwrap()
    }

    fn put_source_header(&mut self, _source: EmbedderSource) {}

    fn end_header(&mut self) {
        self.0.end_header();
    }

    fn begin_setting(
        &mut self,
        setting: MetaEmbeddingSetting,
        description: &'static str,
        kind: &'static str,
        reindex_outcome: ReindexOutcome,
        default_value: &'static str,
    ) {
        self.0.begin_setting(setting, description, kind, reindex_outcome, default_value);
    }

    fn end_setting(&mut self, setting: MetaEmbeddingSetting) {
        self.0.end_setting(setting);
    }

    fn put_setting_status(
        &mut self,
        _source: EmbedderSource,
        _field_status_by_nesting_context: FieldStatusByNestingContext,
    ) {
    }
}

pub enum FieldStatusByNestingContext {
    Invariant(FieldStatus),
    Variant(VariantFieldStatusByNestingContext),
}

pub struct VariantFieldStatusByNestingContext {
    not_nested: FieldStatus,
    search: FieldStatus,
    index: FieldStatus,
}

fn format_settings(mut fmt: impl Formatter) {
    #![allow(unused_labels)] // the labels are used as documentation
    fmt.begin_document();
    fmt.begin_header();
    for source in enum_iterator::all::<EmbedderSource>() {
        fmt.put_source_header(source);
    }
    fmt.end_header();
    'setting: for setting in enum_iterator::all::<MetaEmbeddingSetting>() {
        let description = setting.description();
        let kind = setting.kind();
        let reindex_outcome = setting.reindex_outcome();
        let default_value = setting.default_value();
        fmt.begin_setting(setting, description, kind, reindex_outcome, default_value);

        'source: for source in enum_iterator::all::<EmbedderSource>() {
            if setting == MetaEmbeddingSetting::Source {
                break 'source;
            }
            let mut field_status = VariantFieldStatusByNestingContext {
                not_nested: FieldStatus::Disallowed,
                search: FieldStatus::Disallowed,
                index: FieldStatus::Disallowed,
            };
            'nesting: for nesting_context in enum_iterator::all::<NestingContext>() {
                let status = EmbeddingSettings::field_status(source, setting, nesting_context);

                match nesting_context {
                    NestingContext::NotNested => {
                        field_status.not_nested = status;
                    }
                    NestingContext::Search => {
                        field_status.search = status;
                    }
                    NestingContext::Indexing => {
                        field_status.index = status;
                    }
                }
            }
            let field_status_by_nesting_context = if field_status.index == field_status.search
                && field_status.search == field_status.not_nested
            {
                FieldStatusByNestingContext::Invariant(field_status.not_nested)
            } else {
                FieldStatusByNestingContext::Variant(field_status)
            };
            fmt.put_setting_status(source, field_status_by_nesting_context);
        }
        fmt.end_setting(setting);
    }
    fmt.end_document();
}

fn main() {
    let mut std_out = std::io::stdout().lock();

    write!(
        &mut std_out,
        "The tables below have been generated by calling `cargo run --bin embedder_settings`\n\n"
    )
    .unwrap();

    let formatter = GitHubMdFormatter::new(&mut std_out);
    let formatter = GitHubMdBasicFormatter(formatter);
    format_settings(formatter);

    write!(&mut std_out, "\n\n").unwrap();

    let formatter = GitHubMdFormatter::new(&mut std_out);
    let formatter = GitHubMdAvailabilityFormatter(formatter);
    format_settings(formatter);
}
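
`GitHubMdBasicFormatter` and `GitHubMdAvailabilityFormatter` are thin decorators over `GitHubMdFormatter`: each writes its own heading and drops the columns it does not want, then delegates the rest. Adding another output flavor takes one more wrapper; a hypothetical sketch (the wrapper name and the marker comment are invented for illustration), written as if it lived in this same file:

    // Hypothetical: prepend a marker so readers know the table is generated.
    pub struct AnnotatedFormatter<W>(pub GitHubMdFormatter<W>);

    impl<W: Write> Formatter for AnnotatedFormatter<W> {
        fn begin_document(&mut self) {
            writeln!(self.0.w, "<!-- generated by embedder_settings; do not edit by hand -->")
                .unwrap();
            self.0.begin_document();
        }
        fn end_document(&mut self) {
            self.0.end_document();
        }
        fn begin_header(&mut self) {
            self.0.begin_header();
        }
        fn put_source_header(&mut self, source: EmbedderSource) {
            self.0.put_source_header(source);
        }
        fn end_header(&mut self) {
            self.0.end_header();
        }
        fn begin_setting(
            &mut self,
            setting: MetaEmbeddingSetting,
            description: &'static str,
            kind: &'static str,
            reindex_outcome: ReindexOutcome,
            default_value: &'static str,
        ) {
            self.0.begin_setting(setting, description, kind, reindex_outcome, default_value);
        }
        fn end_setting(&mut self, setting: MetaEmbeddingSetting) {
            self.0.end_setting(setting);
        }
        fn put_setting_status(
            &mut self,
            source: EmbedderSource,
            field_status_by_nesting_context: FieldStatusByNestingContext,
        ) {
            self.0.put_setting_status(source, field_status_by_nesting_context);
        }
    }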
@@ -65,7 +65,7 @@ fn default_template() -> liquid::Template {
    new_template(default_template_text()).unwrap()
}

-fn default_template_text() -> &'static str {
+pub(crate) fn default_template_text() -> &'static str {
    "{% for field in fields %}\
    {% if field.is_searchable and field.value != nil %}\
    {{ field.name }}: {{ field.value }}\n\
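
For reference, the default liquid template above emits one `name: value` line per searchable, non-nil field. For a hypothetical document such as `{"title": "Shazam!", "genre": "action"}` with both fields searchable, the rendered text would be roughly:

    title: Shazam!
    genre: action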
@@ -76,7 +76,6 @@ pub fn setup_search_index_with_criteria(criteria: &[Criterion]) -> Index {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -84,7 +84,6 @@ impl TempIndex {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )?;

    if let Some(error) = operation_stats.into_iter().find_map(|stat| stat.error) {
@@ -167,7 +166,6 @@ impl TempIndex {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )?;

    if let Some(error) = operation_stats.into_iter().find_map(|stat| stat.error) {
@@ -242,7 +240,6 @@ fn aborting_indexation() {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -1955,7 +1955,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2008,7 +2007,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2096,7 +2094,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2285,7 +2282,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2348,7 +2344,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2402,7 +2397,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2455,7 +2449,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2510,7 +2503,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2570,7 +2562,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2623,7 +2614,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2676,7 +2666,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2875,7 +2864,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2935,7 +2923,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();

@@ -2992,7 +2979,6 @@ mod tests {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-        &[],
    )
    .unwrap();
@@ -1,6 +1,5 @@
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use blake2::{Blake2b512, Digest};
|
||||
use bumpalo::collections::CollectIn;
|
||||
use bumpalo::Bump;
|
||||
use bumparaw_collections::RawMap;
|
||||
@@ -72,7 +71,6 @@ impl<'pl> DocumentOperation<'pl> {
|
||||
new_fields_ids_map: &mut FieldsIdsMap,
|
||||
must_stop_processing: &MSP,
|
||||
progress: Progress,
|
||||
shards: &[&str],
|
||||
) -> Result<(DocumentOperationChanges<'pl>, Vec<PayloadStats>, Option<PrimaryKey<'pl>>)>
|
||||
where
|
||||
MSP: Fn() -> bool,
|
||||
@@ -110,7 +108,6 @@ impl<'pl> DocumentOperation<'pl> {
|
||||
&docids_version_offsets,
|
||||
IndexDocumentsMethod::ReplaceDocuments,
|
||||
payload,
|
||||
shards,
|
||||
),
|
||||
Payload::Update(payload) => extract_addition_payload_changes(
|
||||
indexer,
|
||||
@@ -124,7 +121,6 @@ impl<'pl> DocumentOperation<'pl> {
|
||||
&docids_version_offsets,
|
||||
IndexDocumentsMethod::UpdateDocuments,
|
||||
payload,
|
||||
shards,
|
||||
),
|
||||
Payload::Deletion(to_delete) => extract_deletion_payload_changes(
|
||||
index,
|
||||
@@ -132,7 +128,6 @@ impl<'pl> DocumentOperation<'pl> {
|
||||
&mut available_docids,
|
||||
&docids_version_offsets,
|
||||
to_delete,
|
||||
shards,
|
||||
),
|
||||
};
|
||||
|
||||
@@ -179,7 +174,6 @@ fn extract_addition_payload_changes<'r, 'pl: 'r>(
|
||||
main_docids_version_offsets: &hashbrown::HashMap<&'pl str, PayloadOperations<'pl>>,
|
||||
method: IndexDocumentsMethod,
|
||||
payload: &'pl [u8],
|
||||
shards: &[&str],
|
||||
) -> Result<hashbrown::HashMap<&'pl str, PayloadOperations<'pl>>> {
|
||||
use IndexDocumentsMethod::{ReplaceDocuments, UpdateDocuments};
|
||||
|
||||
@@ -190,7 +184,7 @@ fn extract_addition_payload_changes<'r, 'pl: 'r>(
|
||||
while let Some(doc) = iter.next().transpose().map_err(InternalError::SerdeJson)? {
|
||||
*bytes = previous_offset as u64;
|
||||
|
||||
// Only guess the primary key if it is the first document and whatever the shard is
|
||||
// Only guess the primary key if it is the first document
|
||||
let retrieved_primary_key = if previous_offset == 0 {
|
||||
let doc = RawMap::from_raw_value_and_hasher(doc, FxBuildHasher, indexer)
|
||||
.map(Some)
|
||||
@@ -219,11 +213,6 @@ fn extract_addition_payload_changes<'r, 'pl: 'r>(
|
||||
let external_id =
|
||||
retrieved_primary_key.extract_fields_and_docid(doc, new_fields_ids_map, indexer)?;
|
||||
|
||||
if must_be_skipped(external_id.to_de(), shards) {
|
||||
previous_offset = iter.byte_offset();
|
||||
continue;
|
||||
}
|
||||
|
||||
let external_id = external_id.to_de();
|
||||
let current_offset = iter.byte_offset();
|
||||
let document_offset = DocumentOffset { content: &payload[previous_offset..current_offset] };
|
||||
@@ -341,15 +330,10 @@ fn extract_deletion_payload_changes<'s, 'pl: 's>(
    available_docids: &mut AvailableIds,
    main_docids_version_offsets: &hashbrown::HashMap<&'s str, PayloadOperations<'pl>>,
    to_delete: &'pl [&'pl str],
-   shards: &[&str],
) -> Result<hashbrown::HashMap<&'s str, PayloadOperations<'pl>>> {
    let mut new_docids_version_offsets = hashbrown::HashMap::<&str, PayloadOperations<'pl>>::new();

    for external_id in to_delete {
-       if must_be_skipped(external_id, shards) {
-           continue;
-       }
-
        match main_docids_version_offsets.get(external_id) {
            None => {
                match index.external_documents_ids().get(rtxn, external_id) {
@@ -628,25 +612,3 @@ pub fn first_update_pointer(docops: &[InnerDocOp]) -> Option<usize> {
        InnerDocOp::Deletion => None,
    })
}
-
-fn must_be_skipped(pk: &str, shards: &[&str]) -> bool {
-    // Special case for no shard: it means we must index the document
-    if shards.is_empty() {
-        return false;
-    }
-
-    // If there are multiple shards, the first shard is ourselves
-    let mut hasher = Blake2b512::new();
-    hasher.update(shards[0].as_bytes());
-    hasher.update(pk.as_bytes());
-    let me = hasher.finalize();
-
-    shards.iter().skip(1).any(|shard| {
-        let mut hasher = Blake2b512::new();
-        hasher.update(shard.as_bytes());
-        hasher.update(pk.as_bytes());
-        let them = hasher.finalize();
-
-        me < them
-    })
-}
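For reference, the helper removed above implemented a rendezvous-style ownership test: hash each (shard, primary key) pair with Blake2b512 and keep the document only when no other shard outscores the local one (always the first entry). Below is a minimal, self-contained sketch of that logic, assuming only the blake2 crate; the shard names, document ids, and the helper names `score` and `is_local` are made up for illustration.

use blake2::{Blake2b512, Digest};

// Hash the (shard, primary key) pair, as the removed helper did.
fn score(shard: &str, pk: &str) -> [u8; 64] {
    let mut hasher = Blake2b512::new();
    hasher.update(shard.as_bytes());
    hasher.update(pk.as_bytes());
    hasher.finalize().into()
}

// A document stays on the local shard (`shards[0]`) unless another shard
// hashes strictly higher for its primary key.
fn is_local(pk: &str, shards: &[&str]) -> bool {
    shards.is_empty() || shards.iter().skip(1).all(|s| score(s, pk) <= score(shards[0], pk))
}

fn main() {
    let shards = ["ds-0", "ds-1", "ds-2"]; // hypothetical names; the first is "ourselves"
    for pk in ["doc-1", "doc-2", "doc-3"] {
        println!("{pk} owned locally: {}", is_local(pk, &shards));
    }
}

Note the strict `<` in the original: in the (cryptographically improbable) event of a hash tie, the local shard keeps the document.
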
@@ -1065,13 +1065,14 @@ fn apply_default_for_source(
    }
}

-pub(crate) enum FieldStatus {
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum FieldStatus {
    Mandatory,
    Allowed,
    Disallowed,
}

-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, enum_iterator::Sequence)]
pub enum NestingContext {
    NotNested,
    Search,
@@ -1108,7 +1109,7 @@ impl NestingContext {
    }
}

-#[derive(Debug, Clone, Copy, enum_iterator::Sequence)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, enum_iterator::Sequence)]
pub enum MetaEmbeddingSetting {
    Source,
    Model,
@@ -1128,8 +1129,14 @@ pub enum MetaEmbeddingSetting {
    BinaryQuantized,
}

+pub enum ReindexOutcome {
+    AlwaysReindex,
+    NeverReindex,
+    ReindexSometimes(&'static str),
+}
+
impl MetaEmbeddingSetting {
-    pub(crate) fn name(&self) -> &'static str {
+    pub fn name(&self) -> &'static str {
        use MetaEmbeddingSetting::*;
        match self {
            Source => "source",
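The new `ReindexOutcome` enum lets callers turn a settings change into a regeneration decision. As a hedged sketch (this `must_regenerate` helper is hypothetical, not a Meilisearch call site), a consumer of `MetaEmbeddingSetting::reindex_outcome()` defined further down might look like:

// Hypothetical consumer: decide whether changing `setting` forces
// embeddings to be regenerated.
fn must_regenerate(setting: MetaEmbeddingSetting) -> bool {
    match setting.reindex_outcome() {
        ReindexOutcome::AlwaysReindex => true,
        ReindexOutcome::NeverReindex => false,
        // Conditional cases carry a human-readable note; a real caller would
        // compare the old and new values, so we conservatively regenerate here.
        ReindexOutcome::ReindexSometimes(note) => {
            eprintln!("conditional reindex for {}: {}", setting.name(), note);
            true
        }
    }
}
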
@@ -1150,6 +1157,159 @@ impl MetaEmbeddingSetting {
            BinaryQuantized => "binaryQuantized",
        }
    }
+
+    pub fn description(&self) -> &'static str {
+        use MetaEmbeddingSetting::*;
+        match self {
+            Source => {
+                r#"
+The source used to provide the embeddings.
+
+Which embedder parameters are available and mandatory is determined by the value of this setting.
+"#
+            }
+            Model => "The name of the model to use.",
+            Revision => {
+                r#"
+The revision (commit SHA1) of the model to use.
+
+If unspecified, Meilisearch picks the latest revision of the model.
+"#
+            }
+            Pooling => "The pooling method to use.",
+            ApiKey => "The API key to pass to the remote embedder while making requests.",
+            Dimensions => "The expected dimensions of the embeddings produced by this embedder.",
+            DocumentTemplate => {
+                r#"
+A liquid template used to render documents to a text that can be embedded.
+
+Meilisearch interpolates the template for each document and sends the resulting text to the embedder.
+The embedder then generates document vectors based on this text.
+"#
+            }
+            DocumentTemplateMaxBytes => {
+                "Rendered texts are truncated to this size before embedding."
+            }
+            Url => "URL to reach the remote embedder.",
+            Request => "Template request to send to the remote embedder.",
+            Response => "Template response indicating how to find the embeddings in the response from the remote embedder.",
+            Headers => "Additional headers to send to the remote embedder.",
+            SearchEmbedder => "Embedder settings for the embedder used at search time.",
+            IndexingEmbedder => "Embedder settings for the embedder used at indexing time.",
+            Distribution => "Affine transformation applied to the semantic score to make it more comparable to the ranking score.",
+            BinaryQuantized => r#"
+Whether to binary quantize the embeddings of this embedder.
+
+Binary quantized embeddings are smaller than regular embeddings, which improves
+disk usage and retrieval speed, at the cost of relevancy.
+"#,
+        }
+    }
+
+    pub fn kind(&self) -> &'static str {
+        use MetaEmbeddingSetting::*;
+        match self {
+            Source => {
+                r#""openAi" | "huggingFace" | "userProvided" | "ollama" | "rest" | "composite""#
+            }
+            Model => "string",
+            Revision => "string",
+            Pooling => r#""useModel" | "forceCls" | "forceMean""#,
+            ApiKey => "string",
+            Dimensions => "number",
+            DocumentTemplate => "string",
+            DocumentTemplateMaxBytes => "number",
+            Url => "string",
+            Request => "any",
+            Response => "any",
+            Headers => "object",
+            SearchEmbedder => "object",
+            IndexingEmbedder => "object",
+            Distribution => "object",
+            BinaryQuantized => "boolean",
+        }
+    }
+
+    pub fn default_value(&self) -> &'static str {
+        use MetaEmbeddingSetting::*;
+        match self {
+            Source => r#""openAi""#,
+            Model => {
+                r#"
+- For source `openAi`, defaults to "text-embedding-3-small"
+- For source `huggingFace`, defaults to "BAAI/bge-base-en-v1.5"
+"#
+            }
+            Revision => {
+                r#"
+- When `model` is set to default, defaults to "617ca489d9e86b49b8167676d8220688b99db36e"
+- Otherwise, defaults to `null`
+"#
+            }
+            Pooling => r#""useModel""#,
+            ApiKey => "`null`",
+            Dimensions => "`null`",
+            DocumentTemplate => crate::prompt::default_template_text(),
+            DocumentTemplateMaxBytes => "400",
+            Url => "`null`",
+            Request => "`null`",
+            Response => "`null`",
+            Headers => "`null`",
+            SearchEmbedder => "`null`",
+            IndexingEmbedder => "`null`",
+            Distribution => "`null`",
+            BinaryQuantized => "`false`",
+        }
+    }
+
+    pub fn reindex_outcome(&self) -> ReindexOutcome {
+        use MetaEmbeddingSetting::*;
+        match self {
+            Source => ReindexOutcome::AlwaysReindex,
+            Model => ReindexOutcome::AlwaysReindex,
+            Revision => ReindexOutcome::AlwaysReindex,
+            Pooling => ReindexOutcome::AlwaysReindex,
+            ApiKey => ReindexOutcome::NeverReindex,
+            Dimensions => ReindexOutcome::ReindexSometimes(
+                r#"
+- 🏗️ When the source is `openAi`, changing the value of this parameter always regenerates embeddings
+- 🌱 For other sources, changing the value of this parameter never regenerates embeddings
+"#,
+            ),
+            DocumentTemplate => ReindexOutcome::ReindexSometimes(
+                r#"
+- 🏗️ When modified, embeddings are regenerated for documents whose rendering through the template produces a different text.
+"#,
+            ),
+            DocumentTemplateMaxBytes => ReindexOutcome::ReindexSometimes(
+                r#"
+- 🏗️ When increased, embeddings are regenerated for documents whose rendering through the template produces a different text.
+- 🌱 When decreased, embeddings are never regenerated
+"#,
+            ),
+            Url => ReindexOutcome::ReindexSometimes(
+                r#"
+- 🌱 When modified for source `openAi`, embeddings are never regenerated
+- 🏗️ When modified for sources `ollama` and `rest`, embeddings are always regenerated
+"#,
+            ),
+            Request => ReindexOutcome::AlwaysReindex,
+            Response => ReindexOutcome::AlwaysReindex,
+            Headers => ReindexOutcome::NeverReindex,
+            SearchEmbedder => ReindexOutcome::NeverReindex,
+            IndexingEmbedder => ReindexOutcome::ReindexSometimes(
+                r#"
+- Embeddings are regenerated when the settings modified in the indexing embedder require regeneration.
+"#,
+            ),
+            Distribution => ReindexOutcome::NeverReindex,
+            BinaryQuantized => ReindexOutcome::ReindexSometimes(
+                r#"
+- Embeddings are not regenerated, but the binary quantization takes time during indexing.
+"#,
+            ),
+        }
+    }
}

impl EmbeddingSettings {
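These metadata accessors are what feed the auto-generated docs/embedder_settings.md introduced at the bottom of this diff. A rough sketch of such a generator, assuming `enum_iterator::all` together with the `Sequence` derive added above — the output shape is illustrative, not the real file format:

// Illustrative generator: walk every setting (via the Sequence derive) and
// emit a markdown section from the metadata accessors defined above.
fn generate_embedder_settings_doc() -> String {
    let mut out = String::from("# Embedder settings\n");
    for setting in enum_iterator::all::<MetaEmbeddingSetting>() {
        out.push_str(&format!("\n## `{}`\n", setting.name()));
        out.push_str(&format!("- Type: `{}`\n", setting.kind()));
        out.push_str(&format!("- Default: {}\n", setting.default_value()));
        out.push_str(&format!("\n{}\n", setting.description().trim()));
    }
    out
}
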
@@ -1311,7 +1471,7 @@ impl EmbeddingSettings {
        }
    }

-    pub(crate) fn field_status(
+    pub fn field_status(
        source: EmbedderSource,
        field: MetaEmbeddingSetting,
        context: NestingContext,
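`FieldStatus` (made public earlier in this diff) encodes whether a parameter is mandatory, allowed, or disallowed for a given source and nesting context. The hunk above cuts off before the end of `field_status`'s signature, so as a purely hypothetical sketch, a validation loop over its result could look like this — `check_field` stands in for the real function:

// Hypothetical validation driver over `FieldStatus`; `check_field` stands in
// for the real `field_status`, whose trailing parameters are truncated above.
fn validate_fields(
    set_fields: &[MetaEmbeddingSetting],
    check_field: impl Fn(MetaEmbeddingSetting) -> FieldStatus,
) -> Result<(), String> {
    for field in enum_iterator::all::<MetaEmbeddingSetting>() {
        let is_set = set_fields.contains(&field); // relies on the new PartialEq derive
        match check_field(field) {
            FieldStatus::Mandatory if !is_set => {
                return Err(format!("missing mandatory field `{}`", field.name()))
            }
            FieldStatus::Disallowed if is_set => {
                return Err(format!("field `{}` is not allowed here", field.name()))
            }
            _ => {}
        }
    }
    Ok(())
}
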
@@ -59,7 +59,6 @@ fn test_facet_distribution_with_no_facet_values() {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-       &[],
    )
    .unwrap();

@@ -95,7 +95,6 @@ pub fn setup_search_index_with_criteria(criteria: &[Criterion]) -> Index {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-       &[],
    )
    .unwrap();

@@ -329,7 +329,6 @@ fn criteria_ascdesc() {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-       &[],
    )
    .unwrap();

@@ -138,7 +138,6 @@ fn test_typo_disabled_on_word() {
        &mut new_fields_ids_map,
        &|| false,
        Progress::default(),
-       &[],
    )
    .unwrap();

5
docs/README.md
Normal file
@@ -0,0 +1,5 @@
+This directory is meant for auto-generated documentation for internal purposes.
+
+Please refer to <https://meilisearch.com/docs> for the public documentation of Meilisearch.
+
+- [Embedder settings auto-generated description](./embedder_settings.md)
1398
docs/embedder_settings.md
Normal file
File diff suppressed because it is too large