Compare commits

103 Commits

SHA1 Message Date
6dc241f9de Fix tests 2025-07-21 15:11:24 +02:00
01d1ef65c4 Update search and docs usages 2025-07-21 15:11:24 +02:00
3246667590 when exporting vectors, force regenerate to false when the embedder has fragments 2025-07-21 15:11:24 +02:00
109395c199 Index::embeddings specifies if the embedder has fragments 2025-07-21 15:11:24 +02:00
a0b71a8785 EmbedderOptions::has_fragments() 2025-07-21 15:11:24 +02:00
00a5c86f13 Remove accidentally added db snap 2025-07-21 15:11:24 +02:00
366c37a686 Fix new indexer 2025-07-21 15:11:23 +02:00
afc164a271 Fix in old indexer 2025-07-21 15:11:23 +02:00
0312fb22b8 Merge pull request #5761 from meilisearch/fix-chat-settings-dumpless-upgrade
Fix chat settings dumpless upgrade
2025-07-17 15:57:39 +00:00
f1d92bfead Make sure the new filter chat setting is set to its default value if missing
2025-07-17 15:36:21 +02:00
a005a062da Add security if chat settings parameters are missing 2025-07-17 15:27:53 +02:00
fd8b2451d7 Merge pull request #5754 from kametsun/fix/incorrect-stats-doc-count
Fix incorrect document count in stats after clearing all documents
2025-07-17 06:48:51 +00:00
058f9ffda5 Merge pull request #5734 from meilisearch/request-fragments-test
Tests for multimodal
2025-07-16 11:04:00 +00:00
5d363205a5 Merge pull request #5716 from meilisearch/document-sorting
Allow sorting on the /documents route
2025-07-16 10:26:50 +00:00
8887cbdcd5 Merge pull request #5725 from meilisearch/fix-threshold-overcounting-bug
Fix Total Hits being wrong when rankingScoreThreshold is used
2025-07-16 07:15:24 +00:00
634865ff53 Merge pull request #5710 from meilisearch/chat-route-support-filters
Introduce filters in the chat completions
2025-07-15 16:10:49 +00:00
36fccf8525 Merge remote-tracking branch 'origin/release-v1.16.0' into fix-threshold-overcounting-bug 2025-07-15 18:01:29 +02:00
d6bd60d569 Apply review suggestions
Co-Authored-By: Louis Dureuil <louis.dureuil@xinra.net>
2025-07-15 18:00:37 +02:00
48ad959fc1 Merge remote-tracking branch 'origin/release-v1.16.0' into document-sorting 2025-07-15 17:41:46 +02:00
1bc30cb4c8 Restore old benchmark names 2025-07-15 17:34:04 +02:00
77138a42d6 Apply review suggestions
Add preconditions

Fix underflow

Remove unwrap

Turn methods to associated functions

Apply review suggestions
2025-07-15 17:31:11 +02:00
0791506124 Fix some proposals 2025-07-15 17:10:45 +02:00
2a015ac3b8 Implement basic few shot prompting to improve the query capabilities 2025-07-15 14:50:10 +02:00
6f248b78a9 Merge pull request #5751 from meilisearch/fix-searchable-attributes-order
Fix: Preserve order of searchable attributes when modified
2025-07-15 10:38:11 +00:00
d694e312ff Update crates/milli/src/update/settings.rs
Co-authored-by: Clément Renault <clement@meilisearch.com>
2025-07-15 11:54:59 +02:00
d76dcc8998 Make clippy happy 2025-07-15 11:49:48 +02:00
e654f66223 Support filtering 2025-07-15 11:49:47 +02:00
34f2ab7093 WIP report search errors to the LLM 2025-07-15 11:49:46 +02:00
1a9dbd364e Fix some issues 2025-07-15 11:49:46 +02:00
662c5d9871 Introduce filters in the chat completions 2025-07-15 11:49:45 +02:00
5cd61b50f9 Fix formatting 2025-07-12 18:19:26 +09:00
9a9be76757 add: assert that the statistics are correctly updated 2025-07-12 11:15:44 +09:00
cfa6ba6c3b Fix stats showing wrong document count after clear all
Update database stats after clearing documents to ensure
/stats endpoint returns correct numberOfDocuments: 0 instead
of stale count.
2025-07-12 11:15:44 +09:00
f4f333dbf6 Merge pull request #5753 from meilisearch/export-fixes
Various fixes on the export route
2025-07-11 19:15:42 +00:00
1ade76ba10 Remove sneaky debug 2025-07-11 12:27:04 +02:00
ae26658913 Use the most appropriate unit in payload_too_large error 2025-07-11 12:27:03 +02:00
aa09edb3fb Fix errors being silently dropped 2025-07-11 12:27:03 +02:00
3f42f1a036 Get rid of bearer 2025-07-11 12:27:03 +02:00
9bdfdd395b Fix document step overflowing 2025-07-11 12:27:03 +02:00
78d0625a91 Decrease default payload size for exports 2025-07-11 12:27:03 +02:00
3f655ea20e compare user defined searchable fields instead of internal searchable fields 2025-07-10 18:24:23 +02:00
50bc1d55f3 Add test reproducing the bug 2025-07-10 18:23:46 +02:00
faa1f7c5b7 Merge pull request #5693 from Mubelotix/default-key
Add a Read-Only Admin API Key by default
2025-07-08 12:38:29 +00:00
9cee432255 Fix broken tests 2025-07-08 13:36:26 +02:00
ff8d48d2f1 Merge branch 'main' into default-key 2025-07-08 12:21:46 +02:00
a56c036994 Update crates/meilisearch-types/src/keys.rs
Co-authored-by: gui machiavelli <hey@guimachiavelli.com>
2025-07-08 12:18:52 +02:00
511c48f520 Merge pull request #5737 from meilisearch/request-fragments-dumpless-upgrade
Fix the dumpless upgrade from v1.15 to v1.16 for request fragments
2025-07-08 08:49:38 +00:00
4623691d1f Don't make the type-that-shall-not-be-written serializable
Following tamo's advice

Co-Authored-By: Tamo <tamo@meilisearch.com>
2025-07-08 10:04:33 +02:00
5f8f48ec95 Add new snapshot checking for regenerativeness 2025-07-07 16:43:05 +02:00
ed2fe365a0 Fix existing snaps 2025-07-07 16:42:50 +02:00
f7c8a77f89 Update v1.12.0 DB to contain vectors 2025-07-07 16:01:50 +02:00
a8030850ee Merge pull request #5733 from meilisearch/improve-export-analytics
Improve the analytics of the `/export` route
2025-07-07 12:26:11 +00:00
70a860a0f0 Merge branch 'main' into fix-threshold-overcounting-bug 2025-07-07 12:26:37 +02:00
a3254d7d7d Implement dumpless upgrade from v1.15 to v1.16 2025-07-07 11:57:08 +02:00
73c9c1ebdc Add compile-time checks for dumpless upgrade 2025-07-07 11:34:18 +02:00
4c7a6e5c1b Do not leak private URLs 2025-07-07 11:07:58 +02:00
07bfed99e6 Expose the host in the analytics 2025-07-04 11:08:02 +02:00
f60814b319 Add benchmark 2025-07-02 12:06:00 +02:00
5a675bcb82 Add benchmarks 2025-07-02 11:50:32 +02:00
600178c5ab Still limit to max hits 2025-07-01 18:33:09 +02:00
dedae94102 Fix #5274 2025-07-01 16:22:25 +02:00
7ae9a4afee Add a test for issue #5274 2025-07-01 15:42:43 +02:00
e92b6beb20 Revert making check_sort_criteria usable without a search context 2025-07-01 14:26:55 +02:00
27cc357362 Document code 2025-07-01 14:21:55 +02:00
73dfeefc7c Remove plural form 2025-07-01 14:08:46 +02:00
d85480de89 Move sort code out of facet 2025-07-01 14:05:47 +02:00
9f55708d84 Format 2025-07-01 13:58:56 +02:00
280c3907be Add test to sort the unsortable 2025-07-01 13:58:37 +02:00
8419fd9b3b Ditch usage of check_sort_criteria 2025-07-01 13:42:38 +02:00
283944ea89 Differentiate between document sort error and search sort error 2025-07-01 12:03:50 +02:00
8aacd6374a Optimize geo sort 2025-07-01 11:50:01 +02:00
8326f34ad1 Add analytics 2025-07-01 11:35:28 +02:00
f4a908669c Add tests 2025-07-01 10:02:15 +02:00
eb2c2815b6 Fix panic 2025-07-01 10:00:10 +02:00
29e9c74a49 Merge two ifs 2025-06-30 16:17:04 +02:00
f6803dd7d1 Simplify iterator chaining in facet sort 2025-06-30 14:05:23 +02:00
f86f4f619f Implement geo sort on documents 2025-06-30 13:57:30 +02:00
e35d58b531 Move geosort code out of search 2025-06-30 13:12:00 +02:00
63827bbee0 Move sorting code out of search 2025-06-30 11:59:59 +02:00
340d9e6edc Optimize facet sort
5 to 10x speedup
2025-06-27 14:40:55 +02:00
28adbc0d18 Update tests 2025-06-27 09:47:46 +02:00
e3fba62e13 Fix typo 2025-06-27 09:40:59 +02:00
fb9170b8e3 Keep name consistent with others 2025-06-27 09:40:30 +02:00
c15763f910 Improve key description
Co-authored-by: Tamo <tamo@meilisearch.com>
2025-06-27 09:39:24 +02:00
4534dc2cab Create another deserr error 2025-06-25 16:45:32 +02:00
b05cb80803 Take sort criteria from the request 2025-06-25 16:41:08 +02:00
6e0526090a Implement sorting documents 2025-06-25 15:36:12 +02:00
2090e9ea31 Update test 2025-06-25 10:08:25 +02:00
1c8f1c18f4 Fix constant name and key description 2025-06-25 09:59:34 +02:00
c4a96b40eb Remove KeysGet from AllGet 2025-06-24 17:40:06 +02:00
2d6dc83940 Format the code 2025-06-19 15:55:12 +02:00
ab768f379f Fix comment 2025-06-19 15:49:34 +02:00
705e9a9e5e Make the uuids random again to prevent abuse using rainbow tables 2025-06-19 15:45:09 +02:00
67f2a30d7c Fix test 2025-06-19 13:10:08 +02:00
99732f4084 Fix some tests 2025-06-19 13:04:55 +02:00
5081d837ea Fix AllGet action being included in All 2025-06-19 12:12:30 +02:00
9e1cb792f4 Rename Action::AllRead to AllGet 2025-06-19 11:55:25 +02:00
b6b7ede266 Rename Action *.read to *.get 2025-06-19 11:53:42 +02:00
f50e586a4f Allow management key to read other keys 2025-06-19 11:52:58 +02:00
11fedea788 Set static uuids to keys 2025-06-19 11:42:45 +02:00
032b34c377 Add a default management key 2025-06-19 11:29:32 +02:00
b421c8e7de Add an AllRead key 2025-06-19 11:29:16 +02:00
00eb258a53 Fix comment 2025-06-19 11:16:07 +02:00
88 changed files with 3413 additions and 3290 deletions

.gitignore

@@ -11,12 +11,18 @@
/bench
/_xtask_benchmark.ms
/benchmarks
.DS_Store
# Snapshots
## ... large
*.full.snap
## ... unreviewed
*.snap.new
## ... pending
*.pending-snap
# Tmp files
.tmp*
# Database snapshot
crates/meilisearch/db.snapshot

Cargo.lock

@@ -3770,7 +3770,6 @@ dependencies = [
"itertools 0.14.0",
"jsonwebtoken",
"lazy_static",
"liquid",
"manifest-dir-macros",
"maplit",
"meili-snap",

crates/benchmarks/Cargo.toml

@@ -51,3 +51,8 @@ harness = false
[[bench]]
name = "indexing"
harness = false
[[bench]]
name = "sort"
harness = false
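With the target declared, the new suite can be run on its own with cargo bench --bench sort from the crate that declares it.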

crates/benchmarks/benches/sort.rs

@@ -0,0 +1,114 @@
//! This benchmark module is used to compare the performance of sorting documents in /search VS /documents
//!
//! The tests/benchmarks were designed in the context of a query returning only 20 documents.
mod datasets_paths;
mod utils;
use criterion::{criterion_group, criterion_main};
use milli::update::Settings;
use utils::Conf;
#[cfg(not(windows))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
fn base_conf(builder: &mut Settings) {
let displayed_fields =
["geonameid", "name", "asciiname", "alternatenames", "_geo", "population"]
.iter()
.map(|s| s.to_string())
.collect();
builder.set_displayed_fields(displayed_fields);
let sortable_fields =
["_geo", "name", "population", "elevation", "timezone", "modification-date"]
.iter()
.map(|s| s.to_string())
.collect();
builder.set_sortable_fields(sortable_fields);
}
#[rustfmt::skip]
const BASE_CONF: Conf = Conf {
dataset: datasets_paths::SMOL_ALL_COUNTRIES,
dataset_format: "jsonl",
configure: base_conf,
primary_key: Some("geonameid"),
queries: &[""],
offsets: &[
Some((0, 20)), // The most common query in the real world
Some((0, 500)), // A query that ranges over many documents
Some((980, 20)), // The worst query that could happen in the real world
Some((800_000, 20)) // The worst query
],
get_documents: true,
..Conf::BASE
};
fn bench_sort(c: &mut criterion::Criterion) {
#[rustfmt::skip]
let confs = &[
utils::Conf {
group_name: "without sort",
sort: None,
..BASE_CONF
},
utils::Conf {
group_name: "sort on many different values",
sort: Some(vec!["name:asc"]),
..BASE_CONF
},
utils::Conf {
group_name: "sort on many similar values",
sort: Some(vec!["timezone:desc"]),
..BASE_CONF
},
utils::Conf {
group_name: "sort on many similar then different values",
sort: Some(vec!["timezone:desc", "name:asc"]),
..BASE_CONF
},
utils::Conf {
group_name: "sort on many different then similar values",
sort: Some(vec!["timezone:desc", "name:asc"]),
..BASE_CONF
},
utils::Conf {
group_name: "geo sort",
sample_size: Some(10),
sort: Some(vec!["_geoPoint(45.4777599, 9.1967508):asc"]),
..BASE_CONF
},
utils::Conf {
group_name: "sort on many similar values then geo sort",
sample_size: Some(50),
sort: Some(vec!["timezone:desc", "_geoPoint(45.4777599, 9.1967508):asc"]),
..BASE_CONF
},
utils::Conf {
group_name: "sort on many different values then geo sort",
sample_size: Some(50),
sort: Some(vec!["name:desc", "_geoPoint(45.4777599, 9.1967508):asc"]),
..BASE_CONF
},
utils::Conf {
group_name: "sort on many fields",
sort: Some(vec!["population:asc", "name:asc", "elevation:asc", "timezone:asc"]),
..BASE_CONF
},
];
utils::run_benches(c, confs);
}
criterion_group!(benches, bench_sort);
criterion_main!(benches);
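The offset windows in BASE_CONF boil down to a plain skip/take over the candidate ids; a minimal standalone sketch of the access pattern being measured (names are illustrative):

fn page(all_docs: &[u32], offset: usize, limit: usize) -> Vec<u32> {
    // Some((980, 20)) above means: skip 980 candidates, keep the next 20.
    all_docs.iter().copied().skip(offset).take(limit).collect()
}

fn main() {
    let all_docs: Vec<u32> = (0..1_000).collect();
    assert_eq!(page(&all_docs, 980, 20).len(), 20);
}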

crates/benchmarks/benches/utils.rs

@@ -9,6 +9,7 @@ use anyhow::Context;
use bumpalo::Bump;
use criterion::BenchmarkId;
use memmap2::Mmap;
use milli::documents::sort::recursive_sort;
use milli::heed::EnvOpenOptions;
use milli::progress::Progress;
use milli::update::new::indexer;
@@ -35,6 +36,12 @@ pub struct Conf<'a> {
pub configure: fn(&mut Settings),
pub filter: Option<&'a str>,
pub sort: Option<Vec<&'a str>>,
/// set to skip documents (offset, limit)
pub offsets: &'a [Option<(usize, usize)>],
/// enable if you want to bench getting documents without querying
pub get_documents: bool,
/// configure the benchmark sample size
pub sample_size: Option<usize>,
/// enable or disable the optional words on the query
pub optional_words: bool,
/// primary key, if there is None we'll auto-generate docids for every document
@@ -52,6 +59,9 @@ impl Conf<'_> {
configure: |_| (),
filter: None,
sort: None,
offsets: &[None],
get_documents: false,
sample_size: None,
optional_words: true,
primary_key: None,
};
@@ -145,25 +155,79 @@ pub fn run_benches(c: &mut criterion::Criterion, confs: &[Conf]) {
let file_name = Path::new(conf.dataset).file_name().and_then(|f| f.to_str()).unwrap();
let name = format!("{}: {}", file_name, conf.group_name);
let mut group = c.benchmark_group(&name);
if let Some(sample_size) = conf.sample_size {
group.sample_size(sample_size);
}
for &query in conf.queries {
group.bench_with_input(BenchmarkId::from_parameter(query), &query, |b, &query| {
b.iter(|| {
let rtxn = index.read_txn().unwrap();
let mut search = index.search(&rtxn);
search.query(query).terms_matching_strategy(TermsMatchingStrategy::default());
if let Some(filter) = conf.filter {
let filter = Filter::from_str(filter).unwrap().unwrap();
search.filter(filter);
}
if let Some(sort) = &conf.sort {
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
search.sort_criteria(sort);
}
let _ids = search.execute().unwrap();
});
});
for offset in conf.offsets {
let parameter = match offset {
None => query.to_string(),
Some((offset, limit)) => format!("{query}[{offset}:{limit}]"),
};
group.bench_with_input(
BenchmarkId::from_parameter(parameter),
&query,
|b, &query| {
b.iter(|| {
let rtxn = index.read_txn().unwrap();
let mut search = index.search(&rtxn);
search
.query(query)
.terms_matching_strategy(TermsMatchingStrategy::default());
if let Some(filter) = conf.filter {
let filter = Filter::from_str(filter).unwrap().unwrap();
search.filter(filter);
}
if let Some(sort) = &conf.sort {
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
search.sort_criteria(sort);
}
if let Some((offset, limit)) = offset {
search.offset(*offset).limit(*limit);
}
let _ids = search.execute().unwrap();
});
},
);
}
}
if conf.get_documents {
for offset in conf.offsets {
let parameter = match offset {
None => String::from("get_documents"),
Some((offset, limit)) => format!("get_documents[{offset}:{limit}]"),
};
group.bench_with_input(BenchmarkId::from_parameter(parameter), &(), |b, &()| {
b.iter(|| {
let rtxn = index.read_txn().unwrap();
if let Some(sort) = &conf.sort {
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
let all_docs = index.documents_ids(&rtxn).unwrap();
let facet_sort =
recursive_sort(&index, &rtxn, sort, &all_docs).unwrap();
let iter = facet_sort.iter().unwrap();
if let Some((offset, limit)) = offset {
let _results = iter.skip(*offset).take(*limit).collect::<Vec<_>>();
} else {
let _results = iter.collect::<Vec<_>>();
}
} else {
let all_docs = index.documents_ids(&rtxn).unwrap();
if let Some((offset, limit)) = offset {
let _results =
all_docs.iter().skip(*offset).take(*limit).collect::<Vec<_>>();
} else {
let _results = all_docs.iter().collect::<Vec<_>>();
}
}
});
});
}
}
group.finish();
index.prepare_for_closing().wait();


@@ -873,7 +873,7 @@ impl IndexScheduler {
.into_inner()
.into_iter()
.map(|fragment| {
let value = embedder_options.indexing_fragment(&fragment.name).unwrap();
let value = embedder_options.fragment(&fragment.name).unwrap();
let template = JsonTemplate::new(value.clone()).unwrap();
RuntimeFragment { name: fragment.name, id: fragment.id, template }
})


@@ -5,6 +5,7 @@ use std::sync::atomic::Ordering;
use dump::IndexMetadata;
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
use meilisearch_types::milli::{self};
@@ -227,12 +228,21 @@ impl IndexScheduler {
return Err(Error::from_milli(user_err, Some(uid.to_string())));
};
for (embedder_name, (embeddings, regenerate)) in embeddings {
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
embeddings,
)),
regenerate,
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments,
};
vectors.insert(embedder_name, serde_json::to_value(embeddings).unwrap());
}
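The rule in the comment reduces to a single predicate applied at export time; a minimal restatement (the helper name is illustrative, not the crate's API):

/// Whether an exported embedding keeps its `regenerate` flag. Fragments are
/// marked as user-provided on import, so regeneration is forced off for them.
fn exported_regenerate(regenerate: bool, has_fragments: bool) -> bool {
    regenerate && !has_fragments
}

fn main() {
    assert!(exported_regenerate(true, false));
    assert!(!exported_regenerate(true, true)); // fragments never regenerate
}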


@@ -9,6 +9,7 @@ use flate2::write::GzEncoder;
use flate2::Compression;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::update::{request_threads, Setting};
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
@@ -62,13 +63,14 @@ impl IndexScheduler {
let ExportIndexSettings { filter, override_settings } = export_settings;
let index = self.index(uid)?;
let index_rtxn = index.read_txn()?;
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
// First, check if the index already exists
let url = format!("{base_url}/indexes/{uid}");
let response = retry(&must_stop_processing, || {
let mut request = agent.get(&url);
if let Some(api_key) = api_key {
request = request.set("Authorization", &format!("Bearer {api_key}"));
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
@@ -90,8 +92,8 @@ impl IndexScheduler {
let url = format!("{base_url}/indexes");
retry(&must_stop_processing, || {
let mut request = agent.post(&url);
if let Some(api_key) = api_key {
request = request.set("Authorization", &format!("Bearer {api_key}"));
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
@@ -103,8 +105,8 @@ impl IndexScheduler {
let url = format!("{base_url}/indexes/{uid}");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(api_key) = api_key {
request = request.set("Authorization", &format!("Bearer {api_key}"));
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
@@ -122,7 +124,6 @@ impl IndexScheduler {
}
// Retry logic for sending settings
let url = format!("{base_url}/indexes/{uid}/settings");
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
@@ -167,10 +168,10 @@ impl IndexScheduler {
},
);
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(50 * 1024 * 1024); // defaults to 50 MiB
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
let documents_url = format!("{base_url}/indexes/{uid}/documents");
request_threads()
let results = request_threads()
.broadcast(|ctx| {
let index_rtxn = index
.read_txn()
@@ -229,12 +230,21 @@ impl IndexScheduler {
));
};
for (embedder_name, (embeddings, regenerate)) in embeddings {
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(
VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
),
regenerate,
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments,
};
vectors.insert(
embedder_name,
@@ -265,9 +275,8 @@ impl IndexScheduler {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(api_key) = api_key {
request = request
.set("Authorization", &(format!("Bearer {api_key}")));
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
})?;
@@ -276,7 +285,7 @@ impl IndexScheduler {
}
buffer.extend_from_slice(&tmp_buffer);
if i % 100 == 0 {
if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}
@@ -284,8 +293,8 @@ impl IndexScheduler {
retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some(api_key) = api_key {
request = request.set("Authorization", &(format!("Bearer {api_key}")));
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;
@@ -298,6 +307,9 @@ impl IndexScheduler {
Some(uid.to_string()),
)
})?;
for result in results {
result?;
}
step.store(total_documents, atomic::Ordering::Relaxed);
}
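The `let results = request_threads().broadcast(...)` change is what fixes the silently dropped errors: each worker's Result is now collected and re-checked after the broadcast. A minimal sketch of the pattern, with std::thread::scope standing in for the request_threads pool used here:

use std::thread;

// Stand-in for sending one batch to the remote instance.
fn send_chunk(chunk: &[u8]) -> Result<(), String> {
    if chunk.is_empty() { Err("empty payload".into()) } else { Ok(()) }
}

fn export_all(chunks: &[Vec<u8>]) -> Result<(), String> {
    // Collect one Result per worker instead of discarding them.
    let results: Vec<Result<(), String>> = thread::scope(|s| {
        let handles: Vec<_> =
            chunks.iter().map(|c| s.spawn(move || send_chunk(c))).collect();
        handles.into_iter().map(|h| h.join().unwrap()).collect()
    });
    for result in results {
        result?; // surface the first worker error, as the route now does
    }
    Ok(())
}

fn main() {
    assert!(export_all(&[vec![1], vec![2]]).is_ok());
    assert!(export_all(&[vec![], vec![2]]).is_err());
}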


@@ -3,6 +3,7 @@ use std::collections::BTreeMap;
use big_s::S;
use insta::assert_json_snapshot;
use meili_snap::{json_string, snapshot};
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::vector::settings::EmbeddingSettings;
use meilisearch_types::milli::vector::SearchQuery;
@@ -220,8 +221,8 @@ fn import_vectors() {
let embeddings = index.embeddings(&rtxn, 0).unwrap();
assert_json_snapshot!(embeddings[&simple_hf_name].0[0] == lab_embed, @"true");
assert_json_snapshot!(embeddings[&fakerest_name].0[0] == beagle_embed, @"true");
assert_json_snapshot!(embeddings[&simple_hf_name].embeddings[0] == lab_embed, @"true");
assert_json_snapshot!(embeddings[&fakerest_name].embeddings[0] == beagle_embed, @"true");
let doc = index.documents(&rtxn, std::iter::once(0)).unwrap()[0].1;
let fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
@@ -311,9 +312,9 @@ fn import_vectors() {
let embeddings = index.embeddings(&rtxn, 0).unwrap();
// automatically changed to patou because set to regenerate
assert_json_snapshot!(embeddings[&simple_hf_name].0[0] == patou_embed, @"true");
assert_json_snapshot!(embeddings[&simple_hf_name].embeddings[0] == patou_embed, @"true");
// remained beagle
assert_json_snapshot!(embeddings[&fakerest_name].0[0] == beagle_embed, @"true");
assert_json_snapshot!(embeddings[&fakerest_name].embeddings[0] == beagle_embed, @"true");
let doc = index.documents(&rtxn, std::iter::once(0)).unwrap()[0].1;
let fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
@@ -497,13 +498,13 @@ fn import_vectors_first_and_embedder_later() {
let docid = index.external_documents_ids.get(&rtxn, "0").unwrap().unwrap();
let embeddings = index.embeddings(&rtxn, docid).unwrap();
let (embedding, _) = &embeddings["my_doggo_embedder"];
assert!(!embedding.is_empty(), "{embedding:?}");
let EmbeddingsWithMetadata { embeddings, .. } = &embeddings["my_doggo_embedder"];
assert!(!embeddings.is_empty(), "{embeddings:?}");
// the document with the id 3 should keep its original embedding
let docid = index.external_documents_ids.get(&rtxn, "3").unwrap().unwrap();
let embeddings = index.embeddings(&rtxn, docid).unwrap();
let (embeddings, _) = &embeddings["my_doggo_embedder"];
let EmbeddingsWithMetadata { embeddings, .. } = &embeddings["my_doggo_embedder"];
snapshot!(embeddings.len(), @"1");
assert!(embeddings[0].iter().all(|i| *i == 3.0), "{:?}", embeddings[0]);
@@ -558,7 +559,7 @@ fn import_vectors_first_and_embedder_later() {
"###);
let embeddings = index.embeddings(&rtxn, docid).unwrap();
let (embedding, _) = &embeddings["my_doggo_embedder"];
let EmbeddingsWithMetadata { embeddings: embedding, .. } = &embeddings["my_doggo_embedder"];
assert!(!embedding.is_empty());
assert!(!embedding[0].iter().all(|i| *i == 3.0), "{:?}", embedding[0]);
@@ -566,7 +567,7 @@ fn import_vectors_first_and_embedder_later() {
// the document with the id 4 should generate an embedding
let docid = index.external_documents_ids.get(&rtxn, "4").unwrap().unwrap();
let embeddings = index.embeddings(&rtxn, docid).unwrap();
let (embedding, _) = &embeddings["my_doggo_embedder"];
let EmbeddingsWithMetadata { embeddings: embedding, .. } = &embeddings["my_doggo_embedder"];
assert!(!embedding.is_empty());
}
@@ -696,7 +697,7 @@ fn delete_document_containing_vector() {
"###);
let docid = index.external_documents_ids.get(&rtxn, "0").unwrap().unwrap();
let embeddings = index.embeddings(&rtxn, docid).unwrap();
let (embedding, _) = &embeddings["manual"];
let EmbeddingsWithMetadata { embeddings: embedding, .. } = &embeddings["manual"];
assert!(!embedding.is_empty(), "{embedding:?}");
index_scheduler


@@ -158,7 +158,7 @@ impl AuthController {
self.store.delete_all_keys()
}
/// Delete all the keys in the DB.
/// Insert a key directly into the store.
pub fn raw_insert_key(&mut self, key: Key) -> Result<()> {
self.store.put_api_key(key)?;
Ok(())
@@ -351,6 +351,7 @@ pub struct IndexSearchRules {
fn generate_default_keys(store: &HeedAuthStore) -> Result<()> {
store.put_api_key(Key::default_chat())?;
store.put_api_key(Key::default_read_only_admin())?;
store.put_api_key(Key::default_admin())?;
store.put_api_key(Key::default_search())?;


@@ -88,7 +88,13 @@ impl HeedAuthStore {
let mut actions = HashSet::new();
for action in &key.actions {
match action {
Action::All => actions.extend(enum_iterator::all::<Action>()),
Action::All => {
actions.extend(enum_iterator::all::<Action>());
actions.remove(&Action::AllGet);
}
Action::AllGet => {
actions.extend(enum_iterator::all::<Action>().filter(|a| a.is_read()))
}
Action::DocumentsAll => {
actions.extend(
[Action::DocumentsGet, Action::DocumentsDelete, Action::DocumentsAdd]

crates/meilisearch-types/src/error.rs

@@ -237,6 +237,7 @@ InvalidDocumentRetrieveVectors , InvalidRequest , BAD_REQU
MissingDocumentFilter , InvalidRequest , BAD_REQUEST ;
MissingDocumentEditionFunction , InvalidRequest , BAD_REQUEST ;
InvalidDocumentFilter , InvalidRequest , BAD_REQUEST ;
InvalidDocumentSort , InvalidRequest , BAD_REQUEST ;
InvalidDocumentGeoField , InvalidRequest , BAD_REQUEST ;
InvalidVectorDimensions , InvalidRequest , BAD_REQUEST ;
InvalidVectorsType , InvalidRequest , BAD_REQUEST ;
@@ -415,19 +416,9 @@ InvalidChatCompletionPrompts , InvalidRequest , BAD_REQU
InvalidChatCompletionSystemPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchDescriptionPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchQueryParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchFilterParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionSearchIndexUidParamPrompt , InvalidRequest , BAD_REQUEST ;
InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST ;
// Render
InvalidRenderTemplate , InvalidRequest , BAD_REQUEST ;
InvalidRenderTemplateId , InvalidRequest , BAD_REQUEST ;
InvalidRenderTemplateInline , InvalidRequest , BAD_REQUEST ;
InvalidRenderInput , InvalidRequest , BAD_REQUEST ;
InvalidRenderInputDocumentId , InvalidRequest , BAD_REQUEST ;
InvalidRenderInputFields , InvalidRequest , BAD_REQUEST ;
InvalidRenderInputInline , InvalidRequest , BAD_REQUEST ;
RenderDocumentNotFound , InvalidRequest , NOT_FOUND ;
TemplateParsingError , InvalidRequest , BAD_REQUEST ;
TemplateRenderingError , InvalidRequest , BAD_REQUEST
InvalidChatCompletionPreQueryPrompt , InvalidRequest , BAD_REQUEST
}
impl ErrorCode for JoinError {
@@ -487,7 +478,8 @@ impl ErrorCode for milli::Error {
UserError::InvalidDistinctAttribute { .. } => Code::InvalidSearchDistinct,
UserError::SortRankingRuleMissing => Code::InvalidSearchSort,
UserError::InvalidFacetsDistribution { .. } => Code::InvalidSearchFacets,
UserError::InvalidSortableAttribute { .. } => Code::InvalidSearchSort,
UserError::InvalidSearchSortableAttribute { .. } => Code::InvalidSearchSort,
UserError::InvalidDocumentSortableAttribute { .. } => Code::InvalidDocumentSort,
UserError::InvalidSearchableAttribute { .. } => {
Code::InvalidSearchAttributesToSearchOn
}
@@ -503,7 +495,8 @@ impl ErrorCode for milli::Error {
UserError::InvalidVectorsMapType { .. }
| UserError::InvalidVectorsEmbedderConf { .. } => Code::InvalidVectorsType,
UserError::TooManyVectors(_, _) => Code::TooManyVectors,
UserError::SortError(_) => Code::InvalidSearchSort,
UserError::SortError { search: true, .. } => Code::InvalidSearchSort,
UserError::SortError { search: false, .. } => Code::InvalidDocumentSort,
UserError::InvalidMinTypoWordLenSetting(_, _) => {
Code::InvalidSettingsTypoTolerance
}

crates/meilisearch-types/src/features.rs

@@ -4,10 +4,11 @@ use serde::{Deserialize, Serialize};
use crate::error::{Code, ResponseError};
pub const DEFAULT_CHAT_SYSTEM_PROMPT: &str = "You are a highly capable research assistant with access to powerful search tools. IMPORTANT INSTRUCTIONS:1. When answering questions, you MUST make multiple tool calls (at least 2-3) to gather comprehensive information.2. Use different search queries for each tool call - vary keywords, rephrase questions, and explore different semantic angles to ensure broad coverage.3. Always explicitly announce BEFORE making each tool call by saying: \"I'll search for [specific information] now.\"4. Combine information from ALL tool calls to provide complete, nuanced answers rather than relying on a single source.5. For complex topics, break down your research into multiple targeted queries rather than using a single generic search.";
pub const DEFAULT_CHAT_SYSTEM_PROMPT: &str = "You are a highly capable research assistant with access to powerful search tools. IMPORTANT INSTRUCTIONS:1. When answering questions, you MUST make multiple tool calls (at least 2-3) to gather comprehensive information.2. Use different search queries for each tool call - vary keywords, rephrase questions, and explore different semantic angles to ensure broad coverage.3. Always explicitly announce BEFORE making each tool call by saying: \"I'll search for [specific information] now.\"4. Combine information from ALL tool calls to provide complete, nuanced answers rather than relying on a single source.5. For complex topics, break down your research into multiple targeted queries rather than using a single generic search. Meilisearch doesn't use the colon (:) syntax to filter but rather the equal (=) one. Separate filters from query and keep the q parameter empty if needed. Same for the filter parameter: keep it empty if need be. If you need to find documents that CONTAINS keywords simply put the keywords in the q parameter do no use a filter for this purpose. Whenever you get an error, read the error message and fix your error. ";
pub const DEFAULT_CHAT_SEARCH_DESCRIPTION_PROMPT: &str =
"Search the database for relevant JSON documents using an optional query.";
"Query: 'best story about Rust before 2018' with year: 2018, 2020, 2021\nlabel: analysis, golang, javascript\ntype: story, link\nvote: 300, 298, 278\n: {\"q\": \"\", \"filter\": \"category = Rust AND type = story AND year < 2018 AND vote > 100\"}\nQuery: 'A black or green car that can go fast with red brakes' with maxspeed_kmh: 200, 150, 130\ncolor: black, grey, red, green\nbrand: Toyota, Renault, Jeep, Ferrari\n: {\"q\": \"red brakes\", \"filter\": \"maxspeed_kmh > 150 AND color IN ['black', green]\"}\nQuery: 'Superman movie released in 2018 or after' with year: 2018, 2020, 2021\ngenres: Drama, Comedy, Adventure, Fiction\n: {\"q\":\"Superman\",\"filter\":\"genres IN [Adventure, Fiction] AND year >= 2018\"}";
pub const DEFAULT_CHAT_SEARCH_Q_PARAM_PROMPT: &str = "The search query string used to find relevant documents in the index. This should contain keywords or phrases that best represent what the user is looking for. More specific queries will yield more precise results.";
pub const DEFAULT_CHAT_SEARCH_FILTER_PARAM_PROMPT: &str = "The search filter string used to find relevant documents in the index. It supports parentheses, `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox`. Here is an example: \"price > 100 AND category = 'electronics'\". The following is a list of fields that can be filtered on: ";
pub const DEFAULT_CHAT_SEARCH_INDEX_UID_PARAM_PROMPT: &str = "The name of the index to search within. An index is a collection of documents organized for search. Selecting the right index ensures the most relevant results for the user query.";
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default, PartialEq, Eq)]
@@ -161,18 +162,31 @@ impl ChatCompletionSource {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct ChatCompletionPrompts {
#[serde(default)]
pub system: String,
#[serde(default)]
pub search_description: String,
#[serde(default)]
pub search_q_param: String,
#[serde(default = "default_search_filter_param")]
pub search_filter_param: String,
#[serde(default)]
pub search_index_uid_param: String,
}
/// This function is used when the search_filter_param is
/// not provided, which can happen when the database comes from v1.15.
fn default_search_filter_param() -> String {
DEFAULT_CHAT_SEARCH_FILTER_PARAM_PROMPT.to_string()
}
impl Default for ChatCompletionPrompts {
fn default() -> Self {
Self {
system: DEFAULT_CHAT_SYSTEM_PROMPT.to_string(),
search_description: DEFAULT_CHAT_SEARCH_DESCRIPTION_PROMPT.to_string(),
search_q_param: DEFAULT_CHAT_SEARCH_Q_PARAM_PROMPT.to_string(),
search_filter_param: DEFAULT_CHAT_SEARCH_FILTER_PARAM_PROMPT.to_string(),
search_index_uid_param: DEFAULT_CHAT_SEARCH_INDEX_UID_PARAM_PROMPT.to_string(),
}
}
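The serde default is the whole dumpless-upgrade fix: a v1.15 settings payload that predates searchFilterParam still deserializes and falls back to the default prompt. A minimal sketch (struct trimmed to the two relevant fields, placeholder default):

use serde::Deserialize;

fn default_filter_prompt() -> String {
    "<filter prompt>".to_string() // stands in for the real default constant
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct Prompts {
    #[serde(default)]
    search_q_param: String,
    #[serde(default = "default_filter_prompt")]
    search_filter_param: String,
}

fn main() {
    // Field missing from an old database entry…
    let old = r#"{ "searchQParam": "the query prompt" }"#;
    let prompts: Prompts = serde_json::from_str(old).unwrap();
    // …so deserialization falls back to the default instead of failing.
    assert_eq!(prompts.search_filter_param, "<filter prompt>");
}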

crates/meilisearch-types/src/keys.rs

@@ -144,6 +144,21 @@ impl Key {
}
}
pub fn default_read_only_admin() -> Self {
let now = OffsetDateTime::now_utc();
let uid = Uuid::new_v4();
Self {
name: Some("Default Read-Only Admin API Key".to_string()),
description: Some("Use it to read information across the whole database. Caution! Do not expose this key on a public frontend".to_string()),
uid,
actions: vec![Action::AllGet, Action::KeysGet],
indexes: vec![IndexUidPattern::all()],
expires_at: None,
created_at: now,
updated_at: now,
}
}
pub fn default_search() -> Self {
let now = OffsetDateTime::now_utc();
let uid = Uuid::new_v4();
@@ -218,6 +233,9 @@ pub enum Action {
#[serde(rename = "*")]
#[deserr(rename = "*")]
All = 0,
#[serde(rename = "*.get")]
#[deserr(rename = "*.get")]
AllGet,
#[serde(rename = "search")]
#[deserr(rename = "search")]
Search,
@@ -399,6 +417,52 @@ impl Action {
}
}
/// Whether the action should be included in [Action::AllGet].
pub fn is_read(&self) -> bool {
use Action::*;
// It's using an exhaustive match to force the addition of new actions.
match self {
// Any action that expands to others must return false, as it wouldn't be able to expand recursively.
All | AllGet | DocumentsAll | IndexesAll | ChatsAll | TasksAll | SettingsAll
| StatsAll | MetricsAll | DumpsAll | SnapshotsAll | ChatsSettingsAll => false,
Search => true,
DocumentsAdd => false,
DocumentsGet => true,
DocumentsDelete => false,
Export => true,
IndexesAdd => false,
IndexesGet => true,
IndexesUpdate => false,
IndexesDelete => false,
IndexesSwap => false,
TasksCancel => false,
TasksDelete => false,
TasksGet => true,
SettingsGet => true,
SettingsUpdate => false,
StatsGet => true,
MetricsGet => true,
DumpsCreate => false,
SnapshotsCreate => false,
Version => true,
KeysAdd => false,
KeysGet => false, // Disabled in order to prevent privilege escalation
KeysUpdate => false,
KeysDelete => false,
ExperimentalFeaturesGet => true,
ExperimentalFeaturesUpdate => false,
NetworkGet => true,
NetworkUpdate => false,
ChatCompletions => false, // Disabled because it might trigger generation of new chats
ChatsGet => true,
ChatsDelete => false,
ChatsSettingsGet => true,
ChatsSettingsUpdate => false,
}
}
pub const fn repr(&self) -> u8 {
*self as u8
}
@@ -408,6 +472,7 @@ pub mod actions {
use super::Action::*;
pub(crate) const ALL: u8 = All.repr();
pub const ALL_GET: u8 = AllGet.repr();
pub const SEARCH: u8 = Search.repr();
pub const DOCUMENTS_ALL: u8 = DocumentsAll.repr();
pub const DOCUMENTS_ADD: u8 = DocumentsAdd.repr();

crates/meilisearch/Cargo.toml

@@ -48,7 +48,6 @@ is-terminal = "0.4.16"
itertools = "0.14.0"
jsonwebtoken = "9.3.1"
lazy_static = "1.5.0"
liquid = "0.26.11"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
mimalloc = { version = "0.1.47", default-features = false }


@@ -104,6 +104,4 @@ impl Analytics for MockAnalytics {
_request: &HttpRequest,
) {
}
fn get_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
fn post_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
}


@@ -73,12 +73,6 @@ pub enum DocumentDeletionKind {
PerFilter,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum DocumentFetchKind {
PerDocumentId { retrieve_vectors: bool },
Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool },
}
/// To send an event to segment, your event must be able to aggregate itself with another event of the same type.
pub trait Aggregate: 'static + mopa::Any + Send {
/// The name of the event that will be sent to segment.


@@ -49,7 +49,7 @@ pub enum MeilisearchHttpError {
TooManySearchRequests(usize),
#[error("Internal error: Search limiter is down.")]
SearchLimiterIsDown,
#[error("The provided payload reached the size limit. The maximum accepted payload size is {}.", Byte::from_u64(*.0 as u64).get_appropriate_unit(UnitType::Binary))]
#[error("The provided payload reached the size limit. The maximum accepted payload size is {}.", Byte::from_u64(*.0 as u64).get_appropriate_unit(if *.0 % 1024 == 0 { UnitType::Binary } else { UnitType::Decimal }))]
PayloadTooLarge(usize),
#[error("Two indexes must be given for each swap. The list `[{}]` contains {} indexes.",
.0.iter().map(|uid| format!("\"{uid}\"")).collect::<Vec<_>>().join(", "), .0.len()
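The reworked PayloadTooLarge message picks binary units only when the limit is an exact multiple of 1024, so the 20 MiB default renders as MiB while an arbitrary user-supplied limit renders in decimal. A sketch of the same logic, assuming the byte_unit crate the error type uses:

use byte_unit::{Byte, UnitType};

fn human(limit: u64) -> String {
    let unit = if limit % 1024 == 0 { UnitType::Binary } else { UnitType::Decimal };
    format!("{:.2}", Byte::from_u64(limit).get_appropriate_unit(unit))
}

fn main() {
    assert_eq!(human(20 * 1024 * 1024), "20.00 MiB");
    assert_eq!(human(10_000_000), "10.00 MB");
}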


@@ -133,7 +133,7 @@ pub fn extract_token_from_request(
}
}
pub trait Policy: Sized {
pub trait Policy {
fn authenticate(
auth: Data<AuthController>,
token: &str,
@@ -340,22 +340,6 @@ pub mod policies {
}
}
pub struct DoubleActionPolicy<const A: u8, const B: u8>;
impl<const A: u8, const B: u8> Policy for DoubleActionPolicy<A, B> {
fn authenticate(
auth: Data<AuthController>,
token: &str,
index: Option<&str>,
) -> Result<AuthFilter, AuthError> {
let filter_a = ActionPolicy::<A>::authenticate(auth.clone(), token, index)?;
let _filter_b = ActionPolicy::<B>::authenticate(auth, token, index)?;
// There is no point merging the filters here.
// Since they originate from the same API key, they will hold the same information.
Ok(filter_a)
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Claims {


@@ -27,9 +27,10 @@ use meilisearch_types::features::{
ChatCompletionPrompts as DbChatCompletionPrompts,
ChatCompletionSource as DbChatCompletionSource, SystemRole,
};
use meilisearch_types::heed::RoTxn;
use meilisearch_types::keys::actions;
use meilisearch_types::milli::index::ChatConfig;
use meilisearch_types::milli::{all_obkv_to_json, obkv_to_json, TimeBudget};
use meilisearch_types::milli::{all_obkv_to_json, obkv_to_json, OrderBy, PatternMatch, TimeBudget};
use meilisearch_types::{Document, Index};
use serde::Deserialize;
use serde_json::json;
@@ -169,6 +170,7 @@ fn setup_search_tool(
let mut index_uids = Vec::new();
let mut function_description = prompts.search_description.clone();
let mut filter_description = prompts.search_filter_param.clone();
index_scheduler.try_for_each_index::<_, ()>(|name, index| {
// Make sure to skip unauthorized indexes
if !filters.is_index_authorized(name) {
@@ -180,16 +182,22 @@
let index_description = chat_config.description;
let _ = writeln!(&mut function_description, "\n\n - {name}: {index_description}\n");
index_uids.push(name.to_string());
let facet_distributions = format_facet_distributions(index, &rtxn, 10).unwrap(); // TODO do not unwrap
let _ = writeln!(&mut filter_description, "\n## Facet distributions of the {name} index");
let _ = writeln!(&mut filter_description, "{facet_distributions}");
Ok(())
})?;
tracing::debug!("LLM function description: {function_description}");
tracing::debug!("LLM filter description: {filter_description}");
let tool = ChatCompletionToolArgs::default()
.r#type(ChatCompletionToolType::Function)
.function(
FunctionObjectArgs::default()
.name(MEILI_SEARCH_IN_INDEX_FUNCTION_NAME)
.description(&function_description)
.description(function_description)
.parameters(json!({
"type": "object",
"properties": {
@@ -203,9 +211,13 @@
// "type": ["string", "null"],
"type": "string",
"description": prompts.search_q_param,
},
"filter": {
"type": "string",
"description": filter_description,
}
},
"required": ["index_uid", "q"],
"required": ["index_uid", "q", "filter"],
"additionalProperties": false,
}))
.strict(true)
@@ -247,11 +259,19 @@ async fn process_search_request(
auth_token: &str,
index_uid: String,
q: Option<String>,
filter: Option<String>,
) -> Result<(Index, Vec<Document>, String), ResponseError> {
let index = index_scheduler.index(&index_uid)?;
let rtxn = index.static_read_txn()?;
let ChatConfig { description: _, prompt: _, search_parameters } = index.chat_config(&rtxn)?;
let mut query = SearchQuery { q, ..SearchQuery::from(search_parameters) };
let mut query = SearchQuery {
q,
filter: filter.map(serde_json::Value::from),
..SearchQuery::from(search_parameters)
};
tracing::debug!("LLM query: {:?}", query);
let auth_filter = ActionPolicy::<{ actions::SEARCH }>::authenticate(
auth_ctrl,
auth_token,
@@ -280,14 +300,23 @@
let (search, _is_finite_pagination, _max_total_hits, _offset) =
prepare_search(&index_cloned, &rtxn, &query, &search_kind, time_budget, features)?;
search_from_kind(index_uid, search_kind, search)
.map(|(search_results, _)| (rtxn, search_results))
.map_err(ResponseError::from)
match search_from_kind(index_uid, search_kind, search) {
Ok((search_results, _)) => Ok((rtxn, Ok(search_results))),
Err(MeilisearchHttpError::Milli {
error: meilisearch_types::milli::Error::UserError(user_error),
index_name: _,
}) => Ok((rtxn, Err(user_error))),
Err(err) => Err(ResponseError::from(err)),
}
})
.await;
permit.drop().await;
let output = output?;
let output = match output? {
Ok((rtxn, Ok(search_results))) => Ok((rtxn, search_results)),
Ok((_rtxn, Err(error))) => return Ok((index, Vec::new(), error.to_string())),
Err(err) => Err(err),
};
let mut documents = Vec::new();
if let Ok((ref rtxn, ref search_result)) = output {
MEILISEARCH_CHAT_SEARCH_REQUESTS.with_label_values(&["internal"]).inc();
@@ -395,16 +424,19 @@ async fn non_streamed_chat(
for call in meili_calls {
let result = match serde_json::from_str(&call.function.arguments) {
Ok(SearchInIndexParameters { index_uid, q }) => process_search_request(
&index_scheduler,
auth_ctrl.clone(),
&search_queue,
auth_token,
index_uid,
q,
)
.await
.map_err(|e| e.to_string()),
Ok(SearchInIndexParameters { index_uid, q, filter }) => {
process_search_request(
&index_scheduler,
auth_ctrl.clone(),
&search_queue,
auth_token,
index_uid,
q,
filter,
)
.await
.map_err(|e| e.to_string())
}
Err(err) => Err(err.to_string()),
};
@@ -719,13 +751,14 @@ async fn handle_meili_tools(
let mut error = None;
let result = match serde_json::from_str(&call.function.arguments) {
Ok(SearchInIndexParameters { index_uid, q }) => match process_search_request(
Ok(SearchInIndexParameters { index_uid, q, filter }) => match process_search_request(
index_scheduler,
auth_ctrl.clone(),
search_queue,
auth_token,
index_uid,
q,
filter,
)
.await
{
@@ -801,4 +834,42 @@ struct SearchInIndexParameters {
index_uid: String,
/// The query parameter to use.
q: Option<String>,
/// The filter parameter to use.
filter: Option<String>,
}
fn format_facet_distributions(
index: &Index,
rtxn: &RoTxn,
max_values_per_facet: usize,
) -> meilisearch_types::milli::Result<String> {
let universe = index.documents_ids(rtxn)?;
let rules = index.filterable_attributes_rules(rtxn)?;
let fields_ids_map = index.fields_ids_map(rtxn)?;
let filterable_attributes = fields_ids_map
.names()
.filter(|name| rules.iter().any(|rule| matches!(rule.match_str(name), PatternMatch::Match)))
.map(|name| (name, OrderBy::Count));
let facets_distribution = index
.facets_distribution(rtxn)
.max_values_per_facet(max_values_per_facet)
.candidates(universe)
.facets(filterable_attributes)
.execute()?;
let mut output = String::new();
for (facet_name, entries) in facets_distribution {
let _ = write!(&mut output, "{}: ", facet_name);
let total_entries = entries.len();
for (i, (value, _count)) in entries.into_iter().enumerate() {
let _ = if total_entries.saturating_sub(1) == i {
write!(&mut output, "{value}.")
} else {
write!(&mut output, "{value}, ")
};
}
let _ = writeln!(&mut output);
}
Ok(output)
}


@@ -8,8 +8,8 @@ use meilisearch_types::error::{Code, ResponseError};
use meilisearch_types::features::{
ChatCompletionPrompts as DbChatCompletionPrompts, ChatCompletionSettings,
ChatCompletionSource as DbChatCompletionSource, DEFAULT_CHAT_SEARCH_DESCRIPTION_PROMPT,
DEFAULT_CHAT_SEARCH_INDEX_UID_PARAM_PROMPT, DEFAULT_CHAT_SEARCH_Q_PARAM_PROMPT,
DEFAULT_CHAT_SYSTEM_PROMPT,
DEFAULT_CHAT_SEARCH_FILTER_PARAM_PROMPT, DEFAULT_CHAT_SEARCH_INDEX_UID_PARAM_PROMPT,
DEFAULT_CHAT_SEARCH_Q_PARAM_PROMPT, DEFAULT_CHAT_SYSTEM_PROMPT,
};
use meilisearch_types::keys::actions;
use meilisearch_types::milli::update::Setting;
@@ -84,6 +84,11 @@ async fn patch_settings(
Setting::Reset => DEFAULT_CHAT_SEARCH_Q_PARAM_PROMPT.to_string(),
Setting::NotSet => old_settings.prompts.search_q_param,
},
search_filter_param: match new_prompts.search_filter_param {
Setting::Set(new_description) => new_description,
Setting::Reset => DEFAULT_CHAT_SEARCH_FILTER_PARAM_PROMPT.to_string(),
Setting::NotSet => old_settings.prompts.search_filter_param,
},
search_index_uid_param: match new_prompts.search_index_uid_param {
Setting::Set(new_description) => new_description,
Setting::Reset => DEFAULT_CHAT_SEARCH_INDEX_UID_PARAM_PROMPT.to_string(),
@@ -252,6 +257,10 @@ pub struct ChatPrompts {
#[schema(value_type = Option<String>, example = json!("This is query parameter..."))]
pub search_q_param: Setting<String>,
#[serde(default)]
#[deserr(default, error = DeserrJsonError<InvalidChatCompletionSearchFilterParamPrompt>)]
#[schema(value_type = Option<String>, example = json!("This is filter parameter..."))]
pub search_filter_param: Setting<String>,
#[serde(default)]
#[deserr(default, error = DeserrJsonError<InvalidChatCompletionSearchIndexUidParamPrompt>)]
#[schema(value_type = Option<String>, example = json!("This is index you want to search in..."))]
pub search_index_uid_param: Setting<String>,


@@ -1,3 +1,5 @@
use url::Url;
use crate::analytics::Aggregate;
use crate::routes::export::Export;
@@ -5,6 +7,1 @@ use crate::routes::export::Export;
pub struct ExportAnalytics {
total_received: usize,
has_api_key: bool,
sum_exports_meilisearch_cloud: usize,
sum_index_patterns: usize,
sum_patterns_with_filter: usize,
sum_patterns_with_override_settings: usize,
@@ -13,8 +16,14 @@
impl ExportAnalytics {
pub fn from_export(export: &Export) -> Self {
let Export { url: _, api_key, payload_size, indexes } = export;
let Export { url, api_key, payload_size, indexes } = export;
let url = Url::parse(url).ok();
let is_meilisearch_cloud = url.as_ref().and_then(Url::host_str).is_some_and(|host| {
host.ends_with("meilisearch.dev")
|| host.ends_with("meilisearch.com")
|| host.ends_with("meilisearch.io")
});
let has_api_key = api_key.is_some();
let index_patterns_count = indexes.as_ref().map_or(0, |indexes| indexes.len());
let patterns_with_filter_count = indexes.as_ref().map_or(0, |indexes| {
@@ -33,6 +42,7 @@
Self {
total_received: 1,
has_api_key,
sum_exports_meilisearch_cloud: is_meilisearch_cloud as usize,
sum_index_patterns: index_patterns_count,
sum_patterns_with_filter: patterns_with_filter_count,
sum_patterns_with_override_settings: patterns_with_override_settings_count,
@@ -49,6 +59,7 @@ impl Aggregate for ExportAnalytics {
fn aggregate(mut self: Box<Self>, other: Box<Self>) -> Box<Self> {
self.total_received += other.total_received;
self.has_api_key |= other.has_api_key;
self.sum_exports_meilisearch_cloud += other.sum_exports_meilisearch_cloud;
self.sum_index_patterns += other.sum_index_patterns;
self.sum_patterns_with_filter += other.sum_patterns_with_filter;
self.sum_patterns_with_override_settings += other.sum_patterns_with_override_settings;
@@ -63,6 +74,12 @@
Some(self.payload_sizes.iter().sum::<u64>() / self.payload_sizes.len() as u64)
};
let avg_exports_meilisearch_cloud = if self.total_received == 0 {
None
} else {
Some(self.sum_exports_meilisearch_cloud as f64 / self.total_received as f64)
};
let avg_index_patterns = if self.total_received == 0 {
None
} else {
@@ -84,6 +101,7 @@
serde_json::json!({
"total_received": self.total_received,
"has_api_key": self.has_api_key,
"avg_exports_meilisearch_cloud": avg_exports_meilisearch_cloud,
"avg_index_patterns": avg_index_patterns,
"avg_patterns_with_filter": avg_patterns_with_filter,
"avg_patterns_with_override_settings": avg_patterns_with_override_settings,

crates/meilisearch/src/routes/indexes/documents.rs

@@ -1,6 +1,7 @@
use std::collections::HashSet;
use std::io::{ErrorKind, Seek as _};
use std::marker::PhantomData;
use std::str::FromStr;
use actix_web::http::header::CONTENT_TYPE;
use actix_web::web::Data;
@@ -17,9 +18,11 @@ use meilisearch_types::error::deserr_codes::*;
use meilisearch_types::error::{Code, ResponseError};
use meilisearch_types::heed::RoTxn;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::milli::documents::sort::recursive_sort;
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::milli::vector::parsed_vectors::ExplicitVectors;
use meilisearch_types::milli::DocumentId;
use meilisearch_types::milli::{AscDesc, DocumentId};
use meilisearch_types::serde_cs::vec::CS;
use meilisearch_types::star_or::OptionStarOrList;
use meilisearch_types::tasks::KindWithContent;
@@ -42,6 +45,7 @@ use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::payload::Payload;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::search::fix_sort_query_parameters;
use crate::routes::{
get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
};
@@ -135,6 +139,8 @@ pub struct DocumentsFetchAggregator<Method: AggregateMethod> {
per_document_id: bool,
// if a filter was used
per_filter: bool,
// if documents were sorted
sort: bool,
#[serde(rename = "vector.retrieve_vectors")]
retrieve_vectors: bool,
@@ -151,39 +157,6 @@
marker: std::marker::PhantomData<Method>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum DocumentFetchKind {
PerDocumentId { retrieve_vectors: bool },
Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool, ids: usize },
}
impl<Method: AggregateMethod> DocumentsFetchAggregator<Method> {
pub fn from_query(query: &DocumentFetchKind) -> Self {
let (limit, offset, retrieve_vectors) = match query {
DocumentFetchKind::PerDocumentId { retrieve_vectors } => (1, 0, *retrieve_vectors),
DocumentFetchKind::Normal { limit, offset, retrieve_vectors, .. } => {
(*limit, *offset, *retrieve_vectors)
}
};
let ids = match query {
DocumentFetchKind::Normal { ids, .. } => *ids,
DocumentFetchKind::PerDocumentId { .. } => 0,
};
Self {
per_document_id: matches!(query, DocumentFetchKind::PerDocumentId { .. }),
per_filter: matches!(query, DocumentFetchKind::Normal { with_filter, .. } if *with_filter),
max_limit: limit,
max_offset: offset,
retrieve_vectors,
max_document_ids: ids,
marker: PhantomData,
}
}
}
impl<Method: AggregateMethod> Aggregate for DocumentsFetchAggregator<Method> {
fn event_name(&self) -> &'static str {
Method::event_name()
@@ -193,6 +166,7 @@ impl<Method: AggregateMethod> Aggregate for DocumentsFetchAggregator<Method> {
Box::new(Self {
per_document_id: self.per_document_id | new.per_document_id,
per_filter: self.per_filter | new.per_filter,
sort: self.sort | new.sort,
retrieve_vectors: self.retrieve_vectors | new.retrieve_vectors,
max_limit: self.max_limit.max(new.max_limit),
max_offset: self.max_offset.max(new.max_offset),
@@ -276,6 +250,7 @@ pub async fn get_document(
retrieve_vectors: param_retrieve_vectors.0,
per_document_id: true,
per_filter: false,
sort: false,
max_limit: 0,
max_offset: 0,
max_document_ids: 0,
@@ -406,6 +381,8 @@ pub struct BrowseQueryGet {
#[param(default, value_type = Option<String>, example = "popularity > 1000")]
#[deserr(default, error = DeserrQueryParamError<InvalidDocumentFilter>)]
filter: Option<String>,
#[deserr(default, error = DeserrQueryParamError<InvalidDocumentSort>)]
sort: Option<String>,
}
#[derive(Debug, Deserr, ToSchema)]
@@ -430,6 +407,9 @@ pub struct BrowseQuery {
#[schema(default, value_type = Option<Value>, example = "popularity > 1000")]
#[deserr(default, error = DeserrJsonError<InvalidDocumentFilter>)]
filter: Option<Value>,
#[schema(default, value_type = Option<Vec<String>>, example = json!(["title:asc", "rating:desc"]))]
#[deserr(default, error = DeserrJsonError<InvalidDocumentSort>)]
sort: Option<Vec<String>>,
}
/// Get documents with POST
@@ -495,6 +475,7 @@ pub async fn documents_by_query_post(
analytics.publish(
DocumentsFetchAggregator::<DocumentsPOST> {
per_filter: body.filter.is_some(),
sort: body.sort.is_some(),
retrieve_vectors: body.retrieve_vectors,
max_limit: body.limit,
max_offset: body.offset,
@@ -571,7 +552,7 @@ pub async fn get_documents(
) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?params, "Get documents GET");
let BrowseQueryGet { limit, offset, fields, retrieve_vectors, filter, ids } =
let BrowseQueryGet { limit, offset, fields, retrieve_vectors, filter, ids, sort } =
params.into_inner();
let filter = match filter {
@@ -582,20 +563,20 @@
None => None,
};
let ids = ids.map(|ids| ids.into_iter().map(Into::into).collect());
let query = BrowseQuery {
offset: offset.0,
limit: limit.0,
fields: fields.merge_star_and_none(),
retrieve_vectors: retrieve_vectors.0,
filter,
ids,
ids: ids.map(|ids| ids.into_iter().map(Into::into).collect()),
sort: sort.map(|attr| fix_sort_query_parameters(&attr)),
};
analytics.publish(
DocumentsFetchAggregator::<DocumentsGET> {
per_filter: query.filter.is_some(),
sort: query.sort.is_some(),
retrieve_vectors: query.retrieve_vectors,
max_limit: query.limit,
max_offset: query.offset,
@@ -615,7 +596,7 @@ fn documents_by_query(
query: BrowseQuery,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let BrowseQuery { offset, limit, fields, retrieve_vectors, filter, ids } = query;
let BrowseQuery { offset, limit, fields, retrieve_vectors, filter, ids, sort } = query;
let retrieve_vectors = RetrieveVectors::new(retrieve_vectors);
@@ -633,6 +614,18 @@
None
};
let sort_criteria = if let Some(sort) = &sort {
let sorts: Vec<_> = match sort.iter().map(|s| milli::AscDesc::from_str(s)).collect() {
Ok(sorts) => sorts,
Err(asc_desc_error) => {
return Err(milli::SortError::from(asc_desc_error).into_document_error().into())
}
};
Some(sorts)
} else {
None
};
let index = index_scheduler.index(&index_uid)?;
let (total, documents) = retrieve_documents(
&index,
@@ -643,6 +636,7 @@
fields,
retrieve_vectors,
index_scheduler.features(),
sort_criteria,
)?;
let ret = PaginationView::new(offset, limit, total as usize, documents);
@@ -1461,13 +1455,19 @@ fn some_documents<'a, 't: 'a>(
document.remove("_vectors");
}
RetrieveVectors::Retrieve => {
// Clippy's `manual_unwrap_or_default` suggestion is a false positive here:
// the match extracts the inner map out of a `Value`, so there is no direct
// `unwrap_or_default` equivalent.
#[allow(clippy::manual_unwrap_or_default)]
let mut vectors = match document.remove("_vectors") {
Some(Value::Object(map)) => map,
_ => Default::default(),
};
for (name, (vector, regenerate)) in index.embeddings(rtxn, key)? {
for (
name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments: _ },
) in index.embeddings(rtxn, key)?
{
let embeddings =
ExplicitVectors { embeddings: Some(vector.into()), regenerate };
ExplicitVectors { embeddings: Some(embeddings.into()), regenerate };
vectors.insert(
name,
serde_json::to_value(embeddings).map_err(MeilisearchHttpError::from)?,
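The destructuring above implies that `Index::embeddings` now yields a struct per embedder rather than a tuple. Its shape, inferred from the fields used in this diff (a sketch, not the actual milli definition):

```rust
// Hypothetical shape inferred from the destructuring in this diff.
type Embedding = Vec<f32>;

pub struct EmbeddingsWithMetadata {
    /// The stored vectors for this embedder.
    pub embeddings: Vec<Embedding>,
    /// Whether the vectors are regenerated when the document changes.
    pub regenerate: bool,
    /// Whether the embedder is configured with fragments.
    pub has_fragments: bool,
}
```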
@@ -1492,6 +1492,7 @@ fn retrieve_documents<S: AsRef<str>>(
attributes_to_retrieve: Option<Vec<S>>,
retrieve_vectors: RetrieveVectors,
features: RoFeatures,
sort_criteria: Option<Vec<AscDesc>>,
) -> Result<(u64, Vec<Document>), ResponseError> {
let rtxn = index.read_txn()?;
let filter = &filter;
@@ -1524,15 +1525,32 @@ fn retrieve_documents<S: AsRef<str>>(
})?
}
let (it, number_of_documents) = {
let (it, number_of_documents) = if let Some(sort) = sort_criteria {
let number_of_documents = candidates.len();
let facet_sort = recursive_sort(index, &rtxn, sort, &candidates)?;
let iter = facet_sort.iter()?;
let mut documents = Vec::with_capacity(limit);
for result in iter.skip(offset).take(limit) {
documents.push(result?);
}
(
itertools::Either::Left(some_documents(
index,
&rtxn,
documents.into_iter(),
retrieve_vectors,
)?),
number_of_documents,
)
} else {
let number_of_documents = candidates.len();
(
some_documents(
itertools::Either::Right(some_documents(
index,
&rtxn,
candidates.into_iter().skip(offset).take(limit),
retrieve_vectors,
)?,
)?),
number_of_documents,
)
};
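Both branches must return the same iterator type, which is why they are wrapped in `itertools::Either`: `Either` implements `Iterator` whenever both of its variants do. A minimal standalone illustration of the pattern:

```rust
use itertools::Either;

// Two differently-typed iterators unified behind a single return type.
fn numbers(sorted: bool) -> impl Iterator<Item = u32> {
    let data = [3u32, 1, 2];
    if sorted {
        let mut v = data.to_vec();
        v.sort_unstable();
        Either::Left(v.into_iter())   // std::vec::IntoIter<u32>
    } else {
        Either::Right(data.into_iter()) // std::array::IntoIter<u32, 3>
    }
}
```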

View File

@@ -30,8 +30,6 @@ use crate::Opt;
pub mod documents;
pub mod facet_search;
pub mod render;
mod render_analytics;
pub mod search;
mod search_analytics;
#[cfg(test)]
@@ -78,7 +76,6 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
.service(web::scope("/documents").configure(documents::configure))
.service(web::scope("/search").configure(search::configure))
.service(web::scope("/facet-search").configure(facet_search::configure))
.service(web::scope("/render").configure(render::configure))
.service(web::scope("/similar").configure(similar::configure))
.service(web::scope("/settings").configure(settings::configure)),
);

View File

@@ -1,585 +0,0 @@
use std::collections::BTreeMap;
use actix_web::web::{self, Data};
use actix_web::{HttpRequest, HttpResponse};
use deserr::actix_web::AwebJson;
use deserr::Deserr;
use index_scheduler::IndexScheduler;
use liquid::ValueView;
use meilisearch_types::deserr::DeserrJsonError;
use meilisearch_types::error::deserr_codes::{
InvalidRenderInput, InvalidRenderInputDocumentId, InvalidRenderInputFields,
InvalidRenderInputInline, InvalidRenderTemplate, InvalidRenderTemplateId,
InvalidRenderTemplateInline,
};
use meilisearch_types::error::Code;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::keys::actions;
use meilisearch_types::milli::prompt::{get_document, get_inline_document_fields};
use meilisearch_types::milli::vector::json_template::{self, JsonTemplate};
use meilisearch_types::{heed, milli, Index};
use serde::Serialize;
use serde_json::Value;
use tracing::debug;
use utoipa::{OpenApi, ToSchema};
use crate::analytics::Analytics;
use crate::extractors::authentication::policies::DoubleActionPolicy;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::render_analytics::RenderAggregator;
#[derive(OpenApi)]
#[openapi(
paths(render_post),
tags((
name = "Render documents",
description = "The /render route allows rendering templates used by Meilisearch.",
external_docs(url = "https://www.meilisearch.com/docs/reference/api/render"),
)),
)]
pub struct RenderApi;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(render_post))));
}
/// Render documents with POST
#[utoipa::path(
post,
path = "{indexUid}/render",
tag = "Render documents",
security(("Bearer" = ["settings.get,documents.get", "*.get", "*"])),
params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),
request_body = RenderQuery,
responses(
(status = 200, description = "The rendered result is returned along with the template", body = RenderResult, content_type = "application/json", example = json!(
{
"template": "{{ doc.breed }} called {{ doc.name }}",
"rendered": "A Jack Russell called Iko"
}
)),
(status = 404, description = "Template or document not found", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "Document with ID `9999` not found.",
"code": "render_document_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#render_document_not_found"
}
)),
(status = 400, description = "Parameters are incorrect", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "Indexing fragment `mistake` does not exist for embedder `rest`.\n Hint: Available indexing fragments are `basic`, `withBreed`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
)),
)
)]
pub async fn render_post(
index_scheduler: GuardedData<
DoubleActionPolicy<{ actions::SETTINGS_GET }, { actions::DOCUMENTS_GET }>,
Data<IndexScheduler>,
>,
index_uid: web::Path<String>,
params: AwebJson<RenderQuery, DeserrJsonError>,
req: HttpRequest,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let index = index_scheduler.index(&index_uid)?;
let query = params.into_inner();
debug!(parameters = ?query, "Render document");
let mut aggregate = RenderAggregator::from_query(&query);
let result = render(index, query).await;
if result.is_ok() {
aggregate.succeed();
}
analytics.publish(aggregate, &req);
let result = result?;
debug!(returns = ?result, "Render document");
Ok(HttpResponse::Ok().json(result))
}
enum FragmentKind {
Indexing,
Search,
}
impl FragmentKind {
fn adjective(&self) -> &'static str {
match self {
FragmentKind::Indexing => "indexing",
FragmentKind::Search => "search",
}
}
fn adjective_capitalized(&self) -> &'static str {
match self {
FragmentKind::Indexing => "Indexing",
FragmentKind::Search => "Search",
}
}
}
enum RenderError {
MultipleTemplates,
MissingTemplate,
EmptyTemplateId,
UnknownTemplateRoot(String),
MissingEmbedderName {
available: Vec<String>,
},
EmbedderDoesNotExist {
embedder_name: String,
available: Vec<String>,
},
EmbedderUsesFragments {
embedder_name: String,
},
MissingTemplateAfterEmbedder {
embedder_name: String,
available_indexing_fragments: Vec<String>,
available_search_fragments: Vec<String>,
},
UnknownTemplatePrefix {
embedder_name: String,
found: String,
available_indexing_fragments: Vec<String>,
available_search_fragments: Vec<String>,
},
ResponseError(ResponseError),
MissingFragment {
embedder_name: String,
kind: FragmentKind,
available: Vec<String>,
},
FragmentDoesNotExist {
embedder_name: String,
fragment_name: String,
kind: FragmentKind,
available: Vec<String>,
},
LeftOverToken(String),
MissingChatCompletionTemplate,
UnknownChatCompletionTemplate(String),
DocumentNotFound(String),
BothInlineDocAndDocId,
TemplateParsing(json_template::Error),
TemplateRendering(json_template::Error),
FieldsUnavailable,
FieldsAlreadyPresent,
FieldsWithoutDocument,
CouldNotHandleInput,
}
impl From<heed::Error> for RenderError {
fn from(error: heed::Error) -> Self {
RenderError::ResponseError(error.into())
}
}
impl From<milli::Error> for RenderError {
fn from(error: milli::Error) -> Self {
RenderError::ResponseError(error.into())
}
}
use RenderError::*;
impl From<RenderError> for ResponseError {
fn from(error: RenderError) -> Self {
match error {
MultipleTemplates => ResponseError::from_msg(
String::from("Cannot provide both an inline template and a template ID."),
Code::InvalidRenderTemplate,
),
MissingTemplate => ResponseError::from_msg(
String::from("No template provided. Please provide either an inline template or a template ID."),
Code::InvalidRenderTemplate,
),
EmptyTemplateId => ResponseError::from_msg(
String::from("The template ID is empty.\n Hint: Valid prefixes are `embedders` or `chatCompletions`."),
Code::InvalidRenderTemplateId,
),
UnknownTemplateRoot(root) => ResponseError::from_msg(
format!("Template ID must start with `embedders` or `chatCompletions`, but found `{root}`."),
Code::InvalidRenderTemplateId,
),
MissingEmbedderName { mut available } => {
available.sort_unstable();
ResponseError::from_msg(
format!("Template ID configured with `embedders` but no embedder name provided.\n Hint: Available embedders are {}.",
available.iter().map(|s| format!("`{s}`")).collect::<Vec<_>>().join(", ")),
Code::InvalidRenderTemplateId,
)
},
EmbedderDoesNotExist { embedder_name, mut available } => {
available.sort_unstable();
ResponseError::from_msg(
format!("Embedder `{embedder_name}` does not exist.\n Hint: Available embedders are {}.",
available.iter().map(|s| format!("`{s}`")).collect::<Vec<_>>().join(", ")),
Code::InvalidRenderTemplateId,
)
},
EmbedderUsesFragments { embedder_name } => ResponseError::from_msg(
format!("Requested document template for embedder `{embedder_name}` but it uses fragments.\n Hint: Use `indexingFragments` or `searchFragments` instead."),
Code::InvalidRenderTemplateId,
),
MissingTemplateAfterEmbedder { embedder_name, mut available_indexing_fragments, mut available_search_fragments } => {
if available_indexing_fragments.is_empty() && available_search_fragments.is_empty() {
ResponseError::from_msg(
format!("Missing template id after embedder `{embedder_name}`.\n Hint: Available fragments: `documentTemplate`."),
Code::InvalidRenderTemplateId,
)
} else {
available_indexing_fragments.sort_unstable();
available_search_fragments.sort_unstable();
ResponseError::from_msg(
format!("Template ID configured with `embedders.{embedder_name}` but no template kind provided.\n Hint: Available fragments are {}.",
available_indexing_fragments.iter().map(|s| format!("`indexingFragments.{s}`")).chain(
available_search_fragments.iter().map(|s| format!("`searchFragments.{s}`"))).collect::<Vec<_>>().join(", ")),
Code::InvalidRenderTemplateId,
)
}
},
UnknownTemplatePrefix { embedder_name, found, mut available_indexing_fragments, mut available_search_fragments } => {
if available_indexing_fragments.is_empty() && available_search_fragments.is_empty() {
ResponseError::from_msg(
format!("Wrong template `{found}` after embedder `{embedder_name}`.\n Hint: Available fragments: `documentTemplate`."),
Code::InvalidRenderTemplateId,
)
} else {
available_indexing_fragments.sort_unstable();
available_search_fragments.sort_unstable();
ResponseError::from_msg(
format!("Wrong template `{found}` after embedder `{embedder_name}`.\n Hint: Available fragments are {}.",
available_indexing_fragments.iter().map(|s| format!("`indexingFragments.{s}`")).chain(
available_search_fragments.iter().map(|s| format!("`searchFragments.{s}`"))).collect::<Vec<_>>().join(", ")),
Code::InvalidRenderTemplateId,
)
}
},
RenderError::ResponseError(response_error) => response_error,
MissingFragment { embedder_name, kind, mut available } => {
available.sort_unstable();
ResponseError::from_msg(
format!("{} fragment name was not provided.\n Hint: Available {} fragments for embedder `{embedder_name}` are {}.",
kind.adjective_capitalized(),
kind.adjective(),
available.iter().map(|s| format!("`{s}`")).collect::<Vec<_>>().join(", ")),
Code::InvalidRenderTemplateId,
)
},
FragmentDoesNotExist { embedder_name, fragment_name, kind, mut available } => {
available.sort_unstable();
ResponseError::from_msg(
format!("{} fragment `{fragment_name}` does not exist for embedder `{embedder_name}`.\n Hint: Available {} fragments are {}.",
kind.adjective_capitalized(),
kind.adjective(),
available.iter().map(|s| format!("`{s}`")).collect::<Vec<_>>().join(", ")),
Code::InvalidRenderTemplateId,
)
},
LeftOverToken(token) => ResponseError::from_msg(
format!("Leftover token `{token}` after parsing template ID"),
Code::InvalidRenderTemplateId,
),
MissingChatCompletionTemplate => ResponseError::from_msg(
String::from("Missing chat completion template ID. The only available template is `documentTemplate`."),
Code::InvalidRenderTemplateId,
),
UnknownChatCompletionTemplate(id) => ResponseError::from_msg(
format!("Unknown chat completion template ID `{id}`. The only available template is `documentTemplate`."),
Code::InvalidRenderTemplateId,
),
DocumentNotFound(doc_id) => ResponseError::from_msg(
format!("Document with ID `{doc_id}` not found."),
Code::RenderDocumentNotFound,
),
BothInlineDocAndDocId => ResponseError::from_msg(
String::from("A document id was provided but adding it to the input would overwrite the `doc` field that you already defined inline."),
Code::InvalidRenderInput,
),
TemplateParsing(err) => ResponseError::from_msg(
format!("Error parsing template: {}", err.parsing_error("input")),
Code::TemplateParsingError,
),
TemplateRendering(err) => ResponseError::from_msg(
format!("Error rendering template: {}", err.rendering_error("input")),
Code::TemplateRenderingError,
),
FieldsUnavailable => ResponseError::from_msg(
String::from("Fields are not available on fragments.\n Hint: Remove the `insertFields` parameter or set it to `false`."),
Code::InvalidRenderInputFields,
),
FieldsAlreadyPresent => ResponseError::from_msg(
String::from("Fields were provided in the inline input but `insertFields` is set to `true`.\n Hint: Remove the `insertFields` parameter or set it to `false`."),
Code::InvalidRenderInputFields,
),
FieldsWithoutDocument => ResponseError::from_msg(
String::from("Fields were requested but no document was provided.\n Hint: Provide a document ID or inline document."),
Code::InvalidRenderInputFields,
),
CouldNotHandleInput => ResponseError::from_msg(
String::from("Could not handle the input provided."),
Code::InvalidRenderInput,
),
}
}
}
async fn render(index: Index, query: RenderQuery) -> Result<RenderResult, RenderError> {
let rtxn = index.read_txn()?;
let (template, fields_available) = match (query.template.inline, query.template.id) {
(Some(inline), None) => (inline, true),
(None, Some(id)) => {
let mut parts = id.split('.');
let root = parts.next().ok_or(EmptyTemplateId)?;
let template = match root {
"embedders" => {
let index_embedding_configs = index.embedding_configs();
let embedding_configs = index_embedding_configs.embedding_configs(&rtxn)?;
let embedder_name = parts.next().ok_or_else(|| MissingEmbedderName {
available: embedding_configs.iter().map(|c| c.name.clone()).collect(),
})?;
let embedding_config = embedding_configs
.iter()
.find(|config| config.name == embedder_name)
.ok_or_else(|| EmbedderDoesNotExist {
embedder_name: embedder_name.to_string(),
available: embedding_configs.iter().map(|c| c.name.clone()).collect(),
})?;
let template_kind =
parts.next().ok_or_else(|| MissingTemplateAfterEmbedder {
embedder_name: embedder_name.to_string(),
available_indexing_fragments: embedding_config
.config
.embedder_options
.indexing_fragments(),
available_search_fragments: embedding_config
.config
.embedder_options
.search_fragments(),
})?;
match template_kind {
"documentTemplate" | "documenttemplate" => {
if !embedding_config.fragments.as_slice().is_empty() {
return Err(EmbedderUsesFragments {
embedder_name: embedder_name.to_string(),
});
}
(
serde_json::Value::String(
embedding_config.config.prompt.template.clone(),
),
true,
)
}
"indexingFragments" | "indexingfragments" => {
let fragment_name = parts.next().ok_or_else(|| MissingFragment {
embedder_name: embedder_name.to_string(),
kind: FragmentKind::Indexing,
available: embedding_config
.config
.embedder_options
.indexing_fragments(),
})?;
let fragment = embedding_config
.config
.embedder_options
.indexing_fragment(fragment_name)
.ok_or_else(|| FragmentDoesNotExist {
embedder_name: embedder_name.to_string(),
fragment_name: fragment_name.to_string(),
kind: FragmentKind::Indexing,
available: embedding_config
.config
.embedder_options
.indexing_fragments(),
})?;
(fragment.clone(), false)
}
"searchFragments" | "searchfragments" => {
let fragment_name = parts.next().ok_or_else(|| MissingFragment {
embedder_name: embedder_name.to_string(),
kind: FragmentKind::Search,
available: embedding_config
.config
.embedder_options
.search_fragments(),
})?;
let fragment = embedding_config
.config
.embedder_options
.search_fragment(fragment_name)
.ok_or_else(|| FragmentDoesNotExist {
embedder_name: embedder_name.to_string(),
fragment_name: fragment_name.to_string(),
kind: FragmentKind::Search,
available: embedding_config
.config
.embedder_options
.search_fragments(),
})?;
(fragment.clone(), false)
}
found => {
return Err(UnknownTemplatePrefix {
embedder_name: embedder_name.to_string(),
found: found.to_string(),
available_indexing_fragments: embedding_config
.config
.embedder_options
.indexing_fragments(),
available_search_fragments: embedding_config
.config
.embedder_options
.search_fragments(),
})
}
}
}
"chatCompletions" | "chatcompletions" => {
let template_name = parts.next().ok_or(MissingChatCompletionTemplate)?;
if template_name != "documentTemplate" {
return Err(UnknownChatCompletionTemplate(template_name.to_string()));
}
let chat_config = index.chat_config(&rtxn)?;
(serde_json::Value::String(chat_config.prompt.template.clone()), true)
}
"" => return Err(EmptyTemplateId),
unknown => {
return Err(UnknownTemplateRoot(unknown.to_string()));
}
};
if let Some(next) = parts.next() {
return Err(LeftOverToken(next.to_string()));
}
template
}
(Some(_), Some(_)) => return Err(MultipleTemplates),
(None, None) => return Err(MissingTemplate),
};
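Taken together, the parser accepts four template ID shapes: `embedders.{embedder}.documentTemplate`, `embedders.{embedder}.indexingFragments.{fragment}`, `embedders.{embedder}.searchFragments.{fragment}`, and `chatCompletions.documentTemplate` (each keyword is also accepted in all-lowercase). Any extra trailing segment is rejected as a leftover token.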
let fields_required = query.input.as_ref().and_then(|i| i.insert_fields);
let fields_already_present = query
.input
.as_ref()
.is_some_and(|i| i.inline.as_ref().is_some_and(|i| i.get("fields").is_some()));
let fields_probably_used = template.as_str().is_none_or(|s| s.contains("fields"));
let has_inline_doc = query
.input
.as_ref()
.is_some_and(|i| i.inline.as_ref().is_some_and(|i| i.get("doc").is_some()));
let has_document_id = query.input.as_ref().is_some_and(|i| i.document_id.is_some());
let has_doc = has_inline_doc || has_document_id;
let insert_fields = match fields_required {
Some(insert_fields) => insert_fields,
None => fields_available && has_doc && fields_probably_used && !fields_already_present,
};
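In other words, when `insertFields` is omitted, fields are injected only if all four conditions hold: the selected template kind exposes fields (document templates do, fragments do not), a document was supplied, the template text mentions `fields`, and the inline input does not already define a `fields` key.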
if insert_fields && !fields_available {
return Err(FieldsUnavailable);
}
if insert_fields && fields_already_present {
return Err(FieldsAlreadyPresent);
}
if insert_fields && !has_doc {
return Err(FieldsWithoutDocument);
}
if has_inline_doc && has_document_id {
return Err(BothInlineDocAndDocId);
}
let mut rendered = Value::Null;
if let Some(input) = query.input {
let media = input.inline.unwrap_or_default();
let mut object = liquid::to_object(&media).map_err(|_| CouldNotHandleInput)?;
if let Some(doc) = media.get("doc") {
if insert_fields {
let fields = get_inline_document_fields(&index, &rtxn, doc)?
.map_err(|_| CouldNotHandleInput)?;
object.insert("fields".into(), fields.to_value());
}
}
if let Some(document_id) = input.document_id {
let (document, fields) = get_document(&index, &rtxn, &document_id, insert_fields)?
.ok_or_else(|| DocumentNotFound(document_id))?;
object.insert("doc".into(), document);
if let Some(fields) = fields {
object.insert("fields".into(), fields);
}
}
let json_template = JsonTemplate::new(template.clone()).map_err(TemplateParsing)?;
rendered = json_template.render(&object).map_err(TemplateRendering)?;
}
Ok(RenderResult { template, rendered })
}
#[derive(Debug, Clone, PartialEq, Deserr, ToSchema)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
pub struct RenderQuery {
#[deserr(error = DeserrJsonError<InvalidRenderTemplate>)]
pub template: RenderQueryTemplate,
#[deserr(default, error = DeserrJsonError<InvalidRenderInput>)]
pub input: Option<RenderQueryInput>,
}
#[derive(Debug, Clone, PartialEq, Deserr, ToSchema)]
#[deserr(error = DeserrJsonError<InvalidRenderTemplate>, rename_all = camelCase, deny_unknown_fields)]
pub struct RenderQueryTemplate {
#[deserr(default, error = DeserrJsonError<InvalidRenderTemplateId>)]
pub id: Option<String>,
#[deserr(default, error = DeserrJsonError<InvalidRenderTemplateInline>)]
pub inline: Option<serde_json::Value>,
}
#[derive(Debug, Clone, Default, PartialEq, Deserr, ToSchema)]
#[deserr(error = DeserrJsonError<InvalidRenderInput>, rename_all = camelCase, deny_unknown_fields)]
pub struct RenderQueryInput {
#[deserr(default, error = DeserrJsonError<InvalidRenderInputDocumentId>)]
pub document_id: Option<String>,
#[deserr(default, error = DeserrJsonError<InvalidRenderInputFields>)]
pub insert_fields: Option<bool>,
#[deserr(default, error = DeserrJsonError<InvalidRenderInputInline>)]
pub inline: Option<BTreeMap<String, serde_json::Value>>,
}
#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
pub struct RenderResult {
template: serde_json::Value,
rendered: serde_json::Value,
}

View File

@@ -1,99 +0,0 @@
use serde_json::json;
use crate::analytics::Aggregate;
use crate::routes::indexes::render::RenderQuery;
#[derive(Default)]
pub struct RenderAggregator {
// requests
total_received: usize,
total_succeeded: usize,
// parameters
template_inline: bool,
template_id: bool,
input_inline: bool,
input_id: bool,
input_omitted: bool,
fields_forced: bool,
fields_disabled: bool,
}
impl RenderAggregator {
#[allow(clippy::field_reassign_with_default)]
pub fn from_query(query: &RenderQuery) -> Self {
let RenderQuery { template, input } = query;
let mut ret = Self::default();
ret.total_received = 1;
ret.template_inline = template.inline.is_some();
ret.template_id = template.id.is_some();
ret.input_inline = input.as_ref().is_some_and(|i| i.inline.is_some());
ret.input_id = input.as_ref().is_some_and(|i| i.document_id.is_some());
ret.input_omitted = input.as_ref().is_none();
ret.fields_forced = input.as_ref().is_some_and(|i| i.insert_fields.is_some());
ret.fields_disabled = input.as_ref().is_some_and(|i| i.insert_fields == Some(false));
ret
}
pub fn succeed(&mut self) {
self.total_succeeded += 1;
}
}
impl Aggregate for RenderAggregator {
fn event_name(&self) -> &'static str {
"Documents Rendered"
}
fn aggregate(mut self: Box<Self>, new: Box<Self>) -> Box<Self> {
self.total_received += new.total_received;
self.total_succeeded += new.total_succeeded;
self.template_inline |= new.template_inline;
self.template_id |= new.template_id;
self.input_inline |= new.input_inline;
self.input_id |= new.input_id;
self.input_omitted |= new.input_omitted;
self.fields_forced |= new.fields_forced;
self.fields_disabled |= new.fields_disabled;
self
}
fn into_event(self: Box<Self>) -> serde_json::Value {
let Self {
total_received,
total_succeeded,
template_inline,
template_id,
input_inline,
input_id,
input_omitted,
fields_forced,
fields_disabled,
} = *self;
json!({
"requests": {
"total_received": total_received,
"total_succeeded": total_succeeded,
"total_failed": total_received.saturating_sub(total_succeeded) // just to be sure we never panics
},
"template": {
"inline": template_inline,
"id": template_id,
},
"input": {
"inline": input_inline,
"id": input_id,
"omitted": input_omitted,
"fields_forced": fields_forced,
"fields_disabled": fields_disabled,
},
})
}
}

View File

@@ -745,10 +745,9 @@ impl SearchByIndex {
match sort.iter().map(|s| milli::AscDesc::from_str(s)).collect() {
Ok(sorts) => sorts,
Err(asc_desc_error) => {
return Err(milli::Error::from(milli::SortError::from(
asc_desc_error,
))
.into())
return Err(milli::SortError::from(asc_desc_error)
.into_search_error()
.into())
}
};
Some(sorts)

View File

@@ -16,7 +16,7 @@ use meilisearch_types::error::{Code, ResponseError};
use meilisearch_types::heed::RoTxn;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::locales::Locale;
use meilisearch_types::milli::index::{self, SearchParameters};
use meilisearch_types::milli::index::{self, EmbeddingsWithMetadata, SearchParameters};
use meilisearch_types::milli::score_details::{ScoreDetails, ScoringStrategy};
use meilisearch_types::milli::vector::parsed_vectors::ExplicitVectors;
use meilisearch_types::milli::vector::Embedder;
@@ -1051,6 +1051,7 @@ pub fn prepare_search<'t>(
.unwrap_or(DEFAULT_PAGINATION_MAX_TOTAL_HITS);
search.exhaustive_number_hits(is_finite_pagination);
search.max_total_hits(Some(max_total_hits));
search.scoring_strategy(
if query.show_ranking_score
|| query.show_ranking_score_details
@@ -1091,7 +1092,7 @@ pub fn prepare_search<'t>(
let sort = match sort.iter().map(|s| AscDesc::from_str(s)).collect() {
Ok(sorts) => sorts,
Err(asc_desc_error) => {
return Err(milli::Error::from(SortError::from(asc_desc_error)).into())
return Err(SortError::from(asc_desc_error).into_search_error().into())
}
};
@@ -1527,8 +1528,11 @@ impl<'a> HitMaker<'a> {
Some(Value::Object(map)) => map,
_ => Default::default(),
};
for (name, (vector, regenerate)) in self.index.embeddings(self.rtxn, id)? {
let embeddings = ExplicitVectors { embeddings: Some(vector.into()), regenerate };
for (name, EmbeddingsWithMetadata { embeddings, regenerate, has_fragments: _ }) in
self.index.embeddings(self.rtxn, id)?
{
let embeddings =
ExplicitVectors { embeddings: Some(embeddings.into()), regenerate };
vectors.insert(
name,
serde_json::to_value(embeddings).map_err(InternalError::SerdeJson)?,

View File

@@ -419,14 +419,14 @@ async fn error_add_api_key_invalid_parameters_actions() {
let (response, code) = server.add_api_key(content).await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r###"
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r#"
{
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`",
"message": "Unknown value `doc.add` at `.actions[0]`: expected one of `*`, `*.get`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
}
"###);
"#);
}
#[actix_rt::test]
@@ -790,7 +790,7 @@ async fn list_api_keys() {
meili_snap::snapshot!(code, @"201 Created");
let (response, code) = server.list_api_keys("").await;
meili_snap::snapshot!(meili_snap::json_string!(response, { ".results[].createdAt" => "[ignored]", ".results[].updatedAt" => "[ignored]", ".results[].uid" => "[ignored]", ".results[].key" => "[ignored]" }), @r###"
meili_snap::snapshot!(meili_snap::json_string!(response, { ".results[].createdAt" => "[ignored]", ".results[].updatedAt" => "[ignored]", ".results[].uid" => "[ignored]", ".results[].key" => "[ignored]" }), @r#"
{
"results": [
{
@@ -850,6 +850,22 @@ async fn list_api_keys() {
"createdAt": "[ignored]",
"updatedAt": "[ignored]"
},
{
"name": "Default Read-Only Admin API Key",
"description": "Use it to read information across the whole database. Caution! Do not expose this key on a public frontend",
"key": "[ignored]",
"uid": "[ignored]",
"actions": [
"*.get",
"keys.get"
],
"indexes": [
"*"
],
"expiresAt": null,
"createdAt": "[ignored]",
"updatedAt": "[ignored]"
},
{
"name": "Default Chat API Key",
"description": "Use it to chat and search from the frontend",
@@ -869,9 +885,9 @@
],
"offset": 0,
"limit": 20,
"total": 4
"total": 5
}
"###);
"#);
meili_snap::snapshot!(code, @"200 OK");
}

View File

@@ -1,7 +1,7 @@
use std::collections::{HashMap, HashSet};
use ::time::format_description::well_known::Rfc3339;
use maplit::hashmap;
use maplit::{hashmap, hashset};
use meilisearch::Opt;
use once_cell::sync::Lazy;
use tempfile::TempDir;
@@ -10,103 +10,73 @@ use time::{Duration, OffsetDateTime};
use crate::common::{default_settings, Server, Value};
use crate::json;
macro_rules! hashset {
( $( $val:tt ),* $(,)? ) => {{
let mut set: HashSet<&'static [&'static str]> = HashSet::new();
$(
hashset!(@insert set, $val);
)*
set
}};
pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'static str>>> =
Lazy::new(|| {
let authorizations = hashmap! {
("POST", "/multi-search") => hashset!{"search", "*"},
("POST", "/indexes/products/search") => hashset!{"search", "*"},
("GET", "/indexes/products/search") => hashset!{"search", "*"},
("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
("GET", "/indexes/products/documents") => hashset!{"documents.get", "documents.*", "*"},
("POST", "/indexes/products/documents/fetch") => hashset!{"documents.get", "documents.*", "*"},
("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"},
("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"},
("POST", "/indexes/products/documents/delete-batch") => hashset!{"documents.delete", "documents.*", "*"},
("POST", "/indexes/products/documents/delete") => hashset!{"documents.delete", "documents.*", "*"},
("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"},
("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"},
("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"},
("GET", "/tasks/0") => hashset!{"tasks.get", "tasks.*", "*"},
("PATCH", "/indexes/products/") => hashset!{"indexes.update", "indexes.*", "*"},
("GET", "/indexes/products/") => hashset!{"indexes.get", "indexes.*", "*"},
("DELETE", "/indexes/products/") => hashset!{"indexes.delete", "indexes.*", "*"},
("POST", "/indexes") => hashset!{"indexes.create", "indexes.*", "*"},
("GET", "/indexes") => hashset!{"indexes.get", "indexes.*", "*"},
("POST", "/swap-indexes") => hashset!{"indexes.swap", "indexes.*", "*"},
("GET", "/indexes/products/settings") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/ranking-rules") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/stop-words") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/synonyms") => hashset!{"settings.get", "settings.*", "*"},
("DELETE", "/indexes/products/settings") => hashset!{"settings.update", "settings.*", "*"},
("PATCH", "/indexes/products/settings") => hashset!{"settings.update", "settings.*", "*"},
("PATCH", "/indexes/products/settings/typo-tolerance") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/ranking-rules") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/stop-words") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/synonyms") => hashset!{"settings.update", "settings.*", "*"},
("GET", "/indexes/products/stats") => hashset!{"stats.get", "stats.*", "*"},
("GET", "/stats") => hashset!{"stats.get", "stats.*", "*"},
("POST", "/dumps") => hashset!{"dumps.create", "dumps.*", "*"},
("POST", "/snapshots") => hashset!{"snapshots.create", "snapshots.*", "*"},
("GET", "/version") => hashset!{"version", "*"},
("GET", "/metrics") => hashset!{"metrics.get", "metrics.*", "*"},
("POST", "/logs/stream") => hashset!{"metrics.get", "metrics.*", "*"},
("DELETE", "/logs/stream") => hashset!{"metrics.get", "metrics.*", "*"},
("PATCH", "/keys/mykey/") => hashset!{"keys.update", "*"},
("GET", "/keys/mykey/") => hashset!{"keys.get", "*"},
("DELETE", "/keys/mykey/") => hashset!{"keys.delete", "*"},
("POST", "/keys") => hashset!{"keys.create", "*"},
("GET", "/keys") => hashset!{"keys.get", "*"},
("GET", "/experimental-features") => hashset!{"experimental.get", "*"},
("PATCH", "/experimental-features") => hashset!{"experimental.update", "*"},
("GET", "/network") => hashset!{"network.get", "*"},
("PATCH", "/network") => hashset!{"network.update", "*"},
};
// Match array-like input: ["a", "b"]
(@insert $set:ident, [ $($elem:literal),* ]) => {{
const ITEM: &[&str] = &[$($elem),*];
$set.insert(ITEM);
}};
// Match single literal: "a"
(@insert $set:ident, $val:literal) => {{
const ITEM: &[&str] = &[$val];
$set.insert(ITEM);
}};
}
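For readers unfamiliar with the `@insert` tt-munching pattern, both call forms end up as `&'static [&'static str]` entries; roughly (a sketch, assuming the macro and imports above are in scope):

```rust
// Single literals become one-element slices; bracketed groups stay grouped.
let set = hashset! { "documents.get", ["settings.get", "documents.get"] };
// `set` now contains:
//   &["documents.get"] as &'static [&'static str]
//   &["settings.get", "documents.get"]
```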
#[allow(clippy::type_complexity)]
pub static AUTHORIZATIONS: Lazy<
HashMap<(&'static str, &'static str), HashSet<&'static [&'static str]>>,
> = Lazy::new(|| {
let authorizations = hashmap! {
("POST", "/multi-search") => hashset!{"search", "*"},
("POST", "/indexes/products/search") => hashset!{"search", "*"},
("GET", "/indexes/products/search") => hashset!{"search", "*"},
("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
("GET", "/indexes/products/documents") => hashset!{"documents.get", "documents.*", "*"},
("POST", "/indexes/products/documents/fetch") => hashset!{"documents.get", "documents.*", "*"},
("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"},
("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"},
("POST", "/indexes/products/documents/delete-batch") => hashset!{"documents.delete", "documents.*", "*"},
("POST", "/indexes/products/documents/delete") => hashset!{"documents.delete", "documents.*", "*"},
("POST", "/indexes/products/render") => hashset!{["settings.get", "documents.get"], ["documents.*", "settings.get"], ["settings.*", "documents.get"], "*"},
("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"},
("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"},
("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"},
("GET", "/tasks/0") => hashset!{"tasks.get", "tasks.*", "*"},
("PATCH", "/indexes/products/") => hashset!{"indexes.update", "indexes.*", "*"},
("GET", "/indexes/products/") => hashset!{"indexes.get", "indexes.*", "*"},
("DELETE", "/indexes/products/") => hashset!{"indexes.delete", "indexes.*", "*"},
("POST", "/indexes") => hashset!{"indexes.create", "indexes.*", "*"},
("GET", "/indexes") => hashset!{"indexes.get", "indexes.*", "*"},
("POST", "/swap-indexes") => hashset!{"indexes.swap", "indexes.*", "*"},
("GET", "/indexes/products/settings") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/ranking-rules") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/stop-words") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/synonyms") => hashset!{"settings.get", "settings.*", "*"},
("DELETE", "/indexes/products/settings") => hashset!{"settings.update", "settings.*", "*"},
("PATCH", "/indexes/products/settings") => hashset!{"settings.update", "settings.*", "*"},
("PATCH", "/indexes/products/settings/typo-tolerance") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/ranking-rules") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/stop-words") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/synonyms") => hashset!{"settings.update", "settings.*", "*"},
("GET", "/indexes/products/stats") => hashset!{"stats.get", "stats.*", "*"},
("GET", "/stats") => hashset!{"stats.get", "stats.*", "*"},
("POST", "/dumps") => hashset!{"dumps.create", "dumps.*", "*"},
("POST", "/snapshots") => hashset!{"snapshots.create", "snapshots.*", "*"},
("GET", "/version") => hashset!{"version", "*"},
("GET", "/metrics") => hashset!{"metrics.get", "metrics.*", "*"},
("POST", "/logs/stream") => hashset!{"metrics.get", "metrics.*", "*"},
("DELETE", "/logs/stream") => hashset!{"metrics.get", "metrics.*", "*"},
("PATCH", "/keys/mykey/") => hashset!{"keys.update", "*"},
("GET", "/keys/mykey/") => hashset!{"keys.get", "*"},
("DELETE", "/keys/mykey/") => hashset!{"keys.delete", "*"},
("POST", "/keys") => hashset!{"keys.create", "*"},
("GET", "/keys") => hashset!{"keys.get", "*"},
("GET", "/experimental-features") => hashset!{"experimental.get", "*"},
("PATCH", "/experimental-features") => hashset!{"experimental.update", "*"},
("GET", "/network") => hashset!{"network.get", "*"},
("PATCH", "/network") => hashset!{"network.update", "*"},
};
authorizations
});
authorizations
});
pub static ALL_ACTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| {
AUTHORIZATIONS
.values()
.flat_map(|v| v.iter())
.flat_map(|v| v.iter())
.copied()
.collect::<HashSet<_>>()
AUTHORIZATIONS.values().cloned().reduce(|l, r| l.union(&r).cloned().collect()).unwrap()
});
static INVALID_RESPONSE: Lazy<Value> = Lazy::new(|| {
@@ -194,14 +164,13 @@ async fn error_access_unauthorized_index() {
async fn error_access_unauthorized_action() {
let mut server = Server::new_auth().await;
for ((method, route), actions) in AUTHORIZATIONS.iter() {
for ((method, route), action) in AUTHORIZATIONS.iter() {
// create a new API key allowing only the needed action.
server.use_api_key(MASTER_KEY);
let actions = actions.iter().flat_map(|s| s.iter()).copied().collect::<HashSet<_>>();
let content = json!({
"indexes": ["products"],
"actions": ALL_ACTIONS.difference(&actions).collect::<Vec<_>>(),
"actions": ALL_ACTIONS.difference(action).collect::<Vec<_>>(),
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
@@ -225,7 +194,7 @@ async fn access_authorized_master_key() {
server.use_api_key(MASTER_KEY);
// master key must have access to all routes.
for (method, route) in AUTHORIZATIONS.keys() {
for ((method, route), _) in AUTHORIZATIONS.iter() {
let (response, code) = server.dummy_request(method, route).await;
assert_ne!(response, INVALID_RESPONSE.clone(), "on route: {:?} - {:?}", method, route);
@@ -239,13 +208,13 @@ async fn access_authorized_restricted_index() {
let enable_metrics = Opt { experimental_enable_metrics: true, ..default_settings(dir.path()) };
let mut server = Server::new_auth_with_options(enable_metrics, dir).await;
for ((method, route), actions) in AUTHORIZATIONS.iter() {
for actions in actions {
for action in actions {
// create a new API key allowing only the needed action.
server.use_api_key(MASTER_KEY);
let content = json!({
"indexes": ["products"],
"actions": actions,
"actions": [action],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
@@ -263,20 +232,20 @@ async fn access_authorized_restricted_index() {
assert_eq!(
response,
INVALID_METRICS_RESPONSE.clone(),
"on route: {:?} - {:?} with actions: {:?}",
"on route: {:?} - {:?} with action: {:?}",
method,
route,
actions
action
);
assert_eq!(code, 403);
} else {
assert_ne!(
response,
INVALID_RESPONSE.clone(),
"on route: {:?} - {:?} with actions: {:?}",
"on route: {:?} - {:?} with action: {:?}",
method,
route,
actions
action
);
assert_ne!(code, 403);
}
@@ -284,74 +253,18 @@ async fn access_authorized_restricted_index() {
}
}
#[actix_rt::test]
async fn unauthorized_partial_actions() {
let mut server = Server::new_auth().await;
server.use_admin_key(MASTER_KEY).await;
// create index `products`
let index = server.index("products");
let (response, code) = index.create(Some("id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await.succeeded();
// When multiple actions are necessary, the server mustn't accept any combination with one action missing.
for ((method, route), actions) in AUTHORIZATIONS.iter() {
for actions in actions {
if 2 <= actions.len() {
for excluded_action in *actions {
// create a new API key allowing all actions except one.
server.use_api_key(MASTER_KEY);
let actions = actions
.iter()
.filter(|&a| a != excluded_action)
.copied()
.collect::<HashSet<_>>();
let content = json!({
"indexes": ["products"],
"actions": actions,
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (mut response, code) = server.dummy_request(method, route).await;
response["message"] = serde_json::json!(null);
assert_eq!(
response,
INVALID_RESPONSE.clone(),
"on route: {:?} - {:?} with actions: {:?}",
method,
route,
actions
);
assert_eq!(code, 403, "{:?}", &response);
}
}
}
}
}
#[actix_rt::test]
async fn access_authorized_no_index_restriction() {
let mut server = Server::new_auth().await;
for ((method, route), actions) in AUTHORIZATIONS.iter() {
for actions in actions {
for action in actions {
// create a new API key allowing only the needed action.
server.use_api_key(MASTER_KEY);
let content = json!({
"indexes": ["*"],
"actions": actions,
"actions": [action],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
@@ -367,16 +280,12 @@ async fn access_authorized_no_index_restriction() {
assert_ne!(
response,
INVALID_RESPONSE.clone(),
"on route: {:?} - {:?} with actions: {:?}",
"on route: {:?} - {:?} with action: {:?}",
method,
route,
actions
);
assert_ne!(
code, 403,
"on route: {:?} - {:?} with action: {:?}",
method, route, actions
action
);
assert_ne!(code, 403, "on route: {:?} - {:?} with action: {:?}", method, route, action);
}
}
}
@@ -814,17 +723,10 @@ async fn error_creating_index_without_action() {
server.use_api_key(MASTER_KEY);
// create key with access on all indexes.
let create_index_actions = AUTHORIZATIONS
.get(&("POST", "/indexes"))
.unwrap()
.iter()
.flat_map(|s| s.iter())
.cloned()
.collect::<HashSet<_>>();
let content = json!({
"indexes": ["*"],
// Give all actions but the ones allowing to create an index.
"actions": ALL_ACTIONS.iter().cloned().filter(|a| !create_index_actions.contains(a)).collect::<Vec<_>>(),
"actions": ALL_ACTIONS.iter().cloned().filter(|a| !AUTHORIZATIONS.get(&("POST","/indexes")).unwrap().contains(a)).collect::<Vec<_>>(),
"expiresAt": "2050-11-13T00:00:00Z"
});
let (response, code) = server.add_api_key(content).await;

View File

@@ -91,14 +91,14 @@ async fn create_api_key_bad_actions() {
// can't parse
let (response, code) = server.add_api_key(json!({ "actions": ["doggo"] })).await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
snapshot!(json_string!(response), @r#"
{
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`",
"message": "Unknown value `doggo` at `.actions[0]`: expected one of `*`, `*.get`, `search`, `documents.*`, `documents.add`, `documents.get`, `documents.delete`, `indexes.*`, `indexes.create`, `indexes.get`, `indexes.update`, `indexes.delete`, `indexes.swap`, `tasks.*`, `tasks.cancel`, `tasks.delete`, `tasks.get`, `settings.*`, `settings.get`, `settings.update`, `stats.*`, `stats.get`, `metrics.*`, `metrics.get`, `dumps.*`, `dumps.create`, `snapshots.*`, `snapshots.create`, `version`, `keys.create`, `keys.get`, `keys.update`, `keys.delete`, `experimental.get`, `experimental.update`, `export`, `network.get`, `network.update`, `chatCompletions`, `chats.*`, `chats.get`, `chats.delete`, `chatsSettings.*`, `chatsSettings.get`, `chatsSettings.update`",
"code": "invalid_api_key_actions",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_api_key_actions"
}
"###);
"#);
}
#[actix_rt::test]

View File

@@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet};
use std::collections::HashMap;
use ::time::format_description::well_known::Rfc3339;
use maplit::hashmap;
@@ -467,7 +467,6 @@ async fn error_access_forbidden_routes() {
server.use_api_key(&web_token);
for ((method, route), actions) in AUTHORIZATIONS.iter() {
let actions = actions.iter().flat_map(|s| s.iter()).copied().collect::<HashSet<_>>();
if !actions.contains("search") {
let (mut response, code) = server.dummy_request(method, route).await;
response["message"] = serde_json::json!(null);

View File

@@ -457,11 +457,6 @@ impl<State> Index<'_, State> {
self.service.get(url).await
}
pub async fn render(&self, query: Value) -> (Value, StatusCode) {
let url = format!("/indexes/{}/render", urlencode(self.uid.as_ref()));
self.service.post_encoded(url, query, self.encoder).await
}
pub async fn settings(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}/settings", urlencode(self.uid.as_ref()));
self.service.get(url).await
@@ -567,5 +562,7 @@ pub struct GetAllDocumentsOptions {
pub offset: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub fields: Option<Vec<&'static str>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sort: Option<Vec<&'static str>>,
pub retrieve_vectors: bool,
}

View File

@@ -97,6 +97,7 @@ impl Server<Owned> {
self.use_api_key(master_key);
let (response, code) = self.list_api_keys("").await;
assert_eq!(200, code, "{:?}", response);
// TODO: relying on the order of keys is not ideal, we should use the name instead
let admin_key = &response["results"][1]["key"];
self.use_api_key(admin_key.as_str().unwrap());
}

View File

@@ -5,8 +5,8 @@ use urlencoding::encode as urlencode;
use crate::common::encoder::Encoder;
use crate::common::{
shared_does_not_exists_index, shared_empty_index, shared_index_with_test_set,
GetAllDocumentsOptions, Server, Value,
shared_does_not_exists_index, shared_empty_index, shared_index_with_geo_documents,
shared_index_with_test_set, GetAllDocumentsOptions, Server, Value,
};
use crate::json;
@@ -83,6 +83,311 @@ async fn get_document() {
);
}
#[actix_rt::test]
async fn get_document_sorted() {
let server = Server::new_shared();
let index = server.unique_index();
index.load_test_set().await;
let (task, _status_code) =
index.update_settings_sortable_attributes(json!(["age", "email", "gender", "name"])).await;
server.wait_task(task.uid()).await.succeeded();
let (response, _code) = index
.get_all_documents(GetAllDocumentsOptions {
fields: Some(vec!["id", "age", "email"]),
sort: Some(vec!["age:asc", "email:desc"]),
..Default::default()
})
.await;
let results = response["results"].as_array().unwrap();
snapshot!(json_string!(results), @r#"
[
{
"id": 5,
"age": 20,
"email": "warrenwatson@chorizon.com"
},
{
"id": 6,
"age": 20,
"email": "sheliaberry@chorizon.com"
},
{
"id": 57,
"age": 20,
"email": "kaitlinconner@chorizon.com"
},
{
"id": 45,
"age": 20,
"email": "irenebennett@chorizon.com"
},
{
"id": 40,
"age": 21,
"email": "staffordemerson@chorizon.com"
},
{
"id": 41,
"age": 21,
"email": "salinasgamble@chorizon.com"
},
{
"id": 63,
"age": 21,
"email": "knowleshebert@chorizon.com"
},
{
"id": 50,
"age": 21,
"email": "guerramcintyre@chorizon.com"
},
{
"id": 44,
"age": 22,
"email": "jonispears@chorizon.com"
},
{
"id": 56,
"age": 23,
"email": "tuckerbarry@chorizon.com"
},
{
"id": 51,
"age": 23,
"email": "keycervantes@chorizon.com"
},
{
"id": 60,
"age": 23,
"email": "jodyherrera@chorizon.com"
},
{
"id": 70,
"age": 23,
"email": "glassperkins@chorizon.com"
},
{
"id": 75,
"age": 24,
"email": "emmajacobs@chorizon.com"
},
{
"id": 68,
"age": 24,
"email": "angelinadyer@chorizon.com"
},
{
"id": 17,
"age": 25,
"email": "ortegabrennan@chorizon.com"
},
{
"id": 76,
"age": 25,
"email": "claricegardner@chorizon.com"
},
{
"id": 43,
"age": 25,
"email": "arnoldbender@chorizon.com"
},
{
"id": 12,
"age": 25,
"email": "aidakirby@chorizon.com"
},
{
"id": 9,
"age": 26,
"email": "kellimendez@chorizon.com"
}
]
"#);
let (response, _code) = index
.get_all_documents(GetAllDocumentsOptions {
fields: Some(vec!["id", "gender", "name"]),
sort: Some(vec!["gender:asc", "name:asc"]),
..Default::default()
})
.await;
let results = response["results"].as_array().unwrap();
snapshot!(json_string!(results), @r#"
[
{
"id": 3,
"name": "Adeline Flynn",
"gender": "female"
},
{
"id": 12,
"name": "Aida Kirby",
"gender": "female"
},
{
"id": 68,
"name": "Angelina Dyer",
"gender": "female"
},
{
"id": 15,
"name": "Aurelia Contreras",
"gender": "female"
},
{
"id": 36,
"name": "Barbra Valenzuela",
"gender": "female"
},
{
"id": 23,
"name": "Blanca Mcclain",
"gender": "female"
},
{
"id": 53,
"name": "Caitlin Burnett",
"gender": "female"
},
{
"id": 71,
"name": "Candace Sawyer",
"gender": "female"
},
{
"id": 65,
"name": "Carole Rowland",
"gender": "female"
},
{
"id": 33,
"name": "Cecilia Greer",
"gender": "female"
},
{
"id": 1,
"name": "Cherry Orr",
"gender": "female"
},
{
"id": 38,
"name": "Christina Short",
"gender": "female"
},
{
"id": 7,
"name": "Chrystal Boyd",
"gender": "female"
},
{
"id": 76,
"name": "Clarice Gardner",
"gender": "female"
},
{
"id": 73,
"name": "Eleanor Shepherd",
"gender": "female"
},
{
"id": 75,
"name": "Emma Jacobs",
"gender": "female"
},
{
"id": 16,
"name": "Estella Bass",
"gender": "female"
},
{
"id": 62,
"name": "Estelle Ramirez",
"gender": "female"
},
{
"id": 20,
"name": "Florence Long",
"gender": "female"
},
{
"id": 42,
"name": "Graciela Russell",
"gender": "female"
}
]
"#);
}
#[actix_rt::test]
async fn get_document_geosorted() {
let index = shared_index_with_geo_documents().await;
let (response, _code) = index
.get_all_documents(GetAllDocumentsOptions {
sort: Some(vec!["_geoPoint(45.4777599, 9.1967508):asc"]),
..Default::default()
})
.await;
let results = response["results"].as_array().unwrap();
snapshot!(json_string!(results), @r#"
[
{
"id": 2,
"name": "La Bella Italia",
"address": "456 Elm Street, Townsville",
"type": "Italian",
"rating": 9,
"_geo": {
"lat": "45.4777599",
"lng": "9.1967508"
}
},
{
"id": 1,
"name": "Taco Truck",
"address": "444 Salsa Street, Burritoville",
"type": "Mexican",
"rating": 9,
"_geo": {
"lat": 34.0522,
"lng": -118.2437
}
},
{
"id": 3,
"name": "CrĂŞpe Truck",
"address": "2 Billig Avenue, Rouenville",
"type": "French",
"rating": 10
}
]
"#);
}
#[actix_rt::test]
async fn get_document_sort_the_unsortable() {
let index = shared_index_with_test_set().await;
let (response, _code) = index
.get_all_documents(GetAllDocumentsOptions {
fields: Some(vec!["id", "name"]),
sort: Some(vec!["name:asc"]),
..Default::default()
})
.await;
snapshot!(json_string!(response), @r#"
{
"message": "Attribute `name` is not sortable. This index does not have configured sortable attributes.",
"code": "invalid_document_sort",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_sort"
}
"#);
}
#[actix_rt::test]
async fn error_get_unexisting_index_all_documents() {
let index = shared_does_not_exists_index().await;

View File

@@ -2,5 +2,4 @@ mod add_documents;
mod delete_documents;
mod errors;
mod get_documents;
mod render_documents;
mod update_documents;

View File

@@ -1,653 +0,0 @@
use crate::common::{shared_index_for_fragments, Server};
use crate::json;
use meili_snap::{json_string, snapshot};
#[actix_rt::test]
async fn empty_id() {
let index = shared_index_for_fragments().await;
let (value, code) = index.render(json! {{ "template": { "id": "" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "The template ID is empty.\n Hint: Valid prefixes are `embedders` or `chatCompletions`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn wrong_id_prefix() {
let index = shared_index_for_fragments().await;
let (value, code) = index.render(json! {{ "template": { "id": "wrong.disregarded" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Template ID must start with `embedders` or `chatCompletions`, but found `wrong`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn missing_embedder() {
let index = shared_index_for_fragments().await;
let (value, code) = index.render(json! {{ "template": { "id": "embedders" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Template ID configured with `embedders` but no embedder name provided.\n Hint: Available embedders are `rest`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn wrong_embedder() {
let index = shared_index_for_fragments().await;
let (value, code) =
index.render(json! {{ "template": { "id": "embedders.wrong.disregarded" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Embedder `wrong` does not exist.\n Hint: Available embedders are `rest`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn missing_template_kind() {
let index = shared_index_for_fragments().await;
let (value, code) = index.render(json! {{ "template": { "id": "embedders.rest" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Template ID configured with `embedders.rest` but no template kind provided.\n Hint: Available fragments are `indexingFragments.basic`, `indexingFragments.withBreed`, `searchFragments.justBreed`, `searchFragments.justName`, `searchFragments.query`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn wrong_template_kind() {
let index = shared_index_for_fragments().await;
let (value, code) =
index.render(json! {{ "template": { "id": "embedders.rest.wrong.disregarded" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Wrong template `wrong` after embedder `rest`.\n Hint: Available fragments are `indexingFragments.basic`, `indexingFragments.withBreed`, `searchFragments.justBreed`, `searchFragments.justName`, `searchFragments.query`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn document_template_on_fragmented_index() {
let index = shared_index_for_fragments().await;
let (value, code) =
index.render(json! {{ "template": { "id": "embedders.rest.documentTemplate" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Requested document template for embedder `rest` but it uses fragments.\n Hint: Use `indexingFragments` or `searchFragments` instead.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn missing_fragment_name() {
let index = shared_index_for_fragments().await;
let (value, code) =
index.render(json! {{ "template": { "id": "embedders.rest.indexingFragments" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Indexing fragment name was not provided.\n Hint: Available indexing fragments for embedder `rest` are `basic`, `withBreed`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
let (value, code) =
index.render(json! {{ "template": { "id": "embedders.rest.searchFragments" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Search fragment name was not provided.\n Hint: Available search fragments for embedder `rest` are `justBreed`, `justName`, `query`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn wrong_fragment_name() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{ "template": { "id": "embedders.rest.indexingFragments.wrong" }}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Indexing fragment `wrong` does not exist for embedder `rest`.\n Hint: Available indexing fragments are `basic`, `withBreed`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
let (value, code) =
index.render(json! {{ "template": { "id": "embedders.rest.searchFragments.wrong" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Search fragment `wrong` does not exist for embedder `rest`.\n Hint: Available search fragments are `justBreed`, `justName`, `query`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn leftover_tokens() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(
json! {{ "template": { "id": "embedders.rest.indexingFragments.withBreed.leftover" }}},
)
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Leftover token `leftover` after parsing template ID",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
let (value, code) = index
.render(json! {{"template": { "id": "embedders.rest.searchFragments.justBreed.leftover" }}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Leftover token `leftover` after parsing template ID",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
let (value, code) = index
.render(json! {{"template": { "id": "chatCompletions.documentTemplate.leftover" }}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Leftover token `leftover` after parsing template ID",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
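Note: taken together, these error cases pin down the template ID grammar. Below is a minimal sketch of that grammar; the enum, function, and error strings are illustrative stand-ins, not Meilisearch's actual parser, and semantic checks (e.g. rejecting `documentTemplate` on a fragments-configured embedder) are out of scope here.
// Accepted shapes, as exercised by the tests above:
//   embedders.<embedder>.documentTemplate
//   embedders.<embedder>.indexingFragments.<fragment>
//   embedders.<embedder>.searchFragments.<fragment>
//   chatCompletions.documentTemplate
#[derive(Debug)]
enum TemplateId {
    DocumentTemplate { embedder: String },
    IndexingFragment { embedder: String, fragment: String },
    SearchFragment { embedder: String, fragment: String },
    ChatCompletionsDocumentTemplate,
}

fn parse_template_id(id: &str) -> Result<TemplateId, String> {
    if id.is_empty() {
        return Err("the template ID is empty".into());
    }
    let mut parts = id.split('.');
    let parsed = match parts.next() {
        Some("embedders") => {
            let embedder = parts.next().ok_or("no embedder name provided")?.to_string();
            match parts.next().ok_or("no template kind provided")? {
                "documentTemplate" => TemplateId::DocumentTemplate { embedder },
                "indexingFragments" => TemplateId::IndexingFragment {
                    embedder,
                    fragment: parts.next().ok_or("indexing fragment name missing")?.to_string(),
                },
                "searchFragments" => TemplateId::SearchFragment {
                    embedder,
                    fragment: parts.next().ok_or("search fragment name missing")?.to_string(),
                },
                other => return Err(format!("wrong template `{other}` after embedder")),
            }
        }
        Some("chatCompletions") => match parts.next() {
            Some("documentTemplate") => TemplateId::ChatCompletionsDocumentTemplate,
            Some(other) => return Err(format!("unknown chat completion template ID `{other}`")),
            None => return Err("missing chat completion template ID".into()),
        },
        Some(other) => {
            return Err(format!(
                "template ID must start with `embedders` or `chatCompletions`, but found `{other}`"
            ))
        }
        None => unreachable!("`split` always yields at least one item"),
    };
    // Anything left after a fully parsed ID is a leftover token.
    match parts.next() {
        Some(leftover) => Err(format!("leftover token `{leftover}` after parsing template ID")),
        None => Ok(parsed),
    }
}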
#[actix_rt::test]
async fn fragment_retrieval() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{ "template": { "id": "embedders.rest.indexingFragments.withBreed" }}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{{ doc.name }} is a {{ doc.breed }}",
"rendered": null
}
"#);
let (value, code) = index
.render(json! {{ "template": { "id": "embedders.rest.searchFragments.justBreed" }}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "It's a {{ media.breed }}",
"rendered": null
}
"#);
}
#[actix_rt::test]
async fn missing_chat_completions_template() {
let index = shared_index_for_fragments().await;
let (value, code) = index.render(json! {{ "template": { "id": "chatCompletions" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Missing chat completion template ID. The only available template is `documentTemplate`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn wrong_chat_completions_template() {
let index = shared_index_for_fragments().await;
let (value, code) =
index.render(json! {{ "template": { "id": "chatCompletions.wrong" }}}).await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Unknown chat completion template ID `wrong`. The only available template is `documentTemplate`.",
"code": "invalid_render_template_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template_id"
}
"#);
}
#[actix_rt::test]
async fn chat_completions_template_retrieval() {
let index = shared_index_for_fragments().await;
let (value, code) =
index.render(json! {{ "template": { "id": "chatCompletions.documentTemplate" }}}).await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
"rendered": null
}
"#);
}
#[actix_rt::test]
async fn retrieve_document_template() {
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!(
{
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "This is a document template {{doc.doggo}}",
}
}
}
))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response["taskUid"].as_u64().unwrap()).await;
let (value, code) = index
.render(json! {{ "template": { "id": "embedders.doggo_embedder.documentTemplate" }}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "This is a document template {{doc.doggo}}",
"rendered": null
}
"#);
}
#[actix_rt::test]
async fn render_document_kefir() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.indexingFragments.basic" },
"input": { "documentId": "0" },
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{{ doc.name }} is a dog",
"rendered": "kefir is a dog"
}
"#);
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.indexingFragments.withBreed" },
"input": { "documentId": "0" },
}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(value, { ".message" => "[ignored]" }), @r#"
{
"message": "[ignored]",
"code": "template_rendering_error",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#template_rendering_error"
}
"#);
}
#[actix_rt::test]
async fn render_inline_document_iko() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.indexingFragments.basic" },
"input": { "inline": { "doc": { "name": "iko", "breed": "jack russell" } } },
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{{ doc.name }} is a dog",
"rendered": "iko is a dog"
}
"#);
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.indexingFragments.withBreed" },
"input": { "inline": { "doc": { "name": "iko", "breed": "jack russell" } } },
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{{ doc.name }} is a {{ doc.breed }}",
"rendered": "iko is a jack russell"
}
"#);
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.searchFragments.justBreed" },
"input": { "inline": { "media": { "name": "iko", "breed": "jack russell" } } },
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "It's a {{ media.breed }}",
"rendered": "It's a jack russell"
}
"#);
}
#[actix_rt::test]
async fn chat_completions() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "id": "chatCompletions.documentTemplate" },
"input": { "documentId": "0" },
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
"rendered": "id: 0\nname: kefir\n"
}
"#);
let (value, code) = index
.render(json! {{
"template": { "id": "chatCompletions.documentTemplate" },
"input": { "inline": { "doc": { "name": "iko", "breed": "jack russell" } } },
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{% for field in fields %}{% if field.is_searchable and field.value != nil %}{{ field.name }}: {{ field.value }}\n{% endif %}{% endfor %}",
"rendered": "name: iko\nbreed: jack russell\n"
}
"#);
}
#[actix_rt::test]
async fn both_document_id_and_inline() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "inline": "{{ doc.name }} compared to {{ media.name }}" },
"input": { "documentId": "0", "inline": { "media": { "name": "iko" } } },
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{{ doc.name }} compared to {{ media.name }}",
"rendered": "kefir compared to iko"
}
"#);
}
#[actix_rt::test]
async fn multiple_templates_or_docs() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "id": "whatever", "inline": "whatever" }
}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Cannot provide both an inline template and a template ID.",
"code": "invalid_render_template",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_template"
}
"#);
let (value, code) = index
.render(json! {{
"template": { "inline": "whatever" },
"input": { "documentId": "0", "inline": { "doc": { "name": "iko" } } }
}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "A document id was provided but adding it to the input would overwrite the `doc` field that you already defined inline.",
"code": "invalid_render_input",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_input"
}
"#);
}
#[actix_rt::test]
async fn fields() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "inline": "whatever" },
"input": { "insertFields": true }
}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Fields were requested but no document was provided.\n Hint: Provide a document ID or inline document.",
"code": "invalid_render_input_fields",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_input_fields"
}
"#);
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.indexingFragments.basic" },
"input": { "documentId": "0", "insertFields": true }
}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Fields are not available on fragments.\n Hint: Remove the `insertFields` parameter or set it to `false`.",
"code": "invalid_render_input_fields",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_input_fields"
}
"#);
let (value, code) = index
.render(json! {{
"template": { "inline": "whatever" },
"input": { "documentId": "0", "inline": { "fields": {} }, "insertFields": true }
}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Fields were provided in the inline input but `insertFields` is set to `true`.\n Hint: Remove the `insertFields` parameter or set it to `false`.",
"code": "invalid_render_input_fields",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_render_input_fields"
}
"#);
}
#[actix_rt::test]
async fn document_not_found() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.indexingFragments.basic" },
"input": { "documentId": "9999" }
}})
.await;
snapshot!(code, @"404 Not Found");
snapshot!(value, @r#"
{
"message": "Document with ID `9999` not found.",
"code": "render_document_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#render_document_not_found"
}
"#);
}
#[actix_rt::test]
async fn bad_template() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "inline": "{{ doc.name" },
"input": { "documentId": "0" }
}})
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(value, @r#"
{
"message": "Error parsing template: error while parsing template: liquid: --> 1:4\n |\n1 | {{ doc.name\n | ^---\n |\n = expected Literal\n",
"code": "template_parsing_error",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#template_parsing_error"
}
"#);
}
#[actix_rt::test]
async fn inline_nested() {
let index = shared_index_for_fragments().await;
let (value, code) = index
.render(json! {{
"template": { "inline": "{{ doc.name }} is a {{ doc.breed.name }} ({{ doc.breed.kind }})" },
"input": { "inline": { "doc": { "name": "iko", "breed": { "name": "jack russell", "kind": "terrier" } } } }
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{{ doc.name }} is a {{ doc.breed.name }} ({{ doc.breed.kind }})",
"rendered": "iko is a jack russell (terrier)"
}
"#);
}
#[actix_rt::test]
async fn embedder_document_template() {
let (_mock, setting) = crate::vector::rest::create_mock().await;
let server = Server::new().await;
let index = server.index("doggo");
let (response, code) = index
.update_settings(json!({
"embedders": {
"rest": setting,
},
}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await.succeeded();
let documents = json!([
{"id": 0, "name": "kefir"},
]);
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
index.wait_task(value.uid()).await.succeeded();
let (value, code) = index
.render(json! {{
"template": { "id": "embedders.rest.documentTemplate" },
"input": { "documentId": "0" }
}})
.await;
snapshot!(code, @"200 OK");
snapshot!(value, @r#"
{
"template": "{{doc.name}}",
"rendered": "kefir"
}
"#);
}

View File

@ -1,5 +1,4 @@
use crate::common::{shared_does_not_exists_index, Server};
use crate::json;
#[actix_rt::test]

View File

@ -1,6 +1,7 @@
use super::shared_index_with_documents;
use crate::common::Server;
use crate::json;
use meili_snap::{json_string, snapshot};
#[actix_rt::test]
async fn default_search_should_return_estimated_total_hit() {
@ -133,3 +134,61 @@ async fn ensure_placeholder_search_hit_count_valid() {
.await;
}
}
#[actix_rt::test]
async fn test_issue_5274() {
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{
"id": 1,
"title": "Document 1",
"content": "This is the first."
},
{
"id": 2,
"title": "Document 2",
"content": "This is the second doc."
}
]);
let (task, _code) = index.add_documents(documents, None).await;
server.wait_task(task.uid()).await.succeeded();
// Find out the lowest ranking score among the documents
let (rep, _status) = index
.search_post(json!({"q": "doc", "page": 1, "hitsPerPage": 2, "showRankingScore": true}))
.await;
let hits = rep["hits"].as_array().expect("Missing hits array");
let second_hit = hits.get(1).expect("Missing second hit");
let ranking_score = second_hit
.get("_rankingScore")
.expect("Missing _rankingScore field")
.as_f64()
.expect("Expected _rankingScore to be a f64");
// Search with a ranking score threshold just above it and expect a single hit
let (rep, _status) = index
.search_post(json!({"q": "doc", "page": 1, "hitsPerPage": 1, "rankingScoreThreshold": ranking_score + 0.0001}))
.await;
snapshot!(json_string!(rep, {
".processingTimeMs" => "[ignored]",
}), @r#"
{
"hits": [
{
"id": 2,
"title": "Document 2",
"content": "This is the second doc."
}
],
"query": "doc",
"processingTimeMs": "[ignored]",
"hitsPerPage": 1,
"page": 1,
"totalPages": 1,
"totalHits": 1
}
"#);
}
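Note: a complementary assertion (hypothetical, not in the diff) would pin the other side of the fix: a threshold at or below the lowest score must leave totalHits untouched. Reusing `index` and `ranking_score` from the test above:
    // Hypothetical: with the threshold exactly at the lowest score, both
    // documents still count toward `totalHits`, even though only one page
    // of one hit is requested.
    let (rep, _status) = index
        .search_post(json!({"q": "doc", "page": 1, "hitsPerPage": 1, "rankingScoreThreshold": ranking_score}))
        .await;
    assert_eq!(rep["totalHits"], 2, "threshold at the lowest score must not prune hits");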

View File

@ -692,3 +692,68 @@ async fn granular_filterable_attributes() {
]
"###);
}
#[actix_rt::test]
async fn test_searchable_attributes_order() {
let server = Server::new_shared();
let index = server.unique_index();
// 1) Create an index with settings "searchableAttributes": ["title", "overview"]
let (response, code) = index.create(None).await;
assert_eq!(code, 202, "{response}");
server.wait_task(response.uid()).await.succeeded();
let (task, code) = index
.update_settings(json!({
"searchableAttributes": ["title", "overview"]
}))
.await;
assert_eq!(code, 202, "{task}");
server.wait_task(task.uid()).await.succeeded();
// 2) Add documents in the index
let documents = json!([
{
"id": 1,
"title": "The Matrix",
"overview": "A computer hacker learns from mysterious rebels about the true nature of his reality."
},
{
"id": 2,
"title": "Inception",
"overview": "A thief who steals corporate secrets through dream-sharing technology."
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202, "{response}");
server.wait_task(response.uid()).await.succeeded();
// 3) Modify the settings "searchableAttributes": ["overview", "title"] (overview is put first)
let (task, code) = index
.update_settings(json!({
"searchableAttributes": ["overview", "title"]
}))
.await;
assert_eq!(code, 202, "{task}");
server.wait_task(task.uid()).await.succeeded();
// 4) Check if it has been applied
let (response, code) = index.settings().await;
assert_eq!(code, 200, "{response}");
assert_eq!(response["searchableAttributes"], json!(["overview", "title"]));
// 5) Re-modify the settings "searchableAttributes": ["title", "overview"] (title is put first)
let (task, code) = index
.update_settings(json!({
"searchableAttributes": ["title", "overview"]
}))
.await;
assert_eq!(code, 202, "{task}");
server.wait_task(task.uid()).await.succeeded();
// 6) Check if it has been applied
let (response, code) = index.settings().await;
assert_eq!(code, 200, "{response}");
assert_eq!(response["searchableAttributes"], json!(["title", "overview"]));
}
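Note: order matters here because the `attribute` ranking rule weighs earlier entries in `searchableAttributes` more heavily. A hypothetical extension of this test (documents and assertion are illustrative, not part of the diff) could make that visible:
    // With ["title", "overview"] restored, a match in `title` outranks an
    // equally good match in `overview` via the `attribute` ranking rule.
    let documents = json!([
        { "id": 3, "title": "Dreams", "overview": "A film about nothing." },
        { "id": 4, "title": "Nothing", "overview": "A film about dreams." }
    ]);
    let (task, code) = index.add_documents(documents, None).await;
    assert_eq!(code, 202, "{task}");
    server.wait_task(task.uid()).await.succeeded();
    let (response, code) = index.search_post(json!({ "q": "dreams" })).await;
    assert_eq!(code, 200, "{response}");
    assert_eq!(response["hits"][0]["id"], 3);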

View File

@ -61,7 +61,16 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"pagination": {
"maxTotalHits": 15
},
"embedders": {},
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"pooling": "forceMean",
"documentTemplate": "{{doc.description}}",
"documentTemplateMaxBytes": 400
}
},
"searchCutoffMs": 8000,
"localizedAttributes": [
{

View File

@ -0,0 +1,40 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
---
[
{
"id": 1,
"name": "kefir",
"surname": [
"kef",
"kefkef",
"kefirounet",
"boubou"
],
"age": 1.4,
"description": "kefir est un petit chien blanc très mignon",
"_vectors": {
"doggo_embedder": {
"embeddings": "[vector]",
"regenerate": true
}
}
},
{
"id": 2,
"name": "intel",
"surname": [
"untel",
"tétel",
"iouiou"
],
"age": 11.5,
"description": "intel est un grand beagle très mignon",
"_vectors": {
"doggo_embedder": {
"embeddings": "[vector]",
"regenerate": false
}
}
}
]

View File

@ -4,7 +4,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 24,
"uid": 30,
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
@ -26,6 +26,155 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"finishedAt": "[date]",
"batchStrategy": "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type."
},
{
"uid": 29,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.067201S",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z",
"batchStrategy": "unspecified"
},
{
"uid": 28,
"progress": null,
"details": {
"deletedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"indexDeletion": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.012727S",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z",
"batchStrategy": "unspecified"
},
{
"uid": 27,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"stats": {
"totalNbTasks": 1,
"status": {
"failed": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.059920S",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z",
"batchStrategy": "unspecified"
},
{
"uid": 26,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.088879S",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z",
"batchStrategy": "unspecified"
},
{
"uid": 25,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.312911S",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z",
"batchStrategy": "unspecified"
},
{
"uid": 24,
"progress": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.247378S",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z",
"batchStrategy": "unspecified"
},
{
"uid": 23,
"progress": null,
@ -348,179 +497,10 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z",
"batchStrategy": "unspecified"
},
{
"uid": 10,
"progress": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007391353S",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z",
"batchStrategy": "unspecified"
},
{
"uid": 9,
"progress": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007445825S",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z",
"batchStrategy": "unspecified"
},
{
"uid": 8,
"progress": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.012020083S",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z",
"batchStrategy": "unspecified"
},
{
"uid": 7,
"progress": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007440092S",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z",
"batchStrategy": "unspecified"
},
{
"uid": 6,
"progress": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007565161S",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z",
"batchStrategy": "unspecified"
},
{
"uid": 5,
"progress": null,
"details": {
"stopWords": [
"le",
"un"
]
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.016307263S",
"startedAt": "2025-01-16T16:53:19.913351957Z",
"finishedAt": "2025-01-16T16:53:19.92965922Z",
"batchStrategy": "unspecified"
}
],
"total": 23,
"total": 29,
"limit": 20,
"from": 24,
"next": 4
"from": 30,
"next": 10
}

View File

@ -4,7 +4,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 24,
"uid": 30,
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
@ -26,6 +26,155 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"finishedAt": "[date]",
"batchStrategy": "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type."
},
{
"uid": 29,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.067201S",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z",
"batchStrategy": "unspecified"
},
{
"uid": 28,
"progress": null,
"details": {
"deletedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"indexDeletion": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.012727S",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z",
"batchStrategy": "unspecified"
},
{
"uid": 27,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"stats": {
"totalNbTasks": 1,
"status": {
"failed": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.059920S",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z",
"batchStrategy": "unspecified"
},
{
"uid": 26,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.088879S",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z",
"batchStrategy": "unspecified"
},
{
"uid": 25,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.312911S",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z",
"batchStrategy": "unspecified"
},
{
"uid": 24,
"progress": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.247378S",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z",
"batchStrategy": "unspecified"
},
{
"uid": 23,
"progress": null,
@ -348,179 +497,10 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z",
"batchStrategy": "unspecified"
},
{
"uid": 10,
"progress": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007391353S",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z",
"batchStrategy": "unspecified"
},
{
"uid": 9,
"progress": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007445825S",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z",
"batchStrategy": "unspecified"
},
{
"uid": 8,
"progress": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.012020083S",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z",
"batchStrategy": "unspecified"
},
{
"uid": 7,
"progress": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007440092S",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z",
"batchStrategy": "unspecified"
},
{
"uid": 6,
"progress": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007565161S",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z",
"batchStrategy": "unspecified"
},
{
"uid": 5,
"progress": null,
"details": {
"stopWords": [
"le",
"un"
]
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.016307263S",
"startedAt": "2025-01-16T16:53:19.913351957Z",
"finishedAt": "2025-01-16T16:53:19.92965922Z",
"batchStrategy": "unspecified"
}
],
"total": 23,
"total": 29,
"limit": 20,
"from": 24,
"next": 4
"from": 30,
"next": 10
}

View File

@ -4,7 +4,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 24,
"uid": 30,
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
@ -26,6 +26,155 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"finishedAt": "[date]",
"batchStrategy": "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type."
},
{
"uid": 29,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.067201S",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z",
"batchStrategy": "unspecified"
},
{
"uid": 28,
"progress": null,
"details": {
"deletedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"indexDeletion": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.012727S",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z",
"batchStrategy": "unspecified"
},
{
"uid": 27,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"stats": {
"totalNbTasks": 1,
"status": {
"failed": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.059920S",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z",
"batchStrategy": "unspecified"
},
{
"uid": 26,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.088879S",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z",
"batchStrategy": "unspecified"
},
{
"uid": 25,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.312911S",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z",
"batchStrategy": "unspecified"
},
{
"uid": 24,
"progress": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.247378S",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z",
"batchStrategy": "unspecified"
},
{
"uid": 23,
"progress": null,
@ -348,179 +497,10 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z",
"batchStrategy": "unspecified"
},
{
"uid": 10,
"progress": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007391353S",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z",
"batchStrategy": "unspecified"
},
{
"uid": 9,
"progress": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007445825S",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z",
"batchStrategy": "unspecified"
},
{
"uid": 8,
"progress": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.012020083S",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z",
"batchStrategy": "unspecified"
},
{
"uid": 7,
"progress": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007440092S",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z",
"batchStrategy": "unspecified"
},
{
"uid": 6,
"progress": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.007565161S",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z",
"batchStrategy": "unspecified"
},
{
"uid": 5,
"progress": null,
"details": {
"stopWords": [
"le",
"un"
]
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.016307263S",
"startedAt": "2025-01-16T16:53:19.913351957Z",
"finishedAt": "2025-01-16T16:53:19.92965922Z",
"batchStrategy": "unspecified"
}
],
"total": 23,
"total": 29,
"limit": 20,
"from": 24,
"next": 4
"from": 30,
"next": 10
}

View File

@ -4,8 +4,8 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 25,
"batchUid": 24,
"uid": 31,
"batchUid": 30,
"indexUid": null,
"status": "succeeded",
"type": "upgradeDatabase",
@ -20,6 +20,118 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 30,
"batchUid": 29,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.067201S",
"enqueuedAt": "2025-07-07T13:43:08.772432Z",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z"
},
{
"uid": 29,
"batchUid": 28,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 1
},
"error": null,
"duration": "PT0.012727S",
"enqueuedAt": "2025-07-07T13:42:50.744793Z",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z"
},
{
"uid": 28,
"batchUid": 27,
"indexUid": "kefir",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "Index `kefir`: Bad embedder configuration in the document with id: `2`. Could not parse `._vectors.doggo_embedder`: trailing characters at line 1 column 13",
"code": "invalid_vectors_type",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_vectors_type"
},
"duration": "PT0.059920S",
"enqueuedAt": "2025-07-07T13:42:15.624598Z",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z"
},
{
"uid": 27,
"batchUid": 26,
"indexUid": "mieli",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.088879S",
"enqueuedAt": "2025-07-07T13:40:01.46081Z",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z"
},
{
"uid": 26,
"batchUid": 25,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.312911S",
"enqueuedAt": "2025-07-07T13:32:46.13871Z",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z"
},
{
"uid": 25,
"batchUid": 24,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"error": null,
"duration": "PT0.247378S",
"enqueuedAt": "2025-07-07T13:28:27.390054Z",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z"
},
{
"uid": 24,
"batchUid": 23,
@ -264,134 +376,10 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"enqueuedAt": "2025-01-16T17:02:52.527382964Z",
"startedAt": "2025-01-16T17:02:52.539749853Z",
"finishedAt": "2025-01-16T17:02:52.547390016Z"
},
{
"uid": 11,
"batchUid": 11,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"searchCutoffMs": 8000
},
"error": null,
"duration": "PT0.007307840S",
"enqueuedAt": "2025-01-16T17:01:14.100316617Z",
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z"
},
{
"uid": 10,
"batchUid": 10,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"error": null,
"duration": "PT0.007391353S",
"enqueuedAt": "2025-01-16T17:00:29.188815062Z",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z"
},
{
"uid": 9,
"batchUid": 9,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"error": null,
"duration": "PT0.007445825S",
"enqueuedAt": "2025-01-16T17:00:15.759501709Z",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z"
},
{
"uid": 8,
"batchUid": 8,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"error": null,
"duration": "PT0.012020083S",
"enqueuedAt": "2025-01-16T16:59:42.727292501Z",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z"
},
{
"uid": 7,
"batchUid": 7,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"error": null,
"duration": "PT0.007440092S",
"enqueuedAt": "2025-01-16T16:58:41.203145044Z",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z"
},
{
"uid": 6,
"batchUid": 6,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"error": null,
"duration": "PT0.007565161S",
"enqueuedAt": "2025-01-16T16:54:51.927866243Z",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z"
}
],
"total": 24,
"total": 30,
"limit": 20,
"from": 25,
"next": 5
"from": 31,
"next": 11
}

View File

@ -4,8 +4,8 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 25,
"batchUid": 24,
"uid": 31,
"batchUid": 30,
"indexUid": null,
"status": "succeeded",
"type": "upgradeDatabase",
@ -20,6 +20,118 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 30,
"batchUid": 29,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.067201S",
"enqueuedAt": "2025-07-07T13:43:08.772432Z",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z"
},
{
"uid": 29,
"batchUid": 28,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 1
},
"error": null,
"duration": "PT0.012727S",
"enqueuedAt": "2025-07-07T13:42:50.744793Z",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z"
},
{
"uid": 28,
"batchUid": 27,
"indexUid": "kefir",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "Index `kefir`: Bad embedder configuration in the document with id: `2`. Could not parse `._vectors.doggo_embedder`: trailing characters at line 1 column 13",
"code": "invalid_vectors_type",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_vectors_type"
},
"duration": "PT0.059920S",
"enqueuedAt": "2025-07-07T13:42:15.624598Z",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z"
},
{
"uid": 27,
"batchUid": 26,
"indexUid": "mieli",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.088879S",
"enqueuedAt": "2025-07-07T13:40:01.46081Z",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z"
},
{
"uid": 26,
"batchUid": 25,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.312911S",
"enqueuedAt": "2025-07-07T13:32:46.13871Z",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z"
},
{
"uid": 25,
"batchUid": 24,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"error": null,
"duration": "PT0.247378S",
"enqueuedAt": "2025-07-07T13:28:27.390054Z",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z"
},
{
"uid": 24,
"batchUid": 23,
@ -264,134 +376,10 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"enqueuedAt": "2025-01-16T17:02:52.527382964Z",
"startedAt": "2025-01-16T17:02:52.539749853Z",
"finishedAt": "2025-01-16T17:02:52.547390016Z"
},
{
"uid": 11,
"batchUid": 11,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"searchCutoffMs": 8000
},
"error": null,
"duration": "PT0.007307840S",
"enqueuedAt": "2025-01-16T17:01:14.100316617Z",
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z"
},
{
"uid": 10,
"batchUid": 10,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"error": null,
"duration": "PT0.007391353S",
"enqueuedAt": "2025-01-16T17:00:29.188815062Z",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z"
},
{
"uid": 9,
"batchUid": 9,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"error": null,
"duration": "PT0.007445825S",
"enqueuedAt": "2025-01-16T17:00:15.759501709Z",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z"
},
{
"uid": 8,
"batchUid": 8,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"error": null,
"duration": "PT0.012020083S",
"enqueuedAt": "2025-01-16T16:59:42.727292501Z",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z"
},
{
"uid": 7,
"batchUid": 7,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"error": null,
"duration": "PT0.007440092S",
"enqueuedAt": "2025-01-16T16:58:41.203145044Z",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z"
},
{
"uid": 6,
"batchUid": 6,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"error": null,
"duration": "PT0.007565161S",
"enqueuedAt": "2025-01-16T16:54:51.927866243Z",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z"
}
],
"total": 24,
"total": 30,
"limit": 20,
"from": 25,
"next": 5
"from": 31,
"next": 11
}

View File

@ -4,8 +4,8 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 25,
"batchUid": 24,
"uid": 31,
"batchUid": 30,
"indexUid": null,
"status": "succeeded",
"type": "upgradeDatabase",
@ -20,6 +20,118 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 30,
"batchUid": 29,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.067201S",
"enqueuedAt": "2025-07-07T13:43:08.772432Z",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z"
},
{
"uid": 29,
"batchUid": 28,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 1
},
"error": null,
"duration": "PT0.012727S",
"enqueuedAt": "2025-07-07T13:42:50.744793Z",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z"
},
{
"uid": 28,
"batchUid": 27,
"indexUid": "kefir",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "Index `kefir`: Bad embedder configuration in the document with id: `2`. Could not parse `._vectors.doggo_embedder`: trailing characters at line 1 column 13",
"code": "invalid_vectors_type",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_vectors_type"
},
"duration": "PT0.059920S",
"enqueuedAt": "2025-07-07T13:42:15.624598Z",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z"
},
{
"uid": 27,
"batchUid": 26,
"indexUid": "mieli",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.088879S",
"enqueuedAt": "2025-07-07T13:40:01.46081Z",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z"
},
{
"uid": 26,
"batchUid": 25,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.312911S",
"enqueuedAt": "2025-07-07T13:32:46.13871Z",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z"
},
{
"uid": 25,
"batchUid": 24,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"error": null,
"duration": "PT0.247378S",
"enqueuedAt": "2025-07-07T13:28:27.390054Z",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z"
},
{
"uid": 24,
"batchUid": 23,
@ -264,134 +376,10 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"enqueuedAt": "2025-01-16T17:02:52.527382964Z",
"startedAt": "2025-01-16T17:02:52.539749853Z",
"finishedAt": "2025-01-16T17:02:52.547390016Z"
},
{
"uid": 11,
"batchUid": 11,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"searchCutoffMs": 8000
},
"error": null,
"duration": "PT0.007307840S",
"enqueuedAt": "2025-01-16T17:01:14.100316617Z",
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z"
},
{
"uid": 10,
"batchUid": 10,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"error": null,
"duration": "PT0.007391353S",
"enqueuedAt": "2025-01-16T17:00:29.188815062Z",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z"
},
{
"uid": 9,
"batchUid": 9,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"error": null,
"duration": "PT0.007445825S",
"enqueuedAt": "2025-01-16T17:00:15.759501709Z",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z"
},
{
"uid": 8,
"batchUid": 8,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"error": null,
"duration": "PT0.012020083S",
"enqueuedAt": "2025-01-16T16:59:42.727292501Z",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z"
},
{
"uid": 7,
"batchUid": 7,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"error": null,
"duration": "PT0.007440092S",
"enqueuedAt": "2025-01-16T16:58:41.203145044Z",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z"
},
{
"uid": 6,
"batchUid": 6,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"error": null,
"duration": "PT0.007565161S",
"enqueuedAt": "2025-01-16T16:54:51.927866243Z",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z"
}
],
"total": 24,
"total": 30,
"limit": 20,
"from": 25,
"next": 5
"from": 31,
"next": 11
}

View File

@ -4,7 +4,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 24,
"uid": 30,
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
@ -26,6 +26,155 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"finishedAt": "[date]",
"batchStrategy": "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type."
},
{
"uid": 29,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.067201S",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z",
"batchStrategy": "unspecified"
},
{
"uid": 28,
"progress": null,
"details": {
"deletedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"indexDeletion": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.012727S",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z",
"batchStrategy": "unspecified"
},
{
"uid": 27,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"stats": {
"totalNbTasks": 1,
"status": {
"failed": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.059920S",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z",
"batchStrategy": "unspecified"
},
{
"uid": 26,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "PT0.088879S",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z",
"batchStrategy": "unspecified"
},
{
"uid": 25,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.312911S",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z",
"batchStrategy": "unspecified"
},
{
"uid": 24,
"progress": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.247378S",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z",
"batchStrategy": "unspecified"
},
{
"uid": 23,
"progress": null,
@ -642,8 +791,8 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"batchStrategy": "unspecified"
}
],
"total": 25,
"total": 31,
"limit": 1000,
"from": 24,
"from": 30,
"next": null
}


@ -4,8 +4,8 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
{
"results": [
{
"uid": 25,
"batchUid": 24,
"uid": 31,
"batchUid": 30,
"indexUid": null,
"status": "succeeded",
"type": "upgradeDatabase",
@ -20,6 +20,118 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 30,
"batchUid": 29,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.067201S",
"enqueuedAt": "2025-07-07T13:43:08.772432Z",
"startedAt": "2025-07-07T13:43:08.772854Z",
"finishedAt": "2025-07-07T13:43:08.840055Z"
},
{
"uid": 29,
"batchUid": 28,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 1
},
"error": null,
"duration": "PT0.012727S",
"enqueuedAt": "2025-07-07T13:42:50.744793Z",
"startedAt": "2025-07-07T13:42:50.745461Z",
"finishedAt": "2025-07-07T13:42:50.758188Z"
},
{
"uid": 28,
"batchUid": 27,
"indexUid": "kefir",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "Index `kefir`: Bad embedder configuration in the document with id: `2`. Could not parse `._vectors.doggo_embedder`: trailing characters at line 1 column 13",
"code": "invalid_vectors_type",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_vectors_type"
},
"duration": "PT0.059920S",
"enqueuedAt": "2025-07-07T13:42:15.624598Z",
"startedAt": "2025-07-07T13:42:15.625413Z",
"finishedAt": "2025-07-07T13:42:15.685333Z"
},
{
"uid": 27,
"batchUid": 26,
"indexUid": "mieli",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.088879S",
"enqueuedAt": "2025-07-07T13:40:01.46081Z",
"startedAt": "2025-07-07T13:40:01.461741Z",
"finishedAt": "2025-07-07T13:40:01.55062Z"
},
{
"uid": 26,
"batchUid": 25,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.312911S",
"enqueuedAt": "2025-07-07T13:32:46.13871Z",
"startedAt": "2025-07-07T13:32:46.139785Z",
"finishedAt": "2025-07-07T13:32:46.452696Z"
},
{
"uid": 25,
"batchUid": 24,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"embedders": {
"doggo_embedder": {
"source": "huggingFace",
"model": "sentence-transformers/all-MiniLM-L6-v2",
"revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
"documentTemplate": "{{doc.description}}"
}
}
},
"error": null,
"duration": "PT0.247378S",
"enqueuedAt": "2025-07-07T13:28:27.390054Z",
"startedAt": "2025-07-07T13:28:27.391344Z",
"finishedAt": "2025-07-07T13:28:27.638722Z"
},
{
"uid": 24,
"batchUid": 23,
@ -497,8 +609,8 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"finishedAt": "2025-01-16T16:45:16.131303739Z"
}
],
"total": 26,
"total": 32,
"limit": 1000,
"from": 25,
"from": 31,
"next": null
}


@ -114,13 +114,13 @@ async fn check_the_index_scheduler(server: &Server) {
// All the indexes are still present
let (indexes, _) = server.list_indexes(None, None).await;
snapshot!(indexes, @r#"
snapshot!(indexes, @r###"
{
"results": [
{
"uid": "kefir",
"createdAt": "2025-01-16T16:45:16.020663157Z",
"updatedAt": "2025-01-23T11:36:22.634859166Z",
"updatedAt": "2025-07-07T13:43:08.835381Z",
"primaryKey": "id"
}
],
@ -128,7 +128,7 @@ async fn check_the_index_scheduler(server: &Server) {
"limit": 20,
"total": 1
}
"#);
"###);
// And their metadata are still right
let (stats, _) = server.stats().await;
assert_json_snapshot!(stats, {
@ -141,21 +141,21 @@ async fn check_the_index_scheduler(server: &Server) {
{
"databaseSize": "[bytes]",
"usedDatabaseSize": "[bytes]",
"lastUpdate": "2025-01-23T11:36:22.634859166Z",
"lastUpdate": "2025-07-07T13:43:08.835381Z",
"indexes": {
"kefir": {
"numberOfDocuments": 1,
"numberOfDocuments": 2,
"rawDocumentDbSize": "[bytes]",
"avgDocumentSize": "[bytes]",
"isIndexing": false,
"numberOfEmbeddings": 0,
"numberOfEmbeddedDocuments": 0,
"numberOfEmbeddings": 2,
"numberOfEmbeddedDocuments": 2,
"fieldDistribution": {
"age": 1,
"description": 1,
"id": 1,
"name": 1,
"surname": 1
"age": 2,
"description": 2,
"id": 2,
"name": 2,
"surname": 2
}
}
}
@ -227,21 +227,21 @@ async fn check_the_index_scheduler(server: &Server) {
{
"databaseSize": "[bytes]",
"usedDatabaseSize": "[bytes]",
"lastUpdate": "2025-01-23T11:36:22.634859166Z",
"lastUpdate": "2025-07-07T13:43:08.835381Z",
"indexes": {
"kefir": {
"numberOfDocuments": 1,
"numberOfDocuments": 2,
"rawDocumentDbSize": "[bytes]",
"avgDocumentSize": "[bytes]",
"isIndexing": false,
"numberOfEmbeddings": 0,
"numberOfEmbeddedDocuments": 0,
"numberOfEmbeddings": 2,
"numberOfEmbeddedDocuments": 2,
"fieldDistribution": {
"age": 1,
"description": 1,
"id": 1,
"name": 1,
"surname": 1
"age": 2,
"description": 2,
"id": 2,
"name": 2,
"surname": 2
}
}
}
@ -254,18 +254,18 @@ async fn check_the_index_scheduler(server: &Server) {
".avgDocumentSize" => "[bytes]",
}), @r###"
{
"numberOfDocuments": 1,
"numberOfDocuments": 2,
"rawDocumentDbSize": "[bytes]",
"avgDocumentSize": "[bytes]",
"isIndexing": false,
"numberOfEmbeddings": 0,
"numberOfEmbeddedDocuments": 0,
"numberOfEmbeddings": 2,
"numberOfEmbeddedDocuments": 2,
"fieldDistribution": {
"age": 1,
"description": 1,
"id": 1,
"name": 1,
"surname": 1
"age": 2,
"description": 2,
"id": 2,
"name": 2,
"surname": 2
}
}
"###);
@ -295,4 +295,8 @@ async fn check_the_index_features(server: &Server) {
let (results, _status) =
kefir.search_post(json!({ "sort": ["age:asc"], "filter": "surname = kefirounet" })).await;
snapshot!(results, name: "search_with_sort_and_filter");
// ensure we can still get the vectors and that their `regenerate` flag is preserved.
let (results, _status) = kefir.search_post(json!({"retrieveVectors": true})).await;
snapshot!(json_string!(results["hits"], {"[]._vectors.doggo_embedder.embeddings" => "[vector]"}), name: "search_with_retrieve_vectors");
}
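
A hedged sketch of a follow-up check this test could make, assuming the response shape implied by the snapshot filter above (`_vectors.<embedder>.{embeddings, regenerate}`); the assertion is illustrative and not part of the test:

// Hypothetical check: each hit exposes its embedder data under `_vectors`,
// including the `regenerate` flag that the upgrade must preserve.
let hit = &results["hits"][0];
assert!(hit["_vectors"]["doggo_embedder"]["regenerate"].is_boolean());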


@ -3,7 +3,7 @@ mod fragments;
#[cfg(feature = "test-ollama")]
mod ollama;
mod openai;
pub mod rest;
mod rest;
mod settings;
use std::str::FromStr;
@ -15,6 +15,10 @@ use crate::common::index::Index;
use crate::common::{default_settings, GetAllDocumentsOptions, Server};
use crate::json;
async fn get_server_vector() -> Server {
Server::new().await
}
#[actix_rt::test]
async fn add_remove_user_provided() {
let server = Server::new().await;


@ -7,8 +7,9 @@ use meili_snap::{json_string, snapshot};
use wiremock::matchers::{method, path};
use wiremock::{Mock, MockServer, Request, ResponseTemplate};
use crate::common::{GetAllDocumentsOptions, Server, Value};
use crate::common::{GetAllDocumentsOptions, Value};
use crate::json;
use crate::vector::get_server_vector;
#[derive(serde::Deserialize)]
struct OpenAiResponses(BTreeMap<String, OpenAiResponse>);
@ -348,7 +349,7 @@ async fn create_slow_mock() -> (&'static MockServer, Value) {
#[actix_rt::test]
async fn it_works() {
let (_mock, setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -582,7 +583,7 @@ async fn it_works() {
#[actix_rt::test]
async fn tokenize_long_text() {
let (_mock, setting) = create_mock_tokenized().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -645,7 +646,7 @@ async fn tokenize_long_text() {
#[actix_rt::test]
async fn bad_api_key() {
let (_mock, mut setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let documents = json!([
@ -793,7 +794,7 @@ async fn bad_api_key() {
#[actix_rt::test]
async fn bad_model() {
let (_mock, mut setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let documents = json!([
@ -871,7 +872,7 @@ async fn bad_model() {
#[actix_rt::test]
async fn bad_dimensions() {
let (_mock, mut setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let documents = json!([
@ -970,7 +971,7 @@ async fn bad_dimensions() {
#[actix_rt::test]
async fn smaller_dimensions() {
let (_mock, setting) = create_mock_dimensions().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1202,7 +1203,7 @@ async fn smaller_dimensions() {
#[actix_rt::test]
async fn small_embedding_model() {
let (_mock, setting) = create_mock_small_embedding_model().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1433,7 +1434,7 @@ async fn small_embedding_model() {
#[actix_rt::test]
async fn legacy_embedding_model() {
let (_mock, setting) = create_mock_legacy_embedding_model().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1665,7 +1666,7 @@ async fn legacy_embedding_model() {
#[actix_rt::test]
async fn it_still_works() {
let (_mock, setting) = create_fallible_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1897,7 +1898,7 @@ async fn it_still_works() {
#[actix_rt::test]
async fn timeout() {
let (_mock, setting) = create_slow_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index


@ -8,11 +8,11 @@ use tokio::sync::mpsc;
use wiremock::matchers::{method, path};
use wiremock::{Mock, MockServer, Request, ResponseTemplate};
use crate::common::{Server, Value};
use crate::common::Value;
use crate::json;
use crate::vector::GetAllDocumentsOptions;
use crate::vector::{get_server_vector, GetAllDocumentsOptions};
pub async fn create_mock() -> (&'static MockServer, Value) {
async fn create_mock() -> (&'static MockServer, Value) {
let mock_server = Box::leak(Box::new(MockServer::start().await));
let text_to_embedding: BTreeMap<_, _> = vec![
@ -395,7 +395,7 @@ async fn dummy_testing_the_mock() {
async fn bad_request() {
let (mock, _setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
// No placeholder string appear in the template
@ -631,7 +631,7 @@ async fn bad_request() {
async fn bad_response() {
let (mock, _setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
// No placeholder string appear in the template
@ -907,7 +907,7 @@ async fn bad_response() {
async fn bad_settings() {
let (mock, _setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1079,7 +1079,7 @@ async fn bad_settings() {
#[actix_rt::test]
async fn add_vector_and_user_provided() {
let (_mock, setting) = create_mock().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1185,7 +1185,7 @@ async fn add_vector_and_user_provided() {
#[actix_rt::test]
async fn server_returns_bad_request() {
let (mock, _setting) = create_mock_multiple().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1301,7 +1301,7 @@ async fn server_returns_bad_request() {
#[actix_rt::test]
async fn server_returns_bad_response() {
let (mock, _setting) = create_mock_multiple().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1596,7 +1596,7 @@ async fn server_returns_bad_response() {
#[actix_rt::test]
async fn server_returns_multiple() {
let (_mock, setting) = create_mock_multiple().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1702,7 +1702,7 @@ async fn server_returns_multiple() {
#[actix_rt::test]
async fn server_single_input_returns_in_array() {
let (_mock, setting) = create_mock_single_response_in_array().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1808,7 +1808,7 @@ async fn server_single_input_returns_in_array() {
#[actix_rt::test]
async fn server_raw() {
let (_mock, setting) = create_mock_raw().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -1915,7 +1915,7 @@ async fn server_raw() {
async fn server_custom_header() {
let (mock, setting) = create_mock_raw_with_custom_header().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -2044,7 +2044,7 @@ async fn server_custom_header() {
#[actix_rt::test]
async fn searchable_reindex() {
let (_mock, setting) = create_mock_default_template().await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index
@ -2154,7 +2154,7 @@ async fn searchable_reindex() {
async fn last_error_stats() {
let (sender, mut receiver) = mpsc::channel(10);
let (_mock, setting) = create_faulty_mock_raw(sender).await;
let server = Server::new().await;
let server = get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index


@ -101,14 +101,7 @@ async fn reset_embedder_documents() {
server.wait_task(response.uid()).await;
// Make sure the documents are still present
let (documents, _code) = index
.get_all_documents(GetAllDocumentsOptions {
limit: None,
offset: None,
retrieve_vectors: false,
fields: None,
})
.await;
let (documents, _code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
snapshot!(json_string!(documents), @r###"
{
"results": [
@ -258,7 +251,7 @@ async fn reset_embedder_documents() {
#[actix_rt::test]
async fn ollama_url_checks() {
let server = Server::new().await;
let server = super::get_server_vector().await;
let index = server.index("doggo");
let (response, code) = index


@ -15,6 +15,7 @@ use meilisearch_types::heed::{
};
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader};
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
use meilisearch_types::milli::{obkv_to_json, BEU32};
use meilisearch_types::tasks::{Status, Task};
@ -591,12 +592,21 @@ fn export_documents(
.into());
};
for (embedder_name, (embeddings, regenerate)) in embeddings {
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
embeddings,
)),
regenerate,
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments,
};
vectors
.insert(embedder_name, serde_json::to_value(embeddings).unwrap());
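
Distilled from the hunk above, the exported flag is a simple conjunction; a minimal sketch with a hypothetical helper name:

// Embeddings produced by fragments are exported with `regenerate: false`
// so that importing the dump does not re-embed everything.
fn exported_regenerate(regenerate: bool, has_fragments: bool) -> bool {
    regenerate && !has_fragments
}

assert!(!exported_regenerate(true, true)); // fragments: never regenerate
assert!(exported_regenerate(true, false)); // no fragments: keep the flag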


@ -168,6 +168,16 @@ pub enum SortError {
ReservedNameForFilter { name: String },
}
impl SortError {
pub fn into_search_error(self) -> Error {
Error::UserError(UserError::SortError { error: self, search: true })
}
pub fn into_document_error(self) -> Error {
Error::UserError(UserError::SortError { error: self, search: false })
}
}
impl From<AscDescError> for SortError {
fn from(error: AscDescError) -> Self {
match error {
@ -190,12 +200,6 @@ impl From<AscDescError> for SortError {
}
}
impl From<SortError> for Error {
fn from(error: SortError) -> Self {
Self::UserError(UserError::SortError(error))
}
}
#[cfg(test)]
mod tests {
use big_s::S;
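
A short sketch of how the two new constructors are meant to be used; the call sites are assumptions based on the `search` flag they set:

// The same sort error is rendered differently depending on whether it came
// from a search or from the /documents route.
let search_err = SortError::ReservedNameForFilter { name: "_geo".into() }.into_search_error();
let documents_err = SortError::ReservedNameForFilter { name: "_geo".into() }.into_document_error();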


@ -0,0 +1,294 @@
use crate::{
distance_between_two_points,
heed_codec::facet::{FieldDocIdFacetCodec, OrderedF64Codec},
lat_lng_to_xyz,
search::new::{facet_string_values, facet_values_prefix_key},
GeoPoint, Index,
};
use heed::{
types::{Bytes, Unit},
RoPrefix, RoTxn,
};
use roaring::RoaringBitmap;
use rstar::RTree;
use std::collections::VecDeque;
#[derive(Debug, Clone, Copy)]
pub struct GeoSortParameter {
// Define the strategy used by the geo sort
pub strategy: GeoSortStrategy,
// Limit the number of docs in a single bucket to avoid unexpectedly large overhead
pub max_bucket_size: u64,
// Considering the errors of GPS and geographical calculations, distances less than distance_error_margin will be treated as equal
pub distance_error_margin: f64,
}
impl Default for GeoSortParameter {
fn default() -> Self {
Self {
strategy: GeoSortStrategy::default(),
max_bucket_size: 1000,
distance_error_margin: 1.0,
}
}
}
/// Define the strategy used by the geo sort.
/// The parameter represents the cache size, and, in the case of the Dynamic strategy,
/// the point where we move from using the iterative strategy to the rtree.
#[derive(Debug, Clone, Copy)]
pub enum GeoSortStrategy {
AlwaysIterative(usize),
AlwaysRtree(usize),
Dynamic(usize),
}
impl Default for GeoSortStrategy {
fn default() -> Self {
GeoSortStrategy::Dynamic(1000)
}
}
impl GeoSortStrategy {
pub fn use_rtree(&self, candidates: usize) -> bool {
match self {
GeoSortStrategy::AlwaysIterative(_) => false,
GeoSortStrategy::AlwaysRtree(_) => true,
GeoSortStrategy::Dynamic(i) => candidates >= *i,
}
}
pub fn cache_size(&self) -> usize {
match self {
GeoSortStrategy::AlwaysIterative(i)
| GeoSortStrategy::AlwaysRtree(i)
| GeoSortStrategy::Dynamic(i) => *i,
}
}
}
#[allow(clippy::too_many_arguments)]
pub fn fill_cache(
index: &Index,
txn: &RoTxn<heed::AnyTls>,
strategy: GeoSortStrategy,
ascending: bool,
target_point: [f64; 2],
field_ids: &Option<[u16; 2]>,
rtree: &mut Option<RTree<GeoPoint>>,
geo_candidates: &RoaringBitmap,
cached_sorted_docids: &mut VecDeque<(u32, [f64; 2])>,
) -> crate::Result<()> {
debug_assert!(cached_sorted_docids.is_empty());
// lazily initialize the rtree if needed by the strategy, and cache it in the `rtree` argument
let rtree = if strategy.use_rtree(geo_candidates.len() as usize) {
if let Some(rtree) = rtree.as_ref() {
// get rtree from cache
Some(rtree)
} else {
let rtree2 = index.geo_rtree(txn)?.expect("geo candidates but no rtree");
// insert the rtree into the cache and return it.
// Can't use `get_or_insert_with` because getting the rtree from the DB is a fallible operation.
Some(&*rtree.insert(rtree2))
}
} else {
None
};
let cache_size = strategy.cache_size();
if let Some(rtree) = rtree {
if ascending {
let point = lat_lng_to_xyz(&target_point);
for point in rtree.nearest_neighbor_iter(&point) {
if geo_candidates.contains(point.data.0) {
cached_sorted_docids.push_back(point.data);
if cached_sorted_docids.len() >= cache_size {
break;
}
}
}
} else {
// in the case of the desc geo sort we look for the closest point to the opposite of the queried point,
// and we insert the points in reverse order: they get reversed when emptying the cache later on
let point = lat_lng_to_xyz(&opposite_of(target_point));
for point in rtree.nearest_neighbor_iter(&point) {
if geo_candidates.contains(point.data.0) {
cached_sorted_docids.push_front(point.data);
if cached_sorted_docids.len() >= cache_size {
break;
}
}
}
}
} else {
// the iterative version
let [lat, lng] = field_ids.expect("fill_cache can't be called without the lat&lng");
let mut documents = geo_candidates
.iter()
.map(|id| -> crate::Result<_> { Ok((id, geo_value(id, lat, lng, index, txn)?)) })
.collect::<crate::Result<Vec<(u32, [f64; 2])>>>()?;
// computing the distance between two points is expensive thus we cache the result
documents
.sort_by_cached_key(|(_, p)| distance_between_two_points(&target_point, p) as usize);
cached_sorted_docids.extend(documents);
};
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn next_bucket(
index: &Index,
txn: &RoTxn<heed::AnyTls>,
universe: &RoaringBitmap,
ascending: bool,
target_point: [f64; 2],
field_ids: &Option<[u16; 2]>,
rtree: &mut Option<RTree<GeoPoint>>,
cached_sorted_docids: &mut VecDeque<(u32, [f64; 2])>,
geo_candidates: &RoaringBitmap,
parameter: GeoSortParameter,
) -> crate::Result<Option<(RoaringBitmap, Option<[f64; 2]>)>> {
let mut geo_candidates = geo_candidates & universe;
if geo_candidates.is_empty() {
return Ok(Some((universe.clone(), None)));
}
let next = |cache: &mut VecDeque<_>| {
if ascending {
cache.pop_front()
} else {
cache.pop_back()
}
};
let put_back = |cache: &mut VecDeque<_>, x: _| {
if ascending {
cache.push_front(x)
} else {
cache.push_back(x)
}
};
let mut current_bucket = RoaringBitmap::new();
// current_distance stores the first point and distance in current bucket
let mut current_distance: Option<([f64; 2], f64)> = None;
loop {
// The loop will only exit when we have found all points with equal distance or have exhausted the candidates.
if let Some((id, point)) = next(cached_sorted_docids) {
if geo_candidates.contains(id) {
let distance = distance_between_two_points(&target_point, &point);
if let Some((point0, bucket_distance)) = current_distance.as_ref() {
if (bucket_distance - distance).abs() > parameter.distance_error_margin {
// different distance, point belongs to next bucket
put_back(cached_sorted_docids, (id, point));
return Ok(Some((current_bucket, Some(point0.to_owned()))));
} else {
// same distance, point belongs to current bucket
current_bucket.insert(id);
// remove from candidates to prevent it from being added to the cache again
geo_candidates.remove(id);
// current bucket size reaches limit, force return
if current_bucket.len() == parameter.max_bucket_size {
return Ok(Some((current_bucket, Some(point0.to_owned()))));
}
}
} else {
// first doc in current bucket
current_distance = Some((point, distance));
current_bucket.insert(id);
geo_candidates.remove(id);
// current bucket size reaches limit, force return
if current_bucket.len() == parameter.max_bucket_size {
return Ok(Some((current_bucket, Some(point.to_owned()))));
}
}
}
} else {
// cache exhausted, we need to refill it
fill_cache(
index,
txn,
parameter.strategy,
ascending,
target_point,
field_ids,
rtree,
&geo_candidates,
cached_sorted_docids,
)?;
if cached_sorted_docids.is_empty() {
// candidates exhausted, exit
if let Some((point0, _)) = current_distance.as_ref() {
return Ok(Some((current_bucket, Some(point0.to_owned()))));
} else {
return Ok(Some((universe.clone(), None)));
}
}
}
}
}
/// Return an iterator over each number value in the given field of the given document.
fn facet_number_values<'a>(
docid: u32,
field_id: u16,
index: &Index,
txn: &'a RoTxn<'a>,
) -> crate::Result<RoPrefix<'a, FieldDocIdFacetCodec<OrderedF64Codec>, Unit>> {
let key = facet_values_prefix_key(field_id, docid);
let iter = index
.field_id_docid_facet_f64s
.remap_key_type::<Bytes>()
.prefix_iter(txn, &key)?
.remap_key_type();
Ok(iter)
}
/// Extracts the lat and long values from a single document.
///
/// If it is not able to find it in the facet number index it will extract it
/// from the facet string index and parse it as f64 (as the geo extraction behaves).
pub(crate) fn geo_value(
docid: u32,
field_lat: u16,
field_lng: u16,
index: &Index,
rtxn: &RoTxn<'_>,
) -> crate::Result<[f64; 2]> {
let extract_geo = |geo_field: u16| -> crate::Result<f64> {
match facet_number_values(docid, geo_field, index, rtxn)?.next() {
Some(Ok(((_, _, geo), ()))) => Ok(geo),
Some(Err(e)) => Err(e.into()),
None => match facet_string_values(docid, geo_field, index, rtxn)?.next() {
Some(Ok((_, geo))) => {
Ok(geo.parse::<f64>().expect("cannot parse geo field as f64"))
}
Some(Err(e)) => Err(e.into()),
None => panic!("A geo faceted document doesn't contain any lat or lng"),
},
}
};
let lat = extract_geo(field_lat)?;
let lng = extract_geo(field_lng)?;
Ok([lat, lng])
}
/// Compute the antipodal coordinate of `coord`
pub(crate) fn opposite_of(mut coord: [f64; 2]) -> [f64; 2] {
coord[0] *= -1.;
// in the case of x,0 we want to return x,180
if coord[1] > 0. {
coord[1] -= 180.;
} else {
coord[1] += 180.;
}
coord
}
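
Two quick sanity checks on the helpers of this new module, with hand-picked values (2.5 and 180.0 are exactly representable, so the float comparisons are exact):

// `Dynamic(1000)` (the default) scans iteratively below the threshold and
// switches to the rtree at or above it.
assert!(!GeoSortStrategy::default().use_rtree(999));
assert!(GeoSortStrategy::default().use_rtree(1000));

// `opposite_of` returns the antipode in (lat, lng).
assert_eq!(opposite_of([48.86, 2.5]), [-48.86, -177.5]);
assert_eq!(opposite_of([10.0, 0.0]), [-10.0, 180.0]);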


@ -1,8 +1,10 @@
mod builder;
mod enriched;
pub mod geo_sort;
mod primary_key;
mod reader;
mod serde_impl;
pub mod sort;
use std::fmt::Debug;
use std::io;
@ -19,6 +21,7 @@ pub use primary_key::{
pub use reader::{DocumentsBatchCursor, DocumentsBatchCursorError, DocumentsBatchReader};
use serde::{Deserialize, Serialize};
pub use self::geo_sort::{GeoSortParameter, GeoSortStrategy};
use crate::error::{FieldIdMapMissingEntry, InternalError};
use crate::{FieldId, Object, Result};


@ -0,0 +1,444 @@
use std::collections::{BTreeSet, VecDeque};
use crate::{
constants::RESERVED_GEO_FIELD_NAME,
documents::{geo_sort::next_bucket, GeoSortParameter},
heed_codec::{
facet::{FacetGroupKeyCodec, FacetGroupValueCodec},
BytesRefCodec,
},
is_faceted,
search::facet::{ascending_facet_sort, descending_facet_sort},
AscDesc, DocumentId, Member, UserError,
};
use heed::Database;
use roaring::RoaringBitmap;
#[derive(Debug, Clone, Copy)]
enum AscDescId {
Facet { field_id: u16, ascending: bool },
Geo { field_ids: [u16; 2], target_point: [f64; 2], ascending: bool },
}
/// A [`SortedDocumentsIterator`] allows efficient access to a continuous range of sorted documents.
/// This is ideal in the context of paginated queries in which only a small number of documents are needed at a time.
/// Search operations will only be performed upon access.
pub enum SortedDocumentsIterator<'ctx> {
Leaf {
/// The exact number of documents remaining
size: usize,
values: Box<dyn Iterator<Item = DocumentId> + 'ctx>,
},
Branch {
/// The current child, got from the children iterator
current_child: Option<Box<SortedDocumentsIterator<'ctx>>>,
/// The exact number of documents remaining, excluding documents in the current child
next_children_size: usize,
/// Iterators to become the current child once it is exhausted
next_children:
Box<dyn Iterator<Item = crate::Result<SortedDocumentsIteratorBuilder<'ctx>>> + 'ctx>,
},
}
impl SortedDocumentsIterator<'_> {
/// Takes care of updating the current child if it is `None`, and also updates the size
fn update_current<'ctx>(
current_child: &mut Option<Box<SortedDocumentsIterator<'ctx>>>,
next_children_size: &mut usize,
next_children: &mut Box<
dyn Iterator<Item = crate::Result<SortedDocumentsIteratorBuilder<'ctx>>> + 'ctx,
>,
) -> crate::Result<()> {
if current_child.is_none() {
*current_child = match next_children.next() {
Some(Ok(builder)) => {
let next_child = Box::new(builder.build()?);
*next_children_size -= next_child.size_hint().0;
Some(next_child)
}
Some(Err(e)) => return Err(e),
None => return Ok(()),
};
}
Ok(())
}
}
impl Iterator for SortedDocumentsIterator<'_> {
type Item = crate::Result<DocumentId>;
/// Implementing the `nth` method allows for efficient access to the nth document in the sorted order.
/// It's used by `skip` internally.
/// The default implementation of `nth` would iterate over all children, which is inefficient for large datasets.
/// This implementation will jump over whole chunks of children until it gets close.
fn nth(&mut self, n: usize) -> Option<Self::Item> {
if n == 0 {
return self.next();
}
// If it's at the leaf level, just forward the call to the values iterator
let (current_child, next_children, next_children_size) = match self {
SortedDocumentsIterator::Leaf { values, size } => {
*size = size.saturating_sub(n);
return values.nth(n).map(Ok);
}
SortedDocumentsIterator::Branch {
current_child,
next_children,
next_children_size,
} => (current_child, next_children, next_children_size),
};
// Otherwise don't directly iterate over children, skip them if we know we will go further
let mut to_skip = n - 1;
while to_skip > 0 {
if let Err(e) = SortedDocumentsIterator::update_current(
current_child,
next_children_size,
next_children,
) {
return Some(Err(e));
}
let Some(inner) = current_child else {
return None; // No more inner iterators, everything has been consumed.
};
if to_skip >= inner.size_hint().0 {
// The current child isn't large enough to contain the nth element.
// Skip it and continue with the next one.
to_skip -= inner.size_hint().0;
*current_child = None;
continue;
} else {
// The current iterator is large enough, so we can forward the call to it.
return inner.nth(to_skip + 1);
}
}
self.next()
}
/// Iterators need to keep track of their size so that they can be skipped efficiently by the `nth` method.
fn size_hint(&self) -> (usize, Option<usize>) {
let size = match self {
SortedDocumentsIterator::Leaf { size, .. } => *size,
SortedDocumentsIterator::Branch {
next_children_size,
current_child: Some(current_child),
..
} => current_child.size_hint().0 + next_children_size,
SortedDocumentsIterator::Branch { next_children_size, current_child: None, .. } => {
*next_children_size
}
};
(size, Some(size))
}
fn next(&mut self) -> Option<Self::Item> {
match self {
SortedDocumentsIterator::Leaf { values, size } => {
let result = values.next().map(Ok);
if result.is_some() {
*size -= 1;
}
result
}
SortedDocumentsIterator::Branch {
current_child,
next_children_size,
next_children,
} => {
let mut result = None;
while result.is_none() {
// Ensure we have selected an iterator to work with
if let Err(e) = SortedDocumentsIterator::update_current(
current_child,
next_children_size,
next_children,
) {
return Some(Err(e));
}
let Some(inner) = current_child else {
return None;
};
result = inner.next();
// If the current iterator is exhausted, we need to try the next one
if result.is_none() {
*current_child = None;
}
}
result
}
}
}
}
/// Builder for a [`SortedDocumentsIterator`].
/// Most builders won't ever be built, because pagination will skip them.
pub struct SortedDocumentsIteratorBuilder<'ctx> {
index: &'ctx crate::Index,
rtxn: &'ctx heed::RoTxn<'ctx>,
number_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
string_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
fields: &'ctx [AscDescId],
candidates: RoaringBitmap,
geo_candidates: &'ctx RoaringBitmap,
}
impl<'ctx> SortedDocumentsIteratorBuilder<'ctx> {
/// Performs the sort and builds a [`SortedDocumentsIterator`].
fn build(self) -> crate::Result<SortedDocumentsIterator<'ctx>> {
let size = self.candidates.len() as usize;
match self.fields {
[] => Ok(SortedDocumentsIterator::Leaf {
size,
values: Box::new(self.candidates.into_iter()),
}),
[AscDescId::Facet { field_id, ascending }, next_fields @ ..] => {
SortedDocumentsIteratorBuilder::build_facet(
self.index,
self.rtxn,
self.number_db,
self.string_db,
next_fields,
self.candidates,
self.geo_candidates,
*field_id,
*ascending,
)
}
[AscDescId::Geo { field_ids, target_point, ascending }, next_fields @ ..] => {
SortedDocumentsIteratorBuilder::build_geo(
self.index,
self.rtxn,
self.number_db,
self.string_db,
next_fields,
self.candidates,
self.geo_candidates,
*field_ids,
*target_point,
*ascending,
)
}
}
}
/// Builds a [`SortedDocumentsIterator`] based on the results of a facet sort.
#[allow(clippy::too_many_arguments)]
fn build_facet(
index: &'ctx crate::Index,
rtxn: &'ctx heed::RoTxn<'ctx>,
number_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
string_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
next_fields: &'ctx [AscDescId],
candidates: RoaringBitmap,
geo_candidates: &'ctx RoaringBitmap,
field_id: u16,
ascending: bool,
) -> crate::Result<SortedDocumentsIterator<'ctx>> {
let size = candidates.len() as usize;
// Perform the sort on the first field
let (number_iter, string_iter) = if ascending {
let number_iter = ascending_facet_sort(rtxn, number_db, field_id, candidates.clone())?;
let string_iter = ascending_facet_sort(rtxn, string_db, field_id, candidates)?;
(itertools::Either::Left(number_iter), itertools::Either::Left(string_iter))
} else {
let number_iter = descending_facet_sort(rtxn, number_db, field_id, candidates.clone())?;
let string_iter = descending_facet_sort(rtxn, string_db, field_id, candidates)?;
(itertools::Either::Right(number_iter), itertools::Either::Right(string_iter))
};
// Create builders for the next level of the tree
let number_iter = number_iter.map(|r| r.map(|(d, _)| d));
let string_iter = string_iter.map(|r| r.map(|(d, _)| d));
let next_children = number_iter.chain(string_iter).map(move |r| {
Ok(SortedDocumentsIteratorBuilder {
index,
rtxn,
number_db,
string_db,
fields: next_fields,
candidates: r?,
geo_candidates,
})
});
Ok(SortedDocumentsIterator::Branch {
current_child: None,
next_children_size: size,
next_children: Box::new(next_children),
})
}
/// Builds a [`SortedDocumentsIterator`] based on the (lazy) results of a geo sort.
#[allow(clippy::too_many_arguments)]
fn build_geo(
index: &'ctx crate::Index,
rtxn: &'ctx heed::RoTxn<'ctx>,
number_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
string_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
next_fields: &'ctx [AscDescId],
candidates: RoaringBitmap,
geo_candidates: &'ctx RoaringBitmap,
field_ids: [u16; 2],
target_point: [f64; 2],
ascending: bool,
) -> crate::Result<SortedDocumentsIterator<'ctx>> {
let mut cache = VecDeque::new();
let mut rtree = None;
let size = candidates.len() as usize;
let not_geo_candidates = candidates.clone() - geo_candidates;
let mut geo_remaining = size - not_geo_candidates.len() as usize;
let mut not_geo_candidates = Some(not_geo_candidates);
let next_children = std::iter::from_fn(move || {
// Find the next bucket of geo-sorted documents.
// next_bucket loops back to the beginning once exhausted, so we use a variable to track how many geo documents are left.
if geo_remaining > 0 {
if let Ok(Some((docids, _point))) = next_bucket(
index,
rtxn,
&candidates,
ascending,
target_point,
&Some(field_ids),
&mut rtree,
&mut cache,
geo_candidates,
GeoSortParameter::default(),
) {
geo_remaining -= docids.len() as usize;
return Some(Ok(SortedDocumentsIteratorBuilder {
index,
rtxn,
number_db,
string_db,
fields: next_fields,
candidates: docids,
geo_candidates,
}));
}
}
// Once all geo candidates have been processed, we can return the others
if let Some(not_geo_candidates) = not_geo_candidates.take() {
if !not_geo_candidates.is_empty() {
return Some(Ok(SortedDocumentsIteratorBuilder {
index,
rtxn,
number_db,
string_db,
fields: next_fields,
candidates: not_geo_candidates,
geo_candidates,
}));
}
}
None
});
Ok(SortedDocumentsIterator::Branch {
current_child: None,
next_children_size: size,
next_children: Box::new(next_children),
})
}
}
/// A structure owning the data needed during the lifetime of a [`SortedDocumentsIterator`].
pub struct SortedDocuments<'ctx> {
index: &'ctx crate::Index,
rtxn: &'ctx heed::RoTxn<'ctx>,
fields: Vec<AscDescId>,
number_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
string_db: Database<FacetGroupKeyCodec<BytesRefCodec>, FacetGroupValueCodec>,
candidates: &'ctx RoaringBitmap,
geo_candidates: RoaringBitmap,
}
impl<'ctx> SortedDocuments<'ctx> {
pub fn iter(&'ctx self) -> crate::Result<SortedDocumentsIterator<'ctx>> {
let builder = SortedDocumentsIteratorBuilder {
index: self.index,
rtxn: self.rtxn,
number_db: self.number_db,
string_db: self.string_db,
fields: &self.fields,
candidates: self.candidates.clone(),
geo_candidates: &self.geo_candidates,
};
builder.build()
}
}
pub fn recursive_sort<'ctx>(
index: &'ctx crate::Index,
rtxn: &'ctx heed::RoTxn<'ctx>,
sort: Vec<AscDesc>,
candidates: &'ctx RoaringBitmap,
) -> crate::Result<SortedDocuments<'ctx>> {
let sortable_fields: BTreeSet<_> = index.sortable_fields(rtxn)?.into_iter().collect();
let fields_ids_map = index.fields_ids_map(rtxn)?;
// Retrieve the field ids that are used for sorting
let mut fields = Vec::new();
let mut need_geo_candidates = false;
for asc_desc in sort {
let (field, geofield) = match asc_desc {
AscDesc::Asc(Member::Field(field)) => (Some((field, true)), None),
AscDesc::Desc(Member::Field(field)) => (Some((field, false)), None),
AscDesc::Asc(Member::Geo(target_point)) => (None, Some((target_point, true))),
AscDesc::Desc(Member::Geo(target_point)) => (None, Some((target_point, false))),
};
if let Some((field, ascending)) = field {
if is_faceted(&field, &sortable_fields) {
if let Some(field_id) = fields_ids_map.id(&field) {
fields.push(AscDescId::Facet { field_id, ascending });
continue;
}
}
return Err(UserError::InvalidDocumentSortableAttribute {
field: field.to_string(),
sortable_fields: sortable_fields.clone(),
}
.into());
}
if let Some((target_point, ascending)) = geofield {
if sortable_fields.contains(RESERVED_GEO_FIELD_NAME) {
if let (Some(lat), Some(lng)) =
(fields_ids_map.id("_geo.lat"), fields_ids_map.id("_geo.lng"))
{
need_geo_candidates = true;
fields.push(AscDescId::Geo { field_ids: [lat, lng], target_point, ascending });
continue;
}
}
return Err(UserError::InvalidDocumentSortableAttribute {
field: RESERVED_GEO_FIELD_NAME.to_string(),
sortable_fields: sortable_fields.clone(),
}
.into());
}
}
let geo_candidates = if need_geo_candidates {
index.geo_faceted_documents_ids(rtxn)?
} else {
RoaringBitmap::new()
};
let number_db = index.facet_id_f64_docids.remap_key_type::<FacetGroupKeyCodec<BytesRefCodec>>();
let string_db =
index.facet_id_string_docids.remap_key_type::<FacetGroupKeyCodec<BytesRefCodec>>();
Ok(SortedDocuments { index, rtxn, fields, number_db, string_db, candidates, geo_candidates })
}
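
A hedged sketch of how `recursive_sort` is meant to be consumed for a paginated documents request; `offset`, `limit` and the surrounding bindings are assumptions:

// Build the lazy iterator, then paginate; `skip` stays cheap because `nth`
// jumps over whole buckets instead of draining them one by one.
let sorted = recursive_sort(&index, &rtxn, sort_criteria, &candidates)?;
let page = sorted
    .iter()?
    .skip(offset)
    .take(limit)
    .collect::<crate::Result<Vec<DocumentId>>>()?;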


@ -191,7 +191,21 @@ and can not be more than 511 bytes.", .document_id.to_string()
),
}
)]
InvalidSortableAttribute { field: String, valid_fields: BTreeSet<String>, hidden_fields: bool },
InvalidSearchSortableAttribute {
field: String,
valid_fields: BTreeSet<String>,
hidden_fields: bool,
},
#[error("Attribute `{}` is not sortable. {}",
.field,
match .sortable_fields.is_empty() {
true => "This index does not have configured sortable attributes.".to_string(),
false => format!("Available sortable attributes are: `{}`.",
sortable_fields.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(", ")
),
}
)]
InvalidDocumentSortableAttribute { field: String, sortable_fields: BTreeSet<String> },
#[error("Attribute `{}` is not filterable and thus, cannot be used as distinct attribute. {}",
.field,
match (.valid_patterns.is_empty(), .matching_rule_index) {
@ -272,8 +286,8 @@ and can not be more than 511 bytes.", .document_id.to_string()
PrimaryKeyCannotBeChanged(String),
#[error(transparent)]
SerdeJson(serde_json::Error),
#[error(transparent)]
SortError(#[from] SortError),
#[error("{error}")]
SortError { error: SortError, search: bool },
#[error("An unknown internal document id have been used: `{document_id}`.")]
UnknownInternalDocumentId { document_id: DocumentId },
#[error("`minWordSizeForTypos` setting is invalid. `oneTypo` and `twoTypos` fields should be between `0` and `255`, and `twoTypos` should be greater or equals to `oneTypo` but found `oneTypo: {0}` and twoTypos: {1}`.")]
@ -616,7 +630,7 @@ fn conditionally_lookup_for_error_message() {
];
for (list, suffix) in messages {
let err = UserError::InvalidSortableAttribute {
let err = UserError::InvalidSearchSortableAttribute {
field: "name".to_string(),
valid_fields: list,
hidden_fields: false,


@ -1766,20 +1766,22 @@ impl Index {
&self,
rtxn: &RoTxn<'_>,
docid: DocumentId,
) -> Result<BTreeMap<String, (Vec<Embedding>, bool)>> {
) -> Result<BTreeMap<String, EmbeddingsWithMetadata>> {
let mut res = BTreeMap::new();
let embedders = self.embedding_configs();
for config in embedders.embedding_configs(rtxn)? {
let embedder_info = embedders.embedder_info(rtxn, &config.name)?.unwrap();
let has_fragments = config.config.embedder_options.has_fragments();
let reader = ArroyWrapper::new(
self.vector_arroy,
embedder_info.embedder_id,
config.config.quantized(),
);
let embeddings = reader.item_vectors(rtxn, docid)?;
let regenerate = embedder_info.embedding_status.must_regenerate(docid);
res.insert(
config.name.to_owned(),
(embeddings, embedder_info.embedding_status.must_regenerate(docid)),
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
);
}
Ok(res)
@ -1919,6 +1921,12 @@ impl Index {
}
}
pub struct EmbeddingsWithMetadata {
pub embeddings: Vec<Embedding>,
pub regenerate: bool,
pub has_fragments: bool,
}
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct ChatConfig {
pub description: String,
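
A sketch of consuming the new return type (assuming this method is `Index::embeddings`, as the export code above suggests, and that `rtxn` and `docid` are valid):

// Each embedder now reports its vectors together with the two flags.
for (name, EmbeddingsWithMetadata { embeddings, regenerate, has_fragments }) in
    index.embeddings(&rtxn, docid)?
{
    println!("{name}: {} vector(s), regenerate={regenerate}, has_fragments={has_fragments}", embeddings.len());
}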


@ -43,12 +43,13 @@ use std::fmt;
use std::hash::BuildHasherDefault;
use charabia::normalizer::{CharNormalizer, CompatibilityDecompositionNormalizer};
pub use documents::GeoSortStrategy;
pub use filter_parser::{Condition, FilterCondition, Span, Token};
use fxhash::{FxHasher32, FxHasher64};
pub use grenad::CompressionType;
pub use search::new::{
execute_search, filtered_universe, DefaultSearchLogger, GeoSortStrategy, SearchContext,
SearchLogger, VisualSearchLogger,
execute_search, filtered_universe, DefaultSearchLogger, SearchContext, SearchLogger,
VisualSearchLogger,
};
use serde_json::Value;
pub use thread_pool_no_abort::{PanicCatched, ThreadPoolNoAbort, ThreadPoolNoAbortBuilder};


@ -12,7 +12,6 @@ use liquid::{ObjectView, ValueView};
use rustc_hash::FxBuildHasher;
use serde_json::value::RawValue;
use crate::constants::{RESERVED_GEO_FIELD_NAME, RESERVED_VECTORS_FIELD_NAME};
use crate::update::del_add::{DelAdd, KvReaderDelAdd};
use crate::FieldsIdsMap;
@ -144,110 +143,6 @@ impl ValueView for Document<'_> {
/// Implementation for any type that implements the Document trait
use crate::update::new::document::Document as DocumentTrait;
pub struct JsonDocument {
object: liquid::Object,
cached: BTreeMap<String, Box<RawValue>>,
}
impl JsonDocument {
pub fn new(value: &serde_json::Value) -> Result<Self, ()> {
let to_string = serde_json::to_string(&value).map_err(|_| ())?;
let back_to_value: BTreeMap<String, Box<RawValue>> =
serde_json::from_str(&to_string).map_err(|_| ())?;
let object = liquid::to_object(&value).map_err(|_| ())?;
Ok(Self { object, cached: back_to_value })
}
}
impl std::fmt::Debug for JsonDocument {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.object.fmt(f)
}
}
impl<'a> DocumentTrait<'a> for &'a JsonDocument {
fn iter_top_level_fields(
&self,
) -> impl Iterator<Item = crate::Result<(&'a str, &'a RawValue)>> {
self.cached.iter().filter_map(|(k, v)| {
if k == RESERVED_VECTORS_FIELD_NAME || k == RESERVED_GEO_FIELD_NAME {
None
} else {
Some(Ok((k.as_str(), v.as_ref())))
}
})
}
fn top_level_fields_count(&self) -> usize {
self.cached.len()
- self.cached.contains_key(RESERVED_VECTORS_FIELD_NAME) as usize
- self.cached.contains_key(RESERVED_GEO_FIELD_NAME) as usize
}
fn top_level_field(&self, k: &str) -> crate::Result<Option<&'a RawValue>> {
if k == RESERVED_VECTORS_FIELD_NAME || k == RESERVED_GEO_FIELD_NAME {
return Ok(None);
}
Ok(self.cached.get(k).map(|r| r.as_ref()))
}
fn vectors_field(&self) -> crate::Result<Option<&'a RawValue>> {
Ok(self.cached.get(RESERVED_VECTORS_FIELD_NAME).map(|r| r.as_ref()))
}
fn geo_field(&self) -> crate::Result<Option<&'a RawValue>> {
Ok(self.cached.get(RESERVED_GEO_FIELD_NAME).map(|r| r.as_ref()))
}
}
impl ObjectView for JsonDocument {
fn as_value(&self) -> &dyn ValueView {
self.object.as_value()
}
fn size(&self) -> i64 {
self.object.size()
}
fn keys<'k>(&'k self) -> Box<dyn Iterator<Item = KStringCow<'k>> + 'k> {
Box::new(self.object.keys().map(|s| s.into()))
}
fn values<'k>(&'k self) -> Box<dyn Iterator<Item = &'k dyn ValueView> + 'k> {
Box::new(self.object.values().map(|v| v.as_view()))
}
fn iter<'k>(&'k self) -> Box<dyn Iterator<Item = (KStringCow<'k>, &'k dyn ValueView)> + 'k> {
Box::new(self.object.iter().map(|(k, v)| (k.into(), v.as_view())))
}
fn contains_key(&self, index: &str) -> bool {
self.object.contains_key(index)
}
fn get<'s>(&'s self, index: &str) -> Option<&'s dyn ValueView> {
self.object.get(index).map(|v| v.as_view())
}
}
impl ValueView for JsonDocument {
fn as_debug(&self) -> &dyn fmt::Debug {
self.object.as_debug()
}
fn render(&self) -> DisplayCow<'_> {
self.object.render()
}
fn source(&self) -> DisplayCow<'_> {
self.object.source()
}
fn type_name(&self) -> &'static str {
self.object.type_name()
}
fn query_state(&self, state: State) -> bool {
self.object.query_state(state)
}
fn to_kstr(&self) -> KStringCow<'_> {
self.object.to_kstr()
}
fn to_value(&self) -> LiquidValue {
self.object.to_value()
}
}
#[derive(Debug)]
pub struct ParseableDocument<'a, 'doc, D: DocumentTrait<'a> + Debug> {
document: D,


@ -12,16 +12,11 @@ use bumpalo::Bump;
pub(crate) use document::{Document, ParseableDocument};
use error::{NewPromptError, RenderPromptError};
pub use fields::{BorrowedFields, OwnedFields};
use heed::RoTxn;
use liquid::model::Value as LiquidValue;
use liquid::ValueView;
pub use self::context::Context;
use crate::fields_ids_map::metadata::FieldIdMapWithMetadata;
use crate::prompt::document::JsonDocument;
use crate::update::del_add::DelAdd;
use crate::update::new::document::DocumentFromDb;
use crate::{GlobalFieldsIdsMap, Index, MetadataBuilder};
use crate::GlobalFieldsIdsMap;
pub struct Prompt {
template: liquid::Template,
@ -169,49 +164,6 @@ fn truncate(s: &mut String, max_bytes: usize) {
}
}
pub fn get_inline_document_fields(
index: &Index,
rtxn: &RoTxn<'_>,
inline_doc: &serde_json::Value,
) -> Result<Result<LiquidValue, ()>, crate::Error> {
let fid_map_with_meta = index.fields_ids_map_with_metadata(rtxn)?;
let Ok(inline_doc) = JsonDocument::new(inline_doc) else {
return Ok(Err(()));
};
let fields = OwnedFields::new(&inline_doc, &fid_map_with_meta);
Ok(Ok(fields.to_value()))
}
pub fn get_document(
index: &Index,
rtxn: &RoTxn<'_>,
external_id: &str,
with_fields: bool,
) -> Result<Option<(LiquidValue, Option<LiquidValue>)>, crate::Error> {
let Some(internal_id) = index.external_documents_ids().get(rtxn, external_id)? else {
return Ok(None);
};
let fid_map = index.fields_ids_map(rtxn)?;
let Some(document_from_db) = DocumentFromDb::new(internal_id, rtxn, index, &fid_map)? else {
return Ok(None);
};
let doc_alloc = Bump::new();
let parseable_document = ParseableDocument::new(document_from_db, &doc_alloc);
if with_fields {
let metadata_builder = MetadataBuilder::from_index(index, rtxn)?;
let fid_map_with_meta = FieldIdMapWithMetadata::new(fid_map.clone(), metadata_builder);
let fields = OwnedFields::new(&parseable_document, &fid_map_with_meta);
Ok(Some((parseable_document.to_value(), Some(fields.to_value()))))
} else {
Ok(Some((parseable_document.to_value(), None)))
}
}
#[cfg(test)]
mod test {
use super::Prompt;


@ -210,6 +210,7 @@ impl Search<'_> {
scoring_strategy: ScoringStrategy::Detailed,
words_limit: self.words_limit,
exhaustive_number_hits: self.exhaustive_number_hits,
max_total_hits: self.max_total_hits,
rtxn: self.rtxn,
index: self.index,
semantic: self.semantic.clone(),


@ -9,6 +9,7 @@ use roaring::bitmap::RoaringBitmap;
pub use self::facet::{FacetDistribution, Filter, OrderBy, DEFAULT_VALUES_PER_FACET};
pub use self::new::matches::{FormatOptions, MatchBounds, MatcherBuilder, MatchingWords};
use self::new::{execute_vector_search, PartialSearchResult, VectorStoreStats};
use crate::documents::GeoSortParameter;
use crate::filterable_attributes_rules::{filtered_matching_patterns, matching_features};
use crate::index::MatchingStrategy;
use crate::score_details::{ScoreDetails, ScoringStrategy};
@ -47,11 +48,12 @@ pub struct Search<'a> {
sort_criteria: Option<Vec<AscDesc>>,
distinct: Option<String>,
searchable_attributes: Option<&'a [String]>,
geo_param: new::GeoSortParameter,
geo_param: GeoSortParameter,
terms_matching_strategy: TermsMatchingStrategy,
scoring_strategy: ScoringStrategy,
words_limit: usize,
exhaustive_number_hits: bool,
max_total_hits: Option<usize>,
rtxn: &'a heed::RoTxn<'a>,
index: &'a Index,
semantic: Option<SemanticSearch>,
@ -70,10 +72,11 @@ impl<'a> Search<'a> {
sort_criteria: None,
distinct: None,
searchable_attributes: None,
geo_param: new::GeoSortParameter::default(),
geo_param: GeoSortParameter::default(),
terms_matching_strategy: TermsMatchingStrategy::default(),
scoring_strategy: Default::default(),
exhaustive_number_hits: false,
max_total_hits: None,
words_limit: 10,
rtxn,
index,
@ -147,7 +150,7 @@ impl<'a> Search<'a> {
}
#[cfg(test)]
pub fn geo_sort_strategy(&mut self, strategy: new::GeoSortStrategy) -> &mut Search<'a> {
pub fn geo_sort_strategy(&mut self, strategy: crate::GeoSortStrategy) -> &mut Search<'a> {
self.geo_param.strategy = strategy;
self
}
@ -165,6 +168,11 @@ impl<'a> Search<'a> {
self
}
pub fn max_total_hits(&mut self, max_total_hits: Option<usize>) -> &mut Search<'a> {
self.max_total_hits = max_total_hits;
self
}
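
A sketch of the new setter in a builder chain; the values are illustrative and `exhaustive_number_hits` is assumed to be the existing setter for the field of the same name:

let mut search = Search::new(&rtxn, &index);
search.exhaustive_number_hits(true); // assumed existing setter
search.max_total_hits(Some(1_000)); // cap applied when counting total hits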
pub fn time_budget(&mut self, time_budget: TimeBudget) -> &mut Search<'a> {
self.time_budget = time_budget;
self
@ -243,6 +251,8 @@ impl<'a> Search<'a> {
&mut ctx,
vector,
self.scoring_strategy,
self.exhaustive_number_hits,
self.max_total_hits,
universe,
&self.sort_criteria,
&self.distinct,
@ -261,6 +271,7 @@ impl<'a> Search<'a> {
self.terms_matching_strategy,
self.scoring_strategy,
self.exhaustive_number_hits,
self.max_total_hits,
universe,
&self.sort_criteria,
&self.distinct,
@ -314,6 +325,7 @@ impl fmt::Debug for Search<'_> {
scoring_strategy,
words_limit,
exhaustive_number_hits,
max_total_hits,
rtxn: _,
index: _,
semantic,
@ -333,6 +345,7 @@ impl fmt::Debug for Search<'_> {
.field("terms_matching_strategy", terms_matching_strategy)
.field("scoring_strategy", scoring_strategy)
.field("exhaustive_number_hits", exhaustive_number_hits)
.field("max_total_hits", max_total_hits)
.field("words_limit", words_limit)
.field(
"semantic.embedder_name",


@ -32,6 +32,8 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
logger: &mut dyn SearchLogger<Q>,
time_budget: TimeBudget,
ranking_score_threshold: Option<f64>,
exhaustive_number_hits: bool,
max_total_hits: Option<usize>,
) -> Result<BucketSortOutput> {
logger.initial_query(query);
logger.ranking_rules(&ranking_rules);
@ -159,7 +161,13 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
};
}
while valid_docids.len() < length {
let max_len_to_evaluate =
match (max_total_hits, exhaustive_number_hits && ranking_score_threshold.is_some()) {
(Some(max_total_hits), true) => max_total_hits,
_ => length,
};
while valid_docids.len() < max_len_to_evaluate {
if time_budget.exceeded() {
loop {
let bucket = std::mem::take(&mut ranking_rule_universes[cur_ranking_rule_index]);
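
A worked reading of the new bound, under assumed request values:

// Assumed request: limit = 20, maxTotalHits = 1000, rankingScoreThreshold set,
// exhaustive hit counting enabled. The loop may then evaluate up to 1000
// candidates, so the reported total is not capped at the 20 requested hits.
let (length, max_total_hits) = (20, Some(1000));
let exhaustive_with_threshold = true;
let max_len_to_evaluate = match (max_total_hits, exhaustive_with_threshold) {
    (Some(max_total_hits), true) => max_total_hits,
    _ => length,
};
assert_eq!(max_len_to_evaluate, 1000);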


@ -82,7 +82,7 @@ fn facet_value_docids(
}
/// Return an iterator over each number value in the given field of the given document.
fn facet_number_values<'a>(
pub(crate) fn facet_number_values<'a>(
docid: u32,
field_id: u16,
index: &Index,
@ -118,7 +118,7 @@ pub fn facet_string_values<'a>(
}
#[allow(clippy::drop_non_drop)]
fn facet_values_prefix_key(distinct: u16, id: u32) -> [u8; FID_SIZE + DOCID_SIZE] {
pub(crate) fn facet_values_prefix_key(distinct: u16, id: u32) -> [u8; FID_SIZE + DOCID_SIZE] {
concat_arrays::concat_arrays!(distinct.to_be_bytes(), id.to_be_bytes())
}


@ -1,96 +1,18 @@
use std::collections::VecDeque;
use heed::types::{Bytes, Unit};
use heed::{RoPrefix, RoTxn};
use roaring::RoaringBitmap;
use rstar::RTree;
use super::facet_string_values;
use super::ranking_rules::{RankingRule, RankingRuleOutput, RankingRuleQueryTrait};
use crate::heed_codec::facet::{FieldDocIdFacetCodec, OrderedF64Codec};
use crate::documents::geo_sort::{fill_cache, next_bucket};
use crate::documents::{GeoSortParameter, GeoSortStrategy};
use crate::score_details::{self, ScoreDetails};
use crate::{
distance_between_two_points, lat_lng_to_xyz, GeoPoint, Index, Result, SearchContext,
SearchLogger,
};
const FID_SIZE: usize = 2;
const DOCID_SIZE: usize = 4;
#[allow(clippy::drop_non_drop)]
fn facet_values_prefix_key(distinct: u16, id: u32) -> [u8; FID_SIZE + DOCID_SIZE] {
concat_arrays::concat_arrays!(distinct.to_be_bytes(), id.to_be_bytes())
}
/// Return an iterator over each number value in the given field of the given document.
fn facet_number_values<'a>(
docid: u32,
field_id: u16,
index: &Index,
txn: &'a RoTxn<'a>,
) -> Result<RoPrefix<'a, FieldDocIdFacetCodec<OrderedF64Codec>, Unit>> {
let key = facet_values_prefix_key(field_id, docid);
let iter = index
.field_id_docid_facet_f64s
.remap_key_type::<Bytes>()
.prefix_iter(txn, &key)?
.remap_key_type();
Ok(iter)
}
#[derive(Debug, Clone, Copy)]
pub struct Parameter {
// Define the strategy used by the geo sort
pub strategy: Strategy,
// Limit the number of docs in a single bucket to avoid unexpectedly large overhead
pub max_bucket_size: u64,
// Considering the errors of GPS and geographical calculations, distances less than distance_error_margin will be treated as equal
pub distance_error_margin: f64,
}
impl Default for Parameter {
fn default() -> Self {
Self { strategy: Strategy::default(), max_bucket_size: 1000, distance_error_margin: 1.0 }
}
}
/// Define the strategy used by the geo sort.
/// The parameter represents the cache size, and, in the case of the Dynamic strategy,
/// the point where we move from using the iterative strategy to the rtree.
#[derive(Debug, Clone, Copy)]
pub enum Strategy {
AlwaysIterative(usize),
AlwaysRtree(usize),
Dynamic(usize),
}
impl Default for Strategy {
fn default() -> Self {
Strategy::Dynamic(1000)
}
}
impl Strategy {
pub fn use_rtree(&self, candidates: usize) -> bool {
match self {
Strategy::AlwaysIterative(_) => false,
Strategy::AlwaysRtree(_) => true,
Strategy::Dynamic(i) => candidates >= *i,
}
}
pub fn cache_size(&self) -> usize {
match self {
Strategy::AlwaysIterative(i) | Strategy::AlwaysRtree(i) | Strategy::Dynamic(i) => *i,
}
}
}
use crate::{GeoPoint, Result, SearchContext, SearchLogger};
pub struct GeoSort<Q: RankingRuleQueryTrait> {
query: Option<Q>,
strategy: Strategy,
strategy: GeoSortStrategy,
ascending: bool,
point: [f64; 2],
field_ids: Option<[u16; 2]>,
@ -107,12 +29,12 @@ pub struct GeoSort<Q: RankingRuleQueryTrait> {
impl<Q: RankingRuleQueryTrait> GeoSort<Q> {
pub fn new(
parameter: Parameter,
parameter: GeoSortParameter,
geo_faceted_docids: RoaringBitmap,
point: [f64; 2],
ascending: bool,
) -> Result<Self> {
let Parameter { strategy, max_bucket_size, distance_error_margin } = parameter;
let GeoSortParameter { strategy, max_bucket_size, distance_error_margin } = parameter;
Ok(Self {
query: None,
strategy,
@ -134,98 +56,22 @@ impl<Q: RankingRuleQueryTrait> GeoSort<Q> {
ctx: &mut SearchContext<'_>,
geo_candidates: &RoaringBitmap,
) -> Result<()> {
debug_assert!(self.field_ids.is_some(), "fill_buffer can't be called without the lat&lng");
debug_assert!(self.cached_sorted_docids.is_empty());
// lazily initialize the rtree if needed by the strategy, and cache it in `self.rtree`
let rtree = if self.strategy.use_rtree(geo_candidates.len() as usize) {
if let Some(rtree) = self.rtree.as_ref() {
// get rtree from cache
Some(rtree)
} else {
let rtree = ctx.index.geo_rtree(ctx.txn)?.expect("geo candidates but no rtree");
// insert rtree in cache and returns it.
// Can't use `get_or_insert_with` because getting the rtree from the DB is a fallible operation.
Some(&*self.rtree.insert(rtree))
}
} else {
None
};
let cache_size = self.strategy.cache_size();
if let Some(rtree) = rtree {
if self.ascending {
let point = lat_lng_to_xyz(&self.point);
for point in rtree.nearest_neighbor_iter(&point) {
if geo_candidates.contains(point.data.0) {
self.cached_sorted_docids.push_back(point.data);
if self.cached_sorted_docids.len() >= cache_size {
break;
}
}
}
} else {
// in the case of the desc geo sort we look for the closest point to the opposite of the queried point
// and we insert the points in reverse order they get reversed when emptying the cache later on
let point = lat_lng_to_xyz(&opposite_of(self.point));
for point in rtree.nearest_neighbor_iter(&point) {
if geo_candidates.contains(point.data.0) {
self.cached_sorted_docids.push_front(point.data);
if self.cached_sorted_docids.len() >= cache_size {
break;
}
}
}
}
} else {
// the iterative version
let [lat, lng] = self.field_ids.unwrap();
let mut documents = geo_candidates
.iter()
.map(|id| -> Result<_> { Ok((id, geo_value(id, lat, lng, ctx.index, ctx.txn)?)) })
.collect::<Result<Vec<(u32, [f64; 2])>>>()?;
// computing the distance between two points is expensive, thus we cache the result
documents
.sort_by_cached_key(|(_, p)| distance_between_two_points(&self.point, p) as usize);
self.cached_sorted_docids.extend(documents);
};
fill_cache(
ctx.index,
ctx.txn,
self.strategy,
self.ascending,
self.point,
&self.field_ids,
&mut self.rtree,
geo_candidates,
&mut self.cached_sorted_docids,
)?;
Ok(())
}
}
/// Extracts the lat and long values from a single document.
///
/// If the value cannot be found in the facet number index, it is extracted
/// from the facet string index and parsed as an f64 (matching the geo extraction behavior).
fn geo_value(
docid: u32,
field_lat: u16,
field_lng: u16,
index: &Index,
rtxn: &RoTxn<'_>,
) -> Result<[f64; 2]> {
let extract_geo = |geo_field: u16| -> Result<f64> {
match facet_number_values(docid, geo_field, index, rtxn)?.next() {
Some(Ok(((_, _, geo), ()))) => Ok(geo),
Some(Err(e)) => Err(e.into()),
None => match facet_string_values(docid, geo_field, index, rtxn)?.next() {
Some(Ok((_, geo))) => {
Ok(geo.parse::<f64>().expect("cannot parse geo field as f64"))
}
Some(Err(e)) => Err(e.into()),
None => panic!("A geo faceted document doesn't contain any lat or lng"),
},
}
};
let lat = extract_geo(field_lat)?;
let lng = extract_geo(field_lng)?;
Ok([lat, lng])
}
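The number-then-string fallback can be shown in isolation; this is a hedged sketch with a hypothetical helper, not the actual facet iterators:

fn extract_geo_like(number_facet: Option<f64>, string_facet: Option<&str>) -> Option<f64> {
    // prefer the facet number index; otherwise parse the facet string value as an f64,
    // mirroring how the geo extraction stores coordinates
    number_facet.or_else(|| string_facet.and_then(|s| s.parse::<f64>().ok()))
}

assert_eq!(extract_geo_like(None, Some("48.8566")), Some(48.8566));
assert_eq!(extract_geo_like(Some(2.35), Some("ignored")), Some(2.35));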
impl<'ctx, Q: RankingRuleQueryTrait> RankingRule<'ctx, Q> for GeoSort<Q> {
fn id(&self) -> String {
"geo_sort".to_owned()
@@ -267,124 +113,33 @@ impl<'ctx, Q: RankingRuleQueryTrait> RankingRule<'ctx, Q> for GeoSort<Q> {
) -> Result<Option<RankingRuleOutput<Q>>> {
let query = self.query.as_ref().unwrap().clone();
let mut geo_candidates = &self.geo_candidates & universe;
if geo_candidates.is_empty() {
return Ok(Some(RankingRuleOutput {
next_bucket(
ctx.index,
ctx.txn,
universe,
self.ascending,
self.point,
&self.field_ids,
&mut self.rtree,
&mut self.cached_sorted_docids,
&self.geo_candidates,
GeoSortParameter {
strategy: self.strategy,
max_bucket_size: self.max_bucket_size,
distance_error_margin: self.distance_error_margin,
},
)
.map(|o| {
o.map(|(candidates, point)| RankingRuleOutput {
query,
candidates: universe.clone(),
candidates,
score: ScoreDetails::GeoSort(score_details::GeoSort {
target_point: self.point,
ascending: self.ascending,
value: None,
value: point,
}),
}));
}
let ascending = self.ascending;
let next = |cache: &mut VecDeque<_>| {
if ascending {
cache.pop_front()
} else {
cache.pop_back()
}
};
let put_back = |cache: &mut VecDeque<_>, x: _| {
if ascending {
cache.push_front(x)
} else {
cache.push_back(x)
}
};
let mut current_bucket = RoaringBitmap::new();
// current_distance stores the first point and its distance in the current bucket
let mut current_distance: Option<([f64; 2], f64)> = None;
loop {
// The loop will only exit when we have found all points with equal distance or have exhausted the candidates.
if let Some((id, point)) = next(&mut self.cached_sorted_docids) {
if geo_candidates.contains(id) {
let distance = distance_between_two_points(&self.point, &point);
if let Some((point0, bucket_distance)) = current_distance.as_ref() {
if (bucket_distance - distance).abs() > self.distance_error_margin {
// different distance, point belongs to next bucket
put_back(&mut self.cached_sorted_docids, (id, point));
return Ok(Some(RankingRuleOutput {
query,
candidates: current_bucket,
score: ScoreDetails::GeoSort(score_details::GeoSort {
target_point: self.point,
ascending: self.ascending,
value: Some(point0.to_owned()),
}),
}));
} else {
// same distance, point belongs to current bucket
current_bucket.insert(id);
// remove from candidates to prevent it from being added to the cache again
geo_candidates.remove(id);
// the current bucket has reached its size limit, force a return
if current_bucket.len() == self.max_bucket_size {
return Ok(Some(RankingRuleOutput {
query,
candidates: current_bucket,
score: ScoreDetails::GeoSort(score_details::GeoSort {
target_point: self.point,
ascending: self.ascending,
value: Some(point0.to_owned()),
}),
}));
}
}
} else {
// first doc in current bucket
current_distance = Some((point, distance));
current_bucket.insert(id);
geo_candidates.remove(id);
// the current bucket has reached its size limit, force a return
if current_bucket.len() == self.max_bucket_size {
return Ok(Some(RankingRuleOutput {
query,
candidates: current_bucket,
score: ScoreDetails::GeoSort(score_details::GeoSort {
target_point: self.point,
ascending: self.ascending,
value: Some(point.to_owned()),
}),
}));
}
}
}
} else {
// cache exhausted, we need to refill it
self.fill_buffer(ctx, &geo_candidates)?;
if self.cached_sorted_docids.is_empty() {
// candidates exhausted, exit
if let Some((point0, _)) = current_distance.as_ref() {
return Ok(Some(RankingRuleOutput {
query,
candidates: current_bucket,
score: ScoreDetails::GeoSort(score_details::GeoSort {
target_point: self.point,
ascending: self.ascending,
value: Some(point0.to_owned()),
}),
}));
} else {
return Ok(Some(RankingRuleOutput {
query,
candidates: universe.clone(),
score: ScoreDetails::GeoSort(score_details::GeoSort {
target_point: self.point,
ascending: self.ascending,
value: None,
}),
}));
}
}
}
}
})
})
}
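The bucketing rule used above can be stated on its own: walking the documents in distance order, a document joins the current bucket while its distance stays within distance_error_margin of the bucket's first distance, up to max_bucket_size entries. A standalone sketch with a hypothetical helper, assuming the distances are already sorted ascending:

fn first_bucket_len(sorted_distances: &[f64], margin: f64, max_bucket_size: usize) -> usize {
    let Some(&first) = sorted_distances.first() else { return 0 };
    sorted_distances
        .iter()
        .take(max_bucket_size)
        .take_while(|&&d| (first - d).abs() <= margin)
        .count()
}

// 10.0, 10.4 and 10.9 are all within 1.0 of the first distance; 12.0 starts the next bucket
assert_eq!(first_bucket_len(&[10.0, 10.4, 10.9, 12.0], 1.0, 1000), 3);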
#[tracing::instrument(level = "trace", skip_all, target = "search::geo_sort")]
@@ -394,16 +149,3 @@ impl<'ctx, Q: RankingRuleQueryTrait> RankingRule<'ctx, Q> for GeoSort<Q> {
self.cached_sorted_docids.clear();
}
}
/// Compute the antipodal coordinate of `coord`
fn opposite_of(mut coord: [f64; 2]) -> [f64; 2] {
coord[0] *= -1.;
// in the case of x,0 we want to return x,180
if coord[1] > 0. {
coord[1] -= 180.;
} else {
coord[1] += 180.;
}
coord
}
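For instance, checking both branches with dyadic values so the float comparisons are exact:

assert_eq!(opposite_of([48.5, 2.5]), [-48.5, -177.5]);
assert_eq!(opposite_of([10.0, 0.0]), [-10.0, 180.0]);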

View File

@@ -510,6 +510,7 @@ mod tests {
crate::TermsMatchingStrategy::default(),
crate::score_details::ScoringStrategy::Skip,
false,
None,
universe,
&None,
&None,

View File

@@ -1,7 +1,7 @@
mod bucket_sort;
mod db_cache;
mod distinct;
mod geo_sort;
pub(crate) mod geo_sort;
mod graph_based_ranking_rule;
mod interner;
mod limits;
@@ -46,14 +46,14 @@ use resolve_query_graph::{compute_query_graph_docids, PhraseDocIdsCache};
use roaring::RoaringBitmap;
use sort::Sort;
use self::distinct::facet_string_values;
pub(crate) use self::distinct::{facet_string_values, facet_values_prefix_key};
use self::geo_sort::GeoSort;
pub use self::geo_sort::{Parameter as GeoSortParameter, Strategy as GeoSortStrategy};
use self::graph_based_ranking_rule::Words;
use self::interner::Interned;
use self::vector_sort::VectorSort;
use crate::attribute_patterns::{match_pattern, PatternMatch};
use crate::constants::RESERVED_GEO_FIELD_NAME;
use crate::documents::GeoSortParameter;
use crate::index::PrefixSearch;
use crate::localized_attributes_rules::LocalizedFieldIds;
use crate::score_details::{ScoreDetails, ScoringStrategy};
@@ -319,7 +319,7 @@ fn resolve_negative_phrases(
fn get_ranking_rules_for_placeholder_search<'ctx>(
ctx: &SearchContext<'ctx>,
sort_criteria: &Option<Vec<AscDesc>>,
geo_param: geo_sort::Parameter,
geo_param: GeoSortParameter,
) -> Result<Vec<BoxRankingRule<'ctx, PlaceholderQuery>>> {
let mut sort = false;
let mut sorted_fields = HashSet::new();
@@ -371,7 +371,7 @@ fn get_ranking_rules_for_placeholder_search<'ctx>(
fn get_ranking_rules_for_vector<'ctx>(
ctx: &SearchContext<'ctx>,
sort_criteria: &Option<Vec<AscDesc>>,
geo_param: geo_sort::Parameter,
geo_param: GeoSortParameter,
limit_plus_offset: usize,
target: &[f32],
embedder_name: &str,
@@ -448,7 +448,7 @@ fn get_ranking_rules_for_vector<'ctx>(
fn get_ranking_rules_for_query_graph_search<'ctx>(
ctx: &SearchContext<'ctx>,
sort_criteria: &Option<Vec<AscDesc>>,
geo_param: geo_sort::Parameter,
geo_param: GeoSortParameter,
terms_matching_strategy: TermsMatchingStrategy,
) -> Result<Vec<BoxRankingRule<'ctx, QueryGraph>>> {
// query graph search
@@ -559,7 +559,7 @@ fn resolve_sort_criteria<'ctx, Query: RankingRuleQueryTrait>(
ranking_rules: &mut Vec<BoxRankingRule<'ctx, Query>>,
sorted_fields: &mut HashSet<String>,
geo_sorted: &mut bool,
geo_param: geo_sort::Parameter,
geo_param: GeoSortParameter,
) -> Result<()> {
let sort_criteria = sort_criteria.clone().unwrap_or_default();
ranking_rules.reserve(sort_criteria.len());
@@ -626,10 +626,12 @@ pub fn execute_vector_search(
ctx: &mut SearchContext<'_>,
vector: &[f32],
scoring_strategy: ScoringStrategy,
exhaustive_number_hits: bool,
max_total_hits: Option<usize>,
universe: RoaringBitmap,
sort_criteria: &Option<Vec<AscDesc>>,
distinct: &Option<String>,
geo_param: geo_sort::Parameter,
geo_param: GeoSortParameter,
from: usize,
length: usize,
embedder_name: &str,
@@ -669,6 +671,8 @@ pub fn execute_vector_search(
placeholder_search_logger,
time_budget,
ranking_score_threshold,
exhaustive_number_hits,
max_total_hits,
)?;
Ok(PartialSearchResult {
@@ -689,10 +693,11 @@ pub fn execute_search(
terms_matching_strategy: TermsMatchingStrategy,
scoring_strategy: ScoringStrategy,
exhaustive_number_hits: bool,
max_total_hits: Option<usize>,
mut universe: RoaringBitmap,
sort_criteria: &Option<Vec<AscDesc>>,
distinct: &Option<String>,
geo_param: geo_sort::Parameter,
geo_param: GeoSortParameter,
from: usize,
length: usize,
words_limit: Option<usize>,
@@ -825,6 +830,8 @@ pub fn execute_search(
query_graph_logger,
time_budget,
ranking_score_threshold,
exhaustive_number_hits,
max_total_hits,
)?
} else {
let ranking_rules =
@@ -841,6 +848,8 @@ pub fn execute_search(
placeholder_search_logger,
time_budget,
ranking_score_threshold,
exhaustive_number_hits,
max_total_hits,
)?
};
@@ -872,7 +881,7 @@ pub fn execute_search(
})
}
fn check_sort_criteria(
pub(crate) fn check_sort_criteria(
ctx: &SearchContext<'_>,
sort_criteria: Option<&Vec<AscDesc>>,
) -> Result<()> {
@@ -902,7 +911,7 @@ fn check_sort_criteria(
let (valid_fields, hidden_fields) =
ctx.index.remove_hidden_fields(ctx.txn, sortable_fields)?;
return Err(UserError::InvalidSortableAttribute {
return Err(UserError::InvalidSearchSortableAttribute {
field: field.to_string(),
valid_fields,
hidden_fields,
@@ -913,7 +922,7 @@ fn check_sort_criteria(
let (valid_fields, hidden_fields) =
ctx.index.remove_hidden_fields(ctx.txn, sortable_fields)?;
return Err(UserError::InvalidSortableAttribute {
return Err(UserError::InvalidSearchSortableAttribute {
field: RESERVED_GEO_FIELD_NAME.to_string(),
valid_fields,
hidden_fields,

View File

@@ -2,7 +2,7 @@ use heed::RwTxn;
use roaring::RoaringBitmap;
use time::OffsetDateTime;
use crate::{FieldDistribution, Index, Result};
use crate::{database_stats::DatabaseStats, FieldDistribution, Index, Result};
pub struct ClearDocuments<'t, 'i> {
wtxn: &'t mut RwTxn<'i>,
@@ -92,6 +92,10 @@ impl<'t, 'i> ClearDocuments<'t, 'i> {
documents.clear(self.wtxn)?;
// Update the stats of the documents database after clearing all documents.
let stats = DatabaseStats::new(self.index.documents.remap_data_type(), self.wtxn)?;
self.index.put_documents_stats(self.wtxn, stats)?;
Ok(number_of_documents)
}
}
@@ -122,6 +126,9 @@ mod tests {
let rtxn = index.read_txn().unwrap();
// Variables for statistics verification
let stats = index.documents_stats(&rtxn).unwrap().unwrap();
// the value is 7 because the fields are `[id, name, age, country, _geo, _geo.lng, _geo.lat]`
assert_eq!(index.fields_ids_map(&rtxn).unwrap().len(), 7);
@@ -142,5 +149,9 @@
assert!(index.field_id_docid_facet_f64s.is_empty(&rtxn).unwrap());
assert!(index.field_id_docid_facet_strings.is_empty(&rtxn).unwrap());
assert!(index.documents.is_empty(&rtxn).unwrap());
// Verify that the statistics are correctly updated after clearing documents
assert_eq!(index.number_of_documents(&rtxn).unwrap(), 0);
assert_eq!(stats.number_of_entries(), 0);
}
}

View File

@@ -23,7 +23,7 @@ use crate::progress::EmbedderStats;
use crate::prompt::Prompt;
use crate::update::del_add::{DelAdd, KvReaderDelAdd, KvWriterDelAdd};
use crate::update::settings::InnerIndexSettingsDiff;
use crate::vector::db::{EmbedderInfo, EmbeddingStatus, EmbeddingStatusDelta};
use crate::vector::db::{EmbedderInfo, EmbeddingStatusDelta};
use crate::vector::error::{EmbedErrorKind, PossibleEmbeddingMistakes, UnusedVectorsDistribution};
use crate::vector::extractor::{Extractor, ExtractorDiff, RequestFragmentExtractor};
use crate::vector::parsed_vectors::{ParsedVectorsDiff, VectorState};
@@ -441,6 +441,8 @@ pub fn extract_vector_points<R: io::Read + io::Seek>(
{
let embedder_is_manual = matches!(*runtime.embedder, Embedder::UserProvided(_));
let (old_is_user_provided, old_must_regenerate) =
embedder_info.embedding_status.is_user_provided_must_regenerate(docid);
let (old, new) = parsed_vectors.remove(embedder_name);
let new_must_regenerate = new.must_regenerate();
let delta = match action {
@@ -499,16 +501,19 @@ pub fn extract_vector_points<R: io::Read + io::Seek>(
let is_adding_fragments = has_fragments && !old_has_fragments;
if is_adding_fragments {
if !has_fragments {
// removing fragments
regenerate_prompt(obkv, &runtime.document_template, new_fields_ids_map)?
} else if is_adding_fragments ||
// regenerate all fragments when going from user-provided to not user-provided
old_is_user_provided
{
regenerate_all_fragments(
runtime.fragments(),
&doc_alloc,
new_fields_ids_map,
obkv,
)
} else if !has_fragments {
// removing fragments
regenerate_prompt(obkv, &runtime.document_template, new_fields_ids_map)?
} else {
let mut fragment_diff = Vec::new();
let new_fields_ids_map = new_fields_ids_map.as_fields_ids_map();
@@ -600,7 +605,8 @@ pub fn extract_vector_points<R: io::Read + io::Seek>(
docid,
&delta,
new_must_regenerate,
&embedder_info.embedding_status,
old_is_user_provided,
old_must_regenerate,
);
// and we finally push the unique vectors into the writer
@ -657,10 +663,9 @@ fn push_embedding_status_delta(
docid: DocumentId,
delta: &VectorStateDelta,
new_must_regenerate: bool,
embedding_status: &EmbeddingStatus,
old_is_user_provided: bool,
old_must_regenerate: bool,
) {
let (old_is_user_provided, old_must_regenerate) =
embedding_status.is_user_provided_must_regenerate(docid);
let new_is_user_provided = match delta {
VectorStateDelta::NoChange => old_is_user_provided,
VectorStateDelta::NowRemoved => {

View File

@@ -620,12 +620,35 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
where
'a: 'doc,
{
match &mut self.kind {
ChunkType::Fragments { fragments: _, session } => {
let doc_alloc = session.doc_alloc();
self.set_status(docid, old_is_user_provided, true, false, true);
if old_is_user_provided | full_reindex {
match &mut self.kind {
ChunkType::Fragments { fragments, session } => {
let doc_alloc = session.doc_alloc();
let reindex_all_fragments =
// when the vectors were user-provided, Meilisearch cannot know if they come from a particular fragment,
// and so Meilisearch needs to clear all embeddings in that case.
// Fortunately, as dumps export fragment vectors with `regenerate` set to `false`,
// this case should be rare and opt-in.
old_is_user_provided ||
// full-reindex case
full_reindex;
if reindex_all_fragments {
session.on_embed_mut().clear_vectors(docid);
let extractors = fragments.iter().map(|fragment| {
RequestFragmentExtractor::new(fragment, doc_alloc).ignore_errors()
});
insert_autogenerated(
docid,
external_docid,
extractors,
document,
&(),
session,
unused_vectors_distribution,
)?;
return Ok(());
}
settings_delta.try_for_each_fragment_diff(
@@ -669,7 +692,6 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
Result::Ok(())
},
)?;
self.set_status(docid, old_is_user_provided, true, false, true);
}
ChunkType::DocumentTemplate { document_template, session } => {
let doc_alloc = session.doc_alloc();
@@ -690,12 +712,18 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
match extractor.diff_settings(document, &external_docid, old_extractor.as_ref())? {
ExtractorDiff::Removed => {
if old_is_user_provided || full_reindex {
session.on_embed_mut().clear_vectors(docid);
}
OnEmbed::process_embedding_response(
session.on_embed_mut(),
crate::vector::session::EmbeddingResponse { metadata, embedding: None },
);
}
ExtractorDiff::Added(input) | ExtractorDiff::Updated(input) => {
if old_is_user_provided || full_reindex {
session.on_embed_mut().clear_vectors(docid);
}
session.request_embedding(metadata, input, unused_vectors_distribution)?;
}
ExtractorDiff::Unchanged => { /* do nothing */ }
@@ -722,6 +750,13 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
where
'a: 'doc,
{
self.set_status(
docid,
old_is_user_provided,
old_must_regenerate,
false,
new_must_regenerate,
);
match &mut self.kind {
ChunkType::DocumentTemplate { document_template, session } => {
let doc_alloc = session.doc_alloc();
@@ -731,10 +766,6 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
new_fields_ids_map,
);
if old_is_user_provided {
session.on_embed_mut().clear_vectors(docid);
}
update_autogenerated(
docid,
external_docid,
@ -743,6 +774,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
new_document,
&external_docid,
old_must_regenerate,
old_is_user_provided,
session,
unused_vectors_distribution,
)?
@ -754,7 +786,21 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
});
if old_is_user_provided {
// when the document was `userProvided`, Meilisearch cannot know which fragment a particular
// vector was referring to.
// As a result, Meilisearch will regenerate all fragments in this case.
// Fortunately, since dumps set `regenerate` to `false` for fragments, this case should be rare.
session.on_embed_mut().clear_vectors(docid);
insert_autogenerated(
docid,
external_docid,
extractors,
new_document,
&(),
session,
unused_vectors_distribution,
)?;
return Ok(());
}
update_autogenerated(
@ -765,25 +811,18 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
new_document,
&(),
old_must_regenerate,
false,
session,
unused_vectors_distribution,
)?
}
};
self.set_status(
docid,
old_is_user_provided,
old_must_regenerate,
false,
new_must_regenerate,
);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn insert_autogenerated<D: Document<'a> + Debug>(
pub fn insert_autogenerated<'doc, D: Document<'doc> + Debug>(
&mut self,
docid: DocumentId,
external_docid: &'a str,
@@ -791,7 +830,10 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
new_fields_ids_map: &'a RefCell<crate::GlobalFieldsIdsMap>,
unused_vectors_distribution: &UnusedVectorsDistributionBump<'a>,
new_must_regenerate: bool,
) -> Result<()> {
) -> Result<()>
where
'a: 'doc,
{
let (default_is_user_provided, default_must_regenerate) = (false, true);
self.set_status(
docid,
@@ -956,6 +998,7 @@ fn update_autogenerated<'doc, 'a: 'doc, 'b, E, OD, ND>(
new_document: ND,
meta: &E::DocumentMetadata,
old_must_regenerate: bool,
mut must_clear_on_generation: bool,
session: &mut EmbedSession<'a, OnEmbeddingDocumentUpdates<'a, 'b>, E::Input>,
unused_vectors_distribution: &UnusedVectorsDistributionBump<'a>,
) -> Result<()>
@@ -984,6 +1027,11 @@ where
};
if must_regenerate {
if must_clear_on_generation {
must_clear_on_generation = false;
session.on_embed_mut().clear_vectors(docid);
}
let metadata =
Metadata { docid, external_docid, extractor_id: extractor.extractor_id() };
@@ -1002,7 +1050,7 @@ where
Ok(())
}
fn insert_autogenerated<'a, 'b, E, D: Document<'a> + Debug>(
fn insert_autogenerated<'doc, 'a: 'doc, 'b, E, D: Document<'doc> + Debug>(
docid: DocumentId,
external_docid: &'a str,
extractors: impl IntoIterator<Item = E>,

View File

@@ -558,10 +558,10 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
match self.searchable_fields {
Setting::Set(ref fields) => {
// Check to see if the searchable fields changed before doing anything else
let old_fields = self.index.searchable_fields(self.wtxn)?;
let old_fields = self.index.user_defined_searchable_fields(self.wtxn)?;
let did_change = {
let new_fields = fields.iter().map(String::as_str).collect::<Vec<_>>();
new_fields != old_fields
old_fields.is_none_or(|old| new_fields != old)
};
if !did_change {
return Ok(false);
@@ -2041,7 +2041,7 @@ fn embedders(embedding_configs: Vec<IndexEmbeddingConfig>) -> Result<RuntimeEmbe
.into_iter()
.map(|fragment| {
let template = JsonTemplate::new(
embedder_options.indexing_fragment(&fragment.name).unwrap().clone(),
embedder_options.fragment(&fragment.name).unwrap().clone(),
)
.unwrap();

View File

@@ -2,6 +2,7 @@ mod v1_12;
mod v1_13;
mod v1_14;
mod v1_15;
mod v1_16;
use heed::RwTxn;
use v1_12::{V1_12_3_To_V1_13_0, V1_12_To_V1_12_3};
use v1_13::{V1_13_0_To_V1_13_1, V1_13_1_To_Latest_V1_13};
@@ -10,6 +11,7 @@ use v1_15::Latest_V1_14_To_Latest_V1_15;
use crate::constants::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use crate::progress::{Progress, VariableNameStep};
use crate::update::upgrade::v1_16::Latest_V1_15_To_V1_16_0;
use crate::{Index, InternalError, Result};
trait UpgradeIndex {
@@ -24,6 +26,59 @@ trait UpgradeIndex {
fn target_version(&self) -> (u32, u32, u32);
}
const UPGRADE_FUNCTIONS: &[&dyn UpgradeIndex] = &[
&V1_12_To_V1_12_3 {},
&V1_12_3_To_V1_13_0 {},
&V1_13_0_To_V1_13_1 {},
&V1_13_1_To_Latest_V1_13 {},
&Latest_V1_13_To_Latest_V1_14 {},
&Latest_V1_14_To_Latest_V1_15 {},
&Latest_V1_15_To_V1_16_0 {},
// This is the last upgrade function; it will be called when the index is up to date.
// Any other upgrade function should be added before this one.
&ToCurrentNoOp {},
];
/// Causes a compile-time error if the argument is not in range of `0..UPGRADE_FUNCTIONS.len()`
macro_rules! function_index {
($start:expr) => {{
const _CHECK_INDEX: () = {
if $start >= $crate::update::upgrade::UPGRADE_FUNCTIONS.len() {
panic!("upgrade functions out of range")
}
};
$start
}};
}
const fn start(from: (u32, u32, u32)) -> Option<usize> {
let start = match from {
(1, 12, 0..=2) => function_index!(0),
(1, 12, 3..) => function_index!(1),
(1, 13, 0) => function_index!(2),
(1, 13, _) => function_index!(4),
(1, 14, _) => function_index!(5),
// We must handle the current version in the match because in case of a failure some indexes may have been upgraded but not others.
(1, 15, _) => function_index!(6),
(1, 16, _) => function_index!(7),
// We deliberately don't add a placeholder with (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) here, to force manual
// consideration of the dumpless upgrade.
(_major, _minor, _patch) => return None,
};
Some(start)
}
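A few examples of the resulting mapping (a sketch; the indices refer to positions in UPGRADE_FUNCTIONS above):

// any v1.13 patch release resumes right after the v1.13.0 -> v1.13.1 step
assert_eq!(start((1, 13, 5)), Some(4));
// the current 1.16 line maps to the final no-op step
assert_eq!(start((1, 16, 0)), Some(7));
// unknown (e.g. future) versions refuse the dumpless upgrade
assert_eq!(start((2, 0, 0)), None);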
/// Causes a compile-time error if the latest package cannot be upgraded.
///
/// This serves as a reminder to consider the proper dumpless upgrade implementation when changing the package version.
const _CHECK_PACKAGE_CAN_UPGRADE: () = {
if start((VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)).is_none() {
panic!("cannot upgrade from latest package version")
}
};
/// Return true if the cached stats of the index must be regenerated
pub fn upgrade<MSP>(
wtxn: &mut RwTxn,
@@ -36,33 +91,12 @@
MSP: Fn() -> bool + Sync,
{
let from = index.get_version(wtxn)?.unwrap_or(db_version);
let upgrade_functions: &[&dyn UpgradeIndex] = &[
&V1_12_To_V1_12_3 {},
&V1_12_3_To_V1_13_0 {},
&V1_13_0_To_V1_13_1 {},
&V1_13_1_To_Latest_V1_13 {},
&Latest_V1_13_To_Latest_V1_14 {},
&Latest_V1_14_To_Latest_V1_15 {},
// This is the last upgrade function; it will be called when the index is up to date.
// Any other upgrade function should be added before this one.
&ToCurrentNoOp {},
];
let start = match from {
(1, 12, 0..=2) => 0,
(1, 12, 3..) => 1,
(1, 13, 0) => 2,
(1, 13, _) => 4,
(1, 14, _) => 5,
// We must handle the current version in the match because in case of a failure some indexes may have been upgraded but not others.
(1, 15, _) => 6,
(major, minor, patch) => {
return Err(InternalError::CannotUpgradeToVersion(major, minor, patch).into())
}
};
let start =
start(from).ok_or_else(|| InternalError::CannotUpgradeToVersion(from.0, from.1, from.2))?;
enum UpgradeVersion {}
let upgrade_path = &upgrade_functions[start..];
let upgrade_path = &UPGRADE_FUNCTIONS[start..];
let mut current_version = from;
let mut regenerate_stats = false;

View File

@@ -1,4 +1,6 @@
use heed::RwTxn;
use roaring::RoaringBitmap;
use serde::Deserialize;
use super::UpgradeIndex;
use crate::progress::Progress;
@@ -26,3 +28,14 @@ impl UpgradeIndex for Latest_V1_14_To_Latest_V1_15 {
(1, 15, 0)
}
}
/// Parts of the v1.15 `IndexEmbeddingConfig` that are relevant for the upgrade to v1.16
///
/// # Warning
///
/// This object should not be rewritten to the DB; it is only read to get the name and the `user_provided` roaring bitmap.
#[derive(Debug, Deserialize)]
pub struct IndexEmbeddingConfig {
pub name: String,
pub user_provided: RoaringBitmap,
}

View File

@@ -0,0 +1,48 @@
use heed::types::{SerdeJson, Str};
use heed::RwTxn;
use super::UpgradeIndex;
use crate::progress::Progress;
use crate::vector::db::{EmbedderInfo, EmbeddingStatus};
use crate::{Index, InternalError, Result};
#[allow(non_camel_case_types)]
pub(super) struct Latest_V1_15_To_V1_16_0();
impl UpgradeIndex for Latest_V1_15_To_V1_16_0 {
fn upgrade(
&self,
wtxn: &mut RwTxn,
index: &Index,
_original: (u32, u32, u32),
_progress: Progress,
) -> Result<bool> {
let v1_15_indexing_configs = index
.main
.remap_types::<Str, SerdeJson<Vec<super::v1_15::IndexEmbeddingConfig>>>()
.get(wtxn, crate::index::main_key::EMBEDDING_CONFIGS)?
.unwrap_or_default();
let embedders = index.embedding_configs();
for config in v1_15_indexing_configs {
let embedder_id = embedders.embedder_id(wtxn, &config.name)?.ok_or(
InternalError::DatabaseMissingEntry {
db_name: crate::index::db_name::VECTOR_EMBEDDER_CATEGORY_ID,
key: None,
},
)?;
let info = EmbedderInfo {
embedder_id,
// v1.15 did not distinguish between `user_provided` and `! regenerate`.
embedding_status: EmbeddingStatus::from_user_provided(config.user_provided),
};
embedders.put_embedder_info(wtxn, &config.name, &info)?;
}
Ok(false)
}
fn target_version(&self) -> (u32, u32, u32) {
(1, 16, 0)
}
}

View File

@@ -117,6 +117,13 @@ impl EmbeddingStatus {
Default::default()
}
/// Create a new `EmbeddingStatus` that assumes that any `user_provided` docid is also skipping regenerate.
///
/// Used for migration from v1.15 and earlier DBs.
pub(crate) fn from_user_provided(user_provided: RoaringBitmap) -> Self {
Self { user_provided, skip_regenerate_different_from_user_provided: Default::default() }
}
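A hedged sketch of the migration semantics, using the is_user_provided_must_regenerate accessor relied upon by the extractors:

use roaring::RoaringBitmap;

let migrated: RoaringBitmap = [0u32, 1, 2].into_iter().collect();
let status = EmbeddingStatus::from_user_provided(migrated);
// every migrated docid is considered user-provided and, per the v1.15
// assumption, also skips regeneration
assert_eq!(status.is_user_provided_must_regenerate(2), (true, false));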
/// Whether the document contains user-provided vectors for that embedder.
pub fn is_user_provided(&self, docid: DocumentId) -> bool {
self.user_provided.contains(docid)

View File

@@ -61,28 +61,20 @@ pub struct Error {
impl Error {
/// Produces an error message when the error happened at rendering time.
pub fn rendering_error(&self, root: &str) -> String {
if self.path.is_empty() {
format!("error while rendering template: {}", &self.template_error)
} else {
format!(
"in `{}`, error while rendering template: {}",
path_with_root(root, self.path.iter()),
&self.template_error
)
}
format!(
"in `{}`, error while rendering template: {}",
path_with_root(root, self.path.iter()),
&self.template_error
)
}
/// Produces an error message when the error happened at parsing time.
pub fn parsing_error(&self, root: &str) -> String {
if self.path.is_empty() {
format!("error while parsing template: {}", &self.template_error)
} else {
format!(
"in `{}`, error while parsing template: {}",
path_with_root(root, self.path.iter()),
&self.template_error
)
}
pub fn parsing(&self, root: &str) -> String {
format!(
"in `{}`, error while parsing template: {}",
path_with_root(root, self.path.iter()),
&self.template_error
)
}
}

View File

@@ -823,26 +823,7 @@ pub enum EmbedderOptions {
}
impl EmbedderOptions {
pub fn indexing_fragments(&self) -> Vec<String> {
match &self {
EmbedderOptions::HuggingFace(_)
| EmbedderOptions::OpenAi(_)
| EmbedderOptions::Ollama(_)
| EmbedderOptions::UserProvided(_) => vec![],
EmbedderOptions::Rest(embedder_options) => {
embedder_options.indexing_fragments.keys().cloned().collect()
}
EmbedderOptions::Composite(embedder_options) => {
if let SubEmbedderOptions::Rest(embedder_options) = &embedder_options.index {
embedder_options.indexing_fragments.keys().cloned().collect()
} else {
vec![]
}
}
}
}
pub fn indexing_fragment(&self, name: &str) -> Option<&serde_json::Value> {
pub fn fragment(&self, name: &str) -> Option<&serde_json::Value> {
match &self {
EmbedderOptions::HuggingFace(_)
| EmbedderOptions::OpenAi(_)
@@ -861,37 +842,20 @@ impl EmbedderOptions {
}
}
pub fn search_fragments(&self) -> Vec<String> {
pub fn has_fragments(&self) -> bool {
match &self {
EmbedderOptions::HuggingFace(_)
| EmbedderOptions::OpenAi(_)
| EmbedderOptions::Ollama(_)
| EmbedderOptions::UserProvided(_) => vec![],
| EmbedderOptions::UserProvided(_) => false,
EmbedderOptions::Rest(embedder_options) => {
embedder_options.search_fragments.keys().cloned().collect()
!embedder_options.indexing_fragments.is_empty()
}
EmbedderOptions::Composite(embedder_options) => {
if let SubEmbedderOptions::Rest(embedder_options) = &embedder_options.search {
embedder_options.search_fragments.keys().cloned().collect()
if let SubEmbedderOptions::Rest(embedder_options) = &embedder_options.index {
!embedder_options.indexing_fragments.is_empty()
} else {
vec![]
}
}
}
}
pub fn search_fragment(&self, name: &str) -> Option<&serde_json::Value> {
match &self {
EmbedderOptions::HuggingFace(_)
| EmbedderOptions::OpenAi(_)
| EmbedderOptions::Ollama(_)
| EmbedderOptions::UserProvided(_) => None,
EmbedderOptions::Rest(embedder_options) => embedder_options.search_fragments.get(name),
EmbedderOptions::Composite(embedder_options) => {
if let SubEmbedderOptions::Rest(embedder_options) = &embedder_options.search {
embedder_options.search_fragments.get(name)
} else {
None
false
}
}
}
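Conceptually, has_fragments reduces to "the indexing side of the embedder is a REST embedder with a non-empty indexing_fragments map". A hypothetical, simplified mirror of the match above (not the real types):

enum Opts {
    Rest { indexing_fragments: Vec<String> },
    Other,
}

fn has_fragments(opts: &Opts) -> bool {
    match opts {
        Opts::Rest { indexing_fragments } => !indexing_fragments.is_empty(),
        Opts::Other => false,
    }
}

assert!(has_fragments(&Opts::Rest { indexing_fragments: vec!["basic".into()] }));
assert!(!has_fragments(&Opts::Other));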

View File

@@ -113,7 +113,7 @@ impl RequestData {
for (name, value) in indexing_fragments {
JsonTemplate::new(value).map_err(|error| {
NewEmbedderError::rest_could_not_parse_template(
error.parsing_error(&format!(".indexingFragments.{name}")),
error.parsing(&format!(".indexingFragments.{name}")),
)
})?;
}
@@ -623,7 +623,7 @@ impl RequestFromFragments {
.map(|(name, value)| {
let json_template = JsonTemplate::new(value).map_err(|error| {
NewEmbedderError::rest_could_not_parse_template(
error.parsing_error(&format!(".searchFragments.{name}")),
error.parsing(&format!(".searchFragments.{name}")),
)
})?;
Ok((name, json_template))