Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-02 10:45:36 +00:00)

Compare commits: prototype-...prototype- (9 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9d5e3457e5 | |
| | 04694071fe | |
| | b0c1a9504a | |
| | d57026cd96 | |
| | 41c9e8856a | |
| | d4ff59fcf5 | |
| | 9c485f8563 | |
| | d8d12d5979 | |
| | 0597a97c84 | |
Cargo.lock (generated), 513 changes: file diff suppressed because it is too large.
@@ -65,7 +65,7 @@ You may also want to check out [Meilisearch 101](https://www.meilisearch.com/doc

## ⚡ Supercharge your Meilisearch experience

Say goodbye to server deployment and manual updates with [Meilisearch Cloud](https://www.meilisearch.com/cloud?utm_campaign=oss&utm_source=github&utm_medium=meilisearch). No credit card required.
Say goodbye to server deployment and manual updates with [Meilisearch Cloud](https://www.meilisearch.com/pricing?utm_campaign=oss&utm_source=engine&utm_medium=meilisearch). Get started with a 14-day free trial! No credit card required.

## 🧰 SDKs & integration tools

@@ -87,7 +87,7 @@ Finally, for more in-depth information, refer to our articles explaining fundame

Meilisearch collects **anonymized** data from users to help us improve our product. You can [deactivate this](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) whenever you want.

To request deletion of collected data, please write to us at [privacy@meilisearch.com](mailto:privacy@meilisearch.com). Don't forget to include your `Instance UID` in the message, as this helps us quickly find and delete your data.

If you want to know more about the kind of data we collect and what we use it for, check the [telemetry section](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) of our documentation.
@@ -14,11 +14,11 @@ license.workspace = true
anyhow = "1.0.70"
csv = "1.2.1"
milli = { path = "../milli" }
mimalloc = { version = "0.1.37", default-features = false }
mimalloc = { version = "0.1.36", default-features = false }
serde_json = { version = "1.0.95", features = ["preserve_order"] }

[dev-dependencies]
criterion = { version = "0.5.1", features = ["html_reports"] }
criterion = { version = "0.4.0", features = ["html_reports"] }
rand = "0.8.5"
rand_chacha = "0.3.1"
roaring = "0.10.1"
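The two criterion lines above differ only in version; either one provides the same benchmark harness with HTML reports. A minimal, hypothetical benchmark that runs under both versions (the benched function is a placeholder, not taken from Meilisearch):

```rust
// Hypothetical benchmark skeleton compatible with criterion 0.4 and 0.5.
use criterion::{criterion_group, criterion_main, Criterion};

// Placeholder workload; replace with whatever you actually want to measure.
fn sum_to_n(n: u64) -> u64 {
    (0..=n).sum()
}

fn bench_sum(c: &mut Criterion) {
    // black_box prevents the compiler from constant-folding the input away.
    c.bench_function("sum_to_n 1000", |b| b.iter(|| sum_to_n(criterion::black_box(1000))));
}

criterion_group!(benches, bench_sum);
criterion_main!(benches);
```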
@@ -1,37 +0,0 @@
version: "3.9"
services:
  zk1:
    container_name: zk1
    hostname: zk1
    image: bitnami/zookeeper:3.7.1
    ports:
      - 21811:2181
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_SERVER_ID=1
      - ZOO_SERVERS=0.0.0.0:2888:3888,zk2:2888:3888,zk3:2888:3888
  zk2:
    container_name: zk2
    hostname: zk2
    image: bitnami/zookeeper:3.7.1
    ports:
      - 21812:2181
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_SERVER_ID=2
      - ZOO_SERVERS=zk1:2888:3888,0.0.0.0:2888:3888,zk3:2888:3888
  zk3:
    container_name: zk3
    hostname: zk3
    image: bitnami/zookeeper:3.7.1
    ports:
      - 21813:2181
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_SERVER_ID=3
      - ZOO_SERVERS=zk1:2888:3888,zk2:2888:3888,0.0.0.0:2888:3888
  zoonavigator:
    container_name: zoonavigator
    image: elkozmon/zoonavigator
    ports:
      - 9000:9000
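For context, the compose file in this hunk describes a three-node ZooKeeper ensemble (plus ZooNavigator) published on ports 21811-21813. A rough sketch of talking to that ensemble with the `zookeeper-client` crate referenced elsewhere in this compare; the `Client::connect` constructor and the error plumbing are assumptions, only the `CreateOptions`/`create` calls mirror code that appears later in the diff:

```rust
// Illustrative sketch only: connect to the three ensemble members exposed by
// the compose file and create a persistent node, mirroring the `/auth` node
// creation done by the AuthController further down in this compare.
// `Client::connect` is an assumed constructor; check the zookeeper-client 0.4
// documentation for the exact signature.
use zookeeper_client as zk;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // All three members of the ensemble, as published by docker-compose.
    let cluster = "localhost:21811,localhost:21812,localhost:21813";
    let client = zk::Client::connect(cluster).await?;

    // Same option pattern as the `/auth` node creation in the auth controller.
    let options = zk::CreateOptions::new(zk::CreateMode::Persistent, zk::Acl::anyone_all());
    match client.create("/auth", &[], &options).await {
        Ok(_) => println!("created /auth"),
        Err(zk::Error::NodeExists) => println!("/auth already exists"),
        Err(e) => return Err(e.into()),
    }
    Ok(())
}
```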
@@ -210,7 +210,6 @@ pub(crate) mod test {
use big_s::S;
use maplit::{btreemap, btreeset};
use meilisearch_types::facet_values_sort::FacetValuesSort;
use meilisearch_types::features::RuntimeTogglableFeatures;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::keys::{Action, Key};
use meilisearch_types::milli;

@@ -262,6 +261,9 @@ pub(crate) mod test {
sortable_attributes: Setting::Set(btreeset! { S("age") }),
ranking_rules: Setting::NotSet,
stop_words: Setting::NotSet,
non_separator_tokens: Setting::NotSet,
separator_tokens: Setting::NotSet,
dictionary: Setting::NotSet,
synonyms: Setting::NotSet,
distinct_attribute: Setting::NotSet,
typo_tolerance: Setting::NotSet,

@@ -419,10 +421,7 @@ pub(crate) mod test {
}
keys.flush().unwrap();

// ========== experimental features
let features = create_test_features();

dump.create_experimental_features(features).unwrap();
// ========== TODO: create features here

// create the dump
let mut file = tempfile::tempfile().unwrap();

@@ -432,10 +431,6 @@ pub(crate) mod test {
file
}

fn create_test_features() -> RuntimeTogglableFeatures {
RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
}

#[test]
fn test_creating_and_read_dump() {
let mut file = create_test_dump();

@@ -480,9 +475,5 @@ pub(crate) mod test {
for (key, expected) in dump.keys().unwrap().zip(create_test_api_keys()) {
assert_eq!(key.unwrap(), expected);
}

// ==== checking the features
let expected = create_test_features();
assert_eq!(dump.features().unwrap().unwrap(), expected);
}
}
@@ -340,6 +340,9 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
}
},
stop_words: settings.stop_words.into(),
non_separator_tokens: v6::Setting::NotSet,
separator_tokens: v6::Setting::NotSet,
dictionary: v6::Setting::NotSet,
synonyms: settings.synonyms.into(),
distinct_attribute: settings.distinct_attribute.into(),
typo_tolerance: match settings.typo_tolerance {
@@ -195,53 +195,8 @@ pub(crate) mod test {
use meili_snap::insta;

use super::*;
use crate::reader::v6::RuntimeTogglableFeatures;

#[test]
fn import_dump_v6_experimental() {
let dump = File::open("tests/assets/v6-with-experimental.dump").unwrap();
let mut dump = DumpReader::open(dump).unwrap();

// top level infos
insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-07-06 7:10:27.21958 +00:00:00");
insta::assert_debug_snapshot!(dump.instance_uid().unwrap(), @"None");

// tasks
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"d45cd8571703e58ae53c7bd7ce3f5c22");
assert_eq!(update_files.len(), 2);
assert!(update_files[0].is_none()); // the dump creation
assert!(update_files[1].is_none()); // the processed document addition

// keys
let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"13c2da155e9729c2344688cab29af71d");

// indexes
let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
// the index are not ordered in any way by default
indexes.sort_by_key(|index| index.metadata().uid.to_string());

let mut test = indexes.pop().unwrap();
assert!(indexes.is_empty());

insta::assert_json_snapshot!(test.metadata(), @r###"
{
"uid": "test",
"primaryKey": "id",
"createdAt": "2023-07-06T07:07:41.364694Z",
"updatedAt": "2023-07-06T07:07:41.396114Z"
}
"###);

assert_eq!(test.documents().unwrap().count(), 1);

assert_eq!(
dump.features().unwrap().unwrap(),
RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
);
}
// TODO: add `features` to tests

#[test]
fn import_dump_v5() {
@@ -319,8 +274,6 @@ pub(crate) mod test {
let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
assert_eq!(documents.len(), 10);
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");

assert_eq!(dump.features().unwrap(), None);
}

#[test]
@@ -292,7 +292,6 @@ pub(crate) mod test {
│ ├---- update_files/
│ │ └---- 1.jsonl
│ └---- queue.jsonl
├---- experimental-features.json
├---- instance_uid.uuid
├---- keys.jsonl
└---- metadata.json

Binary file not shown.
@@ -472,77 +472,6 @@ pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
|
||||
terminated(|input| parse_expression(input, 0), eof)(input)
|
||||
}
|
||||
|
||||
impl<'a> std::fmt::Display for FilterCondition<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
FilterCondition::Not(filter) => {
|
||||
write!(f, "NOT ({filter})")
|
||||
}
|
||||
FilterCondition::Condition { fid, op } => {
|
||||
write!(f, "{fid} {op}")
|
||||
}
|
||||
FilterCondition::In { fid, els } => {
|
||||
write!(f, "{fid} IN[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::Or(els) => {
|
||||
write!(f, "OR[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::And(els) => {
|
||||
write!(f, "AND[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::GeoLowerThan { point, radius } => {
|
||||
write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
|
||||
}
|
||||
FilterCondition::GeoBoundingBox {
|
||||
top_right_point: top_left_point,
|
||||
bottom_left_point: bottom_right_point,
|
||||
} => {
|
||||
write!(
|
||||
f,
|
||||
"_geoBoundingBox([{}, {}], [{}, {}])",
|
||||
top_left_point[0],
|
||||
top_left_point[1],
|
||||
bottom_right_point[0],
|
||||
bottom_right_point[1]
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Condition<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Condition::GreaterThan(token) => write!(f, "> {token}"),
|
||||
Condition::GreaterThanOrEqual(token) => write!(f, ">= {token}"),
|
||||
Condition::Equal(token) => write!(f, "= {token}"),
|
||||
Condition::NotEqual(token) => write!(f, "!= {token}"),
|
||||
Condition::Null => write!(f, "IS NULL"),
|
||||
Condition::Empty => write!(f, "IS EMPTY"),
|
||||
Condition::Exists => write!(f, "EXISTS"),
|
||||
Condition::LowerThan(token) => write!(f, "< {token}"),
|
||||
Condition::LowerThanOrEqual(token) => write!(f, "<= {token}"),
|
||||
Condition::Between { from, to } => write!(f, "{from} TO {to}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Token<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{{{}}}", self.value())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
@@ -923,3 +852,74 @@ pub mod tests {
|
||||
assert_eq!(token.value(), s);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> std::fmt::Display for FilterCondition<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
FilterCondition::Not(filter) => {
|
||||
write!(f, "NOT ({filter})")
|
||||
}
|
||||
FilterCondition::Condition { fid, op } => {
|
||||
write!(f, "{fid} {op}")
|
||||
}
|
||||
FilterCondition::In { fid, els } => {
|
||||
write!(f, "{fid} IN[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::Or(els) => {
|
||||
write!(f, "OR[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::And(els) => {
|
||||
write!(f, "AND[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::GeoLowerThan { point, radius } => {
|
||||
write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
|
||||
}
|
||||
FilterCondition::GeoBoundingBox {
|
||||
top_right_point: top_left_point,
|
||||
bottom_left_point: bottom_right_point,
|
||||
} => {
|
||||
write!(
|
||||
f,
|
||||
"_geoBoundingBox([{}, {}], [{}, {}])",
|
||||
top_left_point[0],
|
||||
top_left_point[1],
|
||||
bottom_right_point[0],
|
||||
bottom_right_point[1]
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Condition<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Condition::GreaterThan(token) => write!(f, "> {token}"),
|
||||
Condition::GreaterThanOrEqual(token) => write!(f, ">= {token}"),
|
||||
Condition::Equal(token) => write!(f, "= {token}"),
|
||||
Condition::NotEqual(token) => write!(f, "!= {token}"),
|
||||
Condition::Null => write!(f, "IS NULL"),
|
||||
Condition::Empty => write!(f, "IS EMPTY"),
|
||||
Condition::Exists => write!(f, "EXISTS"),
|
||||
Condition::LowerThan(token) => write!(f, "< {token}"),
|
||||
Condition::LowerThanOrEqual(token) => write!(f, "<= {token}"),
|
||||
Condition::Between { from, to } => write!(f, "{from} TO {to}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Token<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{{{}}}", self.value())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ license.workspace = true
serde_json = "1.0"

[dev-dependencies]
criterion = { version = "0.5.1", features = ["html_reports"] }
criterion = { version = "0.4.0", features = ["html_reports"] }

[[bench]]
name = "benchmarks"
@@ -138,12 +138,6 @@ impl Query {
index_vec.push(index_uid);
Self { index_uids: Some(index_vec), ..self }
}

// Removes the `from` and `limit` restrictions from the query.
// Useful to get the total number of tasks matching a filter.
pub fn without_limits(self) -> Self {
Query { limit: None, from: None, ..self }
}
}

#[derive(Debug, Clone)]
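The `without_limits` helper above exists so the scheduler can report a total count next to a paginated slice: the hunks below evaluate the same query twice, once with the limits stripped. A self-contained toy version of that pattern, with a `Vec` standing in for the task store (everything except the shape of `without_limits` is invented for the example):

```rust
// Toy illustration of the "count without limits, then fetch the page" pattern.
#[derive(Clone, Default)]
struct Query {
    from: Option<u32>,
    limit: Option<u32>,
    index_uids: Option<Vec<String>>,
}

impl Query {
    // Same shape as the `without_limits` helper in the hunk above.
    fn without_limits(self) -> Self {
        Query { limit: None, from: None, ..self }
    }
}

// Stand-in for the scheduler's internal task-id lookup.
fn get_task_ids(all_tasks: &[u32], query: &Query) -> Vec<u32> {
    let from = query.from.unwrap_or(0) as usize;
    let limit = query.limit.unwrap_or(u32::MAX) as usize;
    all_tasks.iter().copied().skip(from).take(limit).collect()
}

fn main() {
    let all_tasks: Vec<u32> = (0..10).collect();
    let query = Query { from: Some(2), limit: Some(3), ..Default::default() };

    // Total number of matches, ignoring pagination...
    let total = get_task_ids(&all_tasks, &query.clone().without_limits()).len();
    // ...and the page the caller actually asked for.
    let page = get_task_ids(&all_tasks, &query);

    assert_eq!(total, 10);
    assert_eq!(page, vec![2, 3, 4]);
}
```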
@@ -813,11 +807,6 @@ impl IndexScheduler {
Ok(res)
}

// Return true if there is at least one task that is processing.
pub fn is_task_processing(&self) -> Result<bool> {
Ok(!self.processing_tasks.read().unwrap().processing.is_empty())
}

/// Return true iff there is at least one task associated with this index
/// that is processing.
pub fn is_index_processing(&self, index: &str) -> Result<bool> {
@@ -828,8 +817,7 @@ impl IndexScheduler {
Ok(nbr_index_processing_tasks > 0)
}

/// Return the task ids matching the query along with the total number of tasks
/// by ignoring the from and limit parameters from the user's point of view.
/// Return the task ids matching the query from the user's point of view.
///
/// There are two differences between an internal query and a query executed by
/// the user.
@@ -842,13 +830,7 @@ impl IndexScheduler {
rtxn: &RoTxn,
query: &Query,
filters: &meilisearch_auth::AuthFilter,
) -> Result<(RoaringBitmap, u64)> {
// compute all tasks matching the filter by ignoring the limits, to find the number of tasks matching
// the filter.
// As this causes us to compute the filter twice it is slightly inefficient, but doing it this way spares
// us from modifying the underlying implementation, and the performance remains sufficient.
// Should this change, we would modify `get_task_ids` to directly return the number of matching tasks.
let total_tasks = self.get_task_ids(rtxn, &query.clone().without_limits())?;
) -> Result<RoaringBitmap> {
let mut tasks = self.get_task_ids(rtxn, query)?;

// If the query contains a list of index uid or there is a finite list of authorized indexes,
@@ -871,11 +853,10 @@ impl IndexScheduler {
}
}

Ok((tasks, total_tasks.len()))
Ok(tasks)
}

/// Return the tasks matching the query from the user's point of view along
/// with the total number of tasks matching the query, ignoring from and limit.
/// Return the tasks matching the query from the user's point of view.
///
/// There are two differences between an internal query and a query executed by
/// the user.
@@ -887,10 +868,11 @@ impl IndexScheduler {
&self,
query: Query,
filters: &meilisearch_auth::AuthFilter,
) -> Result<(Vec<Task>, u64)> {
) -> Result<Vec<Task>> {
let rtxn = self.env.read_txn()?;

let (tasks, total) = self.get_task_ids_from_authorized_indexes(&rtxn, &query, filters)?;
let tasks = self.get_task_ids_from_authorized_indexes(&rtxn, &query, filters)?;

let tasks = self.get_existing_tasks(
&rtxn,
tasks.into_iter().rev().take(query.limit.unwrap_or(u32::MAX) as usize),
@@ -901,19 +883,16 @@ impl IndexScheduler {

let ret = tasks.into_iter();
if processing.is_empty() {
Ok((ret.collect(), total))
Ok(ret.collect())
} else {
Ok((
ret.map(|task| {
if processing.contains(task.uid) {
Ok(ret
.map(|task| match processing.contains(task.uid) {
true => {
Task { status: Status::Processing, started_at: Some(started_at), ..task }
} else {
task
}
false => task,
})
.collect(),
total,
))
.collect())
}
}
@@ -1856,17 +1835,6 @@ mod tests {
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_third_task");
}

#[test]
fn test_task_is_processing() {
let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);

index_scheduler.register(index_creation_task("index_a", "id")).unwrap();
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_a_task");

handle.advance_till([Start, BatchCreated]);
assert!(index_scheduler.is_task_processing().unwrap());
}

/// We send a lot of tasks but notify the tasks scheduler only once as
/// we send them very fast, we must make sure that they are all processed.
#[test]
@@ -2799,43 +2767,43 @@ mod tests {
|
||||
|
||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||
let query = Query { limit: Some(0), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[]");
|
||||
|
||||
let query = Query { limit: Some(1), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[2,]");
|
||||
|
||||
let query = Query { limit: Some(2), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[1,2,]");
|
||||
|
||||
let query = Query { from: Some(1), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");
|
||||
|
||||
let query = Query { from: Some(2), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]");
|
||||
|
||||
let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[1,]");
|
||||
|
||||
let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");
|
||||
@@ -2862,13 +2830,13 @@ mod tests {
|
||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||
|
||||
let query = Query { statuses: Some(vec![Status::Processing]), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[0,]"); // only the processing tasks in the first tick
|
||||
|
||||
let query = Query { statuses: Some(vec![Status::Enqueued]), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[1,2,]"); // only the enqueued tasks in the first tick
|
||||
@@ -2877,7 +2845,7 @@ mod tests {
|
||||
statuses: Some(vec![Status::Enqueued, Status::Processing]),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]"); // both enqueued and processing tasks in the first tick
|
||||
@@ -2887,7 +2855,7 @@ mod tests {
|
||||
after_started_at: Some(start_time),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// both enqueued and processing tasks in the first tick, but limited to those with a started_at
|
||||
@@ -2899,7 +2867,7 @@ mod tests {
|
||||
before_started_at: Some(start_time),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// both enqueued and processing tasks in the first tick, but limited to those with a started_at
|
||||
@@ -2912,7 +2880,7 @@ mod tests {
|
||||
before_started_at: Some(start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// both enqueued and processing tasks in the first tick, but limited to those with a started_at
|
||||
@@ -2939,7 +2907,7 @@ mod tests {
|
||||
before_started_at: Some(start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// both succeeded and processing tasks in the first tick, but limited to those with a started_at
|
||||
@@ -2952,7 +2920,7 @@ mod tests {
|
||||
before_started_at: Some(start_time),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// both succeeded and processing tasks in the first tick, but limited to those with a started_at
|
||||
@@ -2965,7 +2933,7 @@ mod tests {
|
||||
before_started_at: Some(second_start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// both succeeded and processing tasks in the first tick, but limited to those with a started_at
|
||||
@@ -2985,7 +2953,7 @@ mod tests {
|
||||
|
||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// we run the same query to verify that, and indeed find that the last task is matched
|
||||
@@ -2997,7 +2965,7 @@ mod tests {
|
||||
before_started_at: Some(second_start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// enqueued, succeeded, or processing tasks started after the second part of the test, should
|
||||
@@ -3009,7 +2977,7 @@ mod tests {
|
||||
|
||||
// now the last task should have failed
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "end");
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// so running the last query should return nothing
|
||||
@@ -3021,7 +2989,7 @@ mod tests {
|
||||
before_started_at: Some(second_start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// but the same query on failed tasks should return the last task
|
||||
@@ -3033,7 +3001,7 @@ mod tests {
|
||||
before_started_at: Some(second_start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// but the same query on failed tasks should return the last task
|
||||
@@ -3046,7 +3014,7 @@ mod tests {
|
||||
before_started_at: Some(second_start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// same query but with an invalid uid
|
||||
@@ -3059,7 +3027,7 @@ mod tests {
|
||||
before_started_at: Some(second_start_time + Duration::minutes(1)),
|
||||
..Default::default()
|
||||
};
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// same query but with a valid uid
|
||||
@@ -3091,14 +3059,14 @@ mod tests {
|
||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||
|
||||
let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// only the first task associated with catto is returned, the indexSwap tasks are excluded!
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[0,]");
|
||||
|
||||
let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(
|
||||
&rtxn,
|
||||
&query,
|
||||
@@ -3112,7 +3080,7 @@ mod tests {
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[]");
|
||||
|
||||
let query = Query::default();
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(
|
||||
&rtxn,
|
||||
&query,
|
||||
@@ -3126,7 +3094,7 @@ mod tests {
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[1,]");
|
||||
|
||||
let query = Query::default();
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(
|
||||
&rtxn,
|
||||
&query,
|
||||
@@ -3145,7 +3113,7 @@ mod tests {
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");
|
||||
|
||||
let query = Query::default();
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// we asked for all the tasks with all index authorized -> all tasks returned
|
||||
@@ -3178,7 +3146,7 @@ mod tests {
|
||||
|
||||
let rtxn = index_scheduler.read_txn().unwrap();
|
||||
let query = Query { canceled_by: Some(vec![task_cancelation.uid]), ..Query::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
|
||||
.unwrap();
|
||||
// 0 is not returned because it was not canceled, 3 is not returned because it is the uid of the
|
||||
@@ -3186,7 +3154,7 @@ mod tests {
|
||||
snapshot!(snapshot_bitmap(&tasks), @"[1,2,]");
|
||||
|
||||
let query = Query { canceled_by: Some(vec![task_cancelation.uid]), ..Query::default() };
|
||||
let (tasks, _) = index_scheduler
|
||||
let tasks = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(
|
||||
&rtxn,
|
||||
&query,
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"indexCreation" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
index_a [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
||||
@@ -15,7 +15,7 @@ license.workspace = true
serde_json = "1.0"

[dev-dependencies]
criterion = "0.5.1"
criterion = "0.4.0"

[[bench]]
name = "depth"
@@ -199,30 +199,6 @@ macro_rules! snapshot {
|
||||
};
|
||||
}
|
||||
|
||||
/// Create a string from the value by serializing it as Json, optionally
|
||||
/// redacting some parts of it.
|
||||
///
|
||||
/// The second argument to the macro can be an object expression for redaction.
|
||||
/// It's in the form { selector => replacement }. For more information about redactions
|
||||
/// refer to the redactions feature in the `insta` guide.
|
||||
#[macro_export]
|
||||
macro_rules! json_string {
|
||||
($value:expr, {$($k:expr => $v:expr),*$(,)?}) => {
|
||||
{
|
||||
let (_, snap) = meili_snap::insta::_prepare_snapshot_for_redaction!($value, {$($k => $v),*}, Json, File);
|
||||
snap
|
||||
}
|
||||
};
|
||||
($value:expr) => {{
|
||||
let value = meili_snap::insta::_macro_support::serialize_value(
|
||||
&$value,
|
||||
meili_snap::insta::_macro_support::SerializationFormat::Json,
|
||||
meili_snap::insta::_macro_support::SnapshotLocation::File
|
||||
);
|
||||
value
|
||||
}};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate as meili_snap;
|
||||
@@ -274,3 +250,27 @@ mod tests {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a string from the value by serializing it as Json, optionally
|
||||
/// redacting some parts of it.
|
||||
///
|
||||
/// The second argument to the macro can be an object expression for redaction.
|
||||
/// It's in the form { selector => replacement }. For more information about redactions
|
||||
/// refer to the redactions feature in the `insta` guide.
|
||||
#[macro_export]
|
||||
macro_rules! json_string {
|
||||
($value:expr, {$($k:expr => $v:expr),*$(,)?}) => {
|
||||
{
|
||||
let (_, snap) = meili_snap::insta::_prepare_snapshot_for_redaction!($value, {$($k => $v),*}, Json, File);
|
||||
snap
|
||||
}
|
||||
};
|
||||
($value:expr) => {{
|
||||
let value = meili_snap::insta::_macro_support::serialize_value(
|
||||
&$value,
|
||||
meili_snap::insta::_macro_support::SerializationFormat::Json,
|
||||
meili_snap::insta::_macro_support::SnapshotLocation::File
|
||||
);
|
||||
value
|
||||
}};
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@ license.workspace = true
base64 = "0.21.0"
enum-iterator = "1.4.0"
hmac = "0.12.1"
log = "0.4.19"
maplit = "1.0.2"
meilisearch-types = { path = "../meilisearch-types" }
rand = "0.8.5"
@@ -24,6 +23,4 @@ serde_json = { version = "1.0.95", features = ["preserve_order"] }
sha2 = "0.10.6"
thiserror = "1.0.40"
time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tokio = { version = "1.27.0", features = ["full"] }
uuid = { version = "1.3.1", features = ["serde", "v4"] }
zookeeper-client = "0.4.0"
@@ -2,7 +2,6 @@ use std::error::Error;

use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::internal_error;
use zookeeper_client as zk;

pub type Result<T> = std::result::Result<T, AuthControllerError>;

@@ -20,8 +19,6 @@ internal_error!(
AuthControllerError: meilisearch_types::milli::heed::Error,
std::io::Error,
serde_json::Error,
tokio::task::JoinError,
zk::Error,
std::str::Utf8Error
);
@@ -16,119 +16,22 @@ pub use store::open_auth_store_env;
|
||||
use store::{generate_key_as_hexa, HeedAuthStore};
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
use zookeeper_client as zk;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AuthController {
|
||||
store: Arc<HeedAuthStore>,
|
||||
master_key: Option<String>,
|
||||
zk: Option<zk::Client>,
|
||||
}
|
||||
|
||||
impl AuthController {
|
||||
pub async fn new(
|
||||
db_path: impl AsRef<Path>,
|
||||
master_key: &Option<String>,
|
||||
zk: Option<zk::Client>,
|
||||
) -> Result<Self> {
|
||||
pub fn new(db_path: impl AsRef<Path>, master_key: &Option<String>) -> Result<Self> {
|
||||
let store = HeedAuthStore::new(db_path)?;
|
||||
let controller = Self { store: Arc::new(store), master_key: master_key.clone(), zk };
|
||||
|
||||
match controller.zk {
|
||||
// setup the auth zk environment, the `auth` node
|
||||
Some(ref zk) => {
|
||||
let options =
|
||||
zk::CreateOptions::new(zk::CreateMode::Persistent, zk::Acl::anyone_all());
|
||||
// TODO: we should catch the potential unexpected errors here https://docs.rs/zookeeper-client/latest/zookeeper_client/struct.Client.html#method.create
|
||||
// for the moment we consider that `create` only returns Error::NodeExists.
|
||||
match zk.create("/auth", &[], &options).await {
|
||||
// If the store is empty, we must generate and push the default api-keys.
|
||||
Ok(_) => generate_default_keys(&controller).await?,
|
||||
// If the node exist we should clear our DB and download all the existing api-keys
|
||||
Err(zk::Error::NodeExists) => {
|
||||
log::warn!("Auth directory already exists, we need to clear our keys + import the one in zookeeper");
|
||||
|
||||
let store = controller.store.clone();
|
||||
tokio::task::spawn_blocking(move || store.delete_all_keys()).await??;
|
||||
let children = zk
|
||||
.list_children("/auth")
|
||||
.await
|
||||
.expect("Internal, the auth directory was deleted during execution.");
|
||||
|
||||
log::info!("Importing {} api-keys", children.len());
|
||||
for path in children {
|
||||
log::info!(" Importing {}", path);
|
||||
match zk.get_data(&format!("/auth/{}", &path)).await {
|
||||
Ok((key, _stat)) => {
|
||||
let key = serde_json::from_slice(&key).unwrap();
|
||||
let store = controller.store.clone();
|
||||
tokio::task::spawn_blocking(move || store.put_api_key(key))
|
||||
.await??;
|
||||
|
||||
},
|
||||
Err(e) => panic!("{e}")
|
||||
}
|
||||
// else the file was deleted while we were inserting the key. We ignore it.
|
||||
// TODO: What happens if someone updates the files before we have the time
|
||||
// to setup the watcher
|
||||
}
|
||||
}
|
||||
e @ Err(
|
||||
zk::Error::NoNode
|
||||
| zk::Error::NoChildrenForEphemerals
|
||||
| zk::Error::InvalidAcl,
|
||||
) => unreachable!("{e:?}"),
|
||||
Err(e) => {
|
||||
panic!("{e}")
|
||||
}
|
||||
}
|
||||
// TODO: Race condition above:
|
||||
// What happens if two node join exactly at the same moment:
|
||||
// One will create the dir
|
||||
// The second one will delete its DB, load nothing and install a watcher
|
||||
// The first one will push its keys and should wake up and update the second one.
|
||||
// / BUT, if the second one delete its DB and the first one push its files before the second one install the watcher we're fucked
|
||||
|
||||
// Zookeeper Event listener loop
|
||||
let controller_clone = controller.clone();
|
||||
let mut watcher = zk.watch("/auth", zk::AddWatchMode::PersistentRecursive).await?;
|
||||
let czk = zk.clone();
|
||||
tokio::spawn(async move {
|
||||
let zk = czk;
|
||||
loop {
|
||||
let zk::WatchedEvent { event_type, session_state, path } =
|
||||
dbg!(watcher.changed().await);
|
||||
|
||||
match event_type {
|
||||
zk::EventType::Session => panic!("Session error {:?}", session_state),
|
||||
// a key is deleted from zk
|
||||
zk::EventType::NodeDeleted => {
|
||||
// TODO: ugly unwraps
|
||||
let uuid = path.strip_prefix("/auth/").unwrap();
|
||||
let uuid = Uuid::parse_str(&uuid).unwrap();
|
||||
log::info!("The key {} has been deleted", uuid);
|
||||
dbg!(controller_clone.store.delete_api_key(uuid).unwrap());
|
||||
}
|
||||
zk::EventType::NodeCreated | zk::EventType::NodeDataChanged => {
|
||||
let (key, stat) = zk.get_data(&path).await.unwrap();
|
||||
let key: Key = serde_json::from_slice(&key).unwrap();
|
||||
log::info!("The key {} has been deleted", key.uid);
|
||||
|
||||
dbg!(controller_clone.store.put_api_key(key).unwrap());
|
||||
}
|
||||
zk::EventType::NodeChildrenChanged => panic!("Got the unexpected NodeChildrenChanged event, what is it used for?"),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
None => {
|
||||
if controller.store.is_empty()? {
|
||||
generate_default_keys(&controller).await?;
|
||||
}
|
||||
}
|
||||
if store.is_empty()? {
|
||||
generate_default_keys(&store)?;
|
||||
}
|
||||
|
||||
Ok(controller)
|
||||
Ok(Self { store: Arc::new(store), master_key: master_key.clone() })
|
||||
}
|
||||
|
||||
/// Return `Ok(())` if the auth controller is able to access one of its database.
|
||||
@@ -147,27 +50,14 @@ impl AuthController {
|
||||
self.store.used_size()
|
||||
}
|
||||
|
||||
pub async fn create_key(&self, create_key: CreateApiKey) -> Result<Key> {
|
||||
pub fn create_key(&self, create_key: CreateApiKey) -> Result<Key> {
|
||||
match self.store.get_api_key(create_key.uid)? {
|
||||
Some(_) => Err(AuthControllerError::ApiKeyAlreadyExists(create_key.uid.to_string())),
|
||||
None => self.put_key(create_key.to_key()).await,
|
||||
None => self.store.put_api_key(create_key.to_key()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn put_key(&self, key: Key) -> Result<Key> {
|
||||
let store = self.store.clone();
|
||||
// TODO: we may commit only after zk persisted the keys
|
||||
let key = tokio::task::spawn_blocking(move || store.put_api_key(key)).await??;
|
||||
if let Some(ref zk) = self.zk {
|
||||
let options = zk::CreateOptions::new(zk::CreateMode::Persistent, zk::Acl::anyone_all());
|
||||
|
||||
zk.create(&format!("/auth/{}", key.uid), &serde_json::to_vec_pretty(&key)?, &options)
|
||||
.await?;
|
||||
}
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
pub async fn update_key(&self, uid: Uuid, patch: PatchApiKey) -> Result<Key> {
|
||||
pub fn update_key(&self, uid: Uuid, patch: PatchApiKey) -> Result<Key> {
|
||||
let mut key = self.get_key(uid)?;
|
||||
match patch.description {
|
||||
Setting::NotSet => (),
|
||||
@@ -178,14 +68,7 @@ impl AuthController {
|
||||
name => key.name = name.set(),
|
||||
};
|
||||
key.updated_at = OffsetDateTime::now_utc();
|
||||
let store = self.store.clone();
|
||||
// TODO: we may commit only after zk persisted the keys
|
||||
let key = tokio::task::spawn_blocking(move || store.put_api_key(key)).await??;
|
||||
if let Some(ref zk) = self.zk {
|
||||
zk.set_data(&format!("/auth/{}", key.uid), &serde_json::to_vec_pretty(&key)?, None)
|
||||
.await?;
|
||||
}
|
||||
Ok(key)
|
||||
self.store.put_api_key(key)
|
||||
}
|
||||
|
||||
pub fn get_key(&self, uid: Uuid) -> Result<Key> {
|
||||
@@ -226,13 +109,8 @@ impl AuthController {
|
||||
self.store.list_api_keys()
|
||||
}
|
||||
|
||||
pub async fn delete_key(&self, uid: Uuid) -> Result<()> {
|
||||
let store = self.store.clone();
|
||||
let deleted = tokio::task::spawn_blocking(move || store.delete_api_key(uid)).await??;
|
||||
if deleted {
|
||||
if let Some(ref zk) = self.zk {
|
||||
zk.delete(&format!("/auth/{}", uid), None).await?;
|
||||
}
|
||||
pub fn delete_key(&self, uid: Uuid) -> Result<()> {
|
||||
if self.store.delete_api_key(uid)? {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(AuthControllerError::ApiKeyNotFound(uid.to_string()))
|
||||
@@ -281,7 +159,7 @@ impl AuthController {
|
||||
self.store.delete_all_keys()
|
||||
}
|
||||
|
||||
/// Insert a key in the DB without any check on its validity
|
||||
/// Delete all the keys in the DB.
|
||||
pub fn raw_insert_key(&mut self, key: Key) -> Result<()> {
|
||||
self.store.put_api_key(key)?;
|
||||
Ok(())
|
||||
@@ -426,9 +304,9 @@ pub struct IndexSearchRules {
|
||||
pub filter: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
async fn generate_default_keys(controller: &AuthController) -> Result<()> {
|
||||
controller.put_key(Key::default_admin()).await?;
|
||||
controller.put_key(Key::default_search()).await?;
|
||||
fn generate_default_keys(store: &HeedAuthStore) -> Result<()> {
|
||||
store.put_api_key(Key::default_admin())?;
|
||||
store.put_api_key(Key::default_search())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -259,6 +259,9 @@ InvalidSettingsRankingRules , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSearchableAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSortableAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsStopWords , InvalidRequest , BAD_REQUEST ;
InvalidSettingsNonSeparatorTokens , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSeparatorTokens , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDictionary , InvalidRequest , BAD_REQUEST ;
InvalidSettingsSynonyms , InvalidRequest , BAD_REQUEST ;
InvalidSettingsTypoTolerance , InvalidRequest , BAD_REQUEST ;
InvalidState , Internal , INTERNAL_SERVER_ERROR ;
@@ -1,6 +1,6 @@
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]
#[serde(rename_all = "camelCase", default)]
pub struct RuntimeTogglableFeatures {
pub score_details: bool,
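The only difference between the two derive lines above is the extra `PartialEq, Eq`. The dump tests earlier in this compare assert equality on `RuntimeTogglableFeatures`, which only compiles when the type implements `PartialEq`. A self-contained sketch of that dependency (the struct here is a simplified stand-in for the real type, keeping only fields that appear in this diff):

```rust
// Simplified stand-in showing why the derive matters for the dump tests.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct RuntimeTogglableFeatures {
    pub score_details: bool,
    pub vector_store: bool,
}

fn main() {
    let expected = RuntimeTogglableFeatures { vector_store: true, ..Default::default() };
    let read_back = RuntimeTogglableFeatures { vector_store: true, ..Default::default() };
    // Without `PartialEq` on the struct, this assert_eq! would not compile.
    assert_eq!(read_back, expected);
}
```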
@@ -171,6 +171,15 @@ pub struct Settings<T> {
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSettingsStopWords>)]
|
||||
pub stop_words: Setting<BTreeSet<String>>,
|
||||
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSettingsNonSeparatorTokens>)]
|
||||
pub non_separator_tokens: Setting<BTreeSet<String>>,
|
||||
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSettingsSeparatorTokens>)]
|
||||
pub separator_tokens: Setting<BTreeSet<String>>,
|
||||
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSettingsDictionary>)]
|
||||
pub dictionary: Setting<BTreeSet<String>>,
|
||||
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSettingsSynonyms>)]
|
||||
pub synonyms: Setting<BTreeMap<String, Vec<String>>>,
|
||||
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
|
||||
@@ -201,6 +210,9 @@ impl Settings<Checked> {
|
||||
ranking_rules: Setting::Reset,
|
||||
stop_words: Setting::Reset,
|
||||
synonyms: Setting::Reset,
|
||||
non_separator_tokens: Setting::Reset,
|
||||
separator_tokens: Setting::Reset,
|
||||
dictionary: Setting::Reset,
|
||||
distinct_attribute: Setting::Reset,
|
||||
typo_tolerance: Setting::Reset,
|
||||
faceting: Setting::Reset,
|
||||
@@ -217,6 +229,9 @@ impl Settings<Checked> {
|
||||
sortable_attributes,
|
||||
ranking_rules,
|
||||
stop_words,
|
||||
non_separator_tokens,
|
||||
separator_tokens,
|
||||
dictionary,
|
||||
synonyms,
|
||||
distinct_attribute,
|
||||
typo_tolerance,
|
||||
@@ -232,6 +247,9 @@ impl Settings<Checked> {
|
||||
sortable_attributes,
|
||||
ranking_rules,
|
||||
stop_words,
|
||||
non_separator_tokens,
|
||||
separator_tokens,
|
||||
dictionary,
|
||||
synonyms,
|
||||
distinct_attribute,
|
||||
typo_tolerance,
|
||||
@@ -274,6 +292,9 @@ impl Settings<Unchecked> {
|
||||
ranking_rules: self.ranking_rules,
|
||||
stop_words: self.stop_words,
|
||||
synonyms: self.synonyms,
|
||||
non_separator_tokens: self.non_separator_tokens,
|
||||
separator_tokens: self.separator_tokens,
|
||||
dictionary: self.dictionary,
|
||||
distinct_attribute: self.distinct_attribute,
|
||||
typo_tolerance: self.typo_tolerance,
|
||||
faceting: self.faceting,
|
||||
@@ -335,6 +356,28 @@ pub fn apply_settings_to_builder(
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match settings.non_separator_tokens {
|
||||
Setting::Set(ref non_separator_tokens) => {
|
||||
builder.set_non_separator_tokens(non_separator_tokens.clone())
|
||||
}
|
||||
Setting::Reset => builder.reset_non_separator_tokens(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match settings.separator_tokens {
|
||||
Setting::Set(ref separator_tokens) => {
|
||||
builder.set_separator_tokens(separator_tokens.clone())
|
||||
}
|
||||
Setting::Reset => builder.reset_separator_tokens(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match settings.dictionary {
|
||||
Setting::Set(ref dictionary) => builder.set_dictionary(dictionary.clone()),
|
||||
Setting::Reset => builder.reset_dictionary(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match settings.synonyms {
|
||||
Setting::Set(ref synonyms) => builder.set_synonyms(synonyms.clone().into_iter().collect()),
|
||||
Setting::Reset => builder.reset_synonyms(),
|
||||
@@ -459,15 +502,14 @@ pub fn settings(
|
||||
})
|
||||
.transpose()?
|
||||
.unwrap_or_default();
|
||||
|
||||
let non_separator_tokens = index.non_separator_tokens(rtxn)?.unwrap_or_default();
|
||||
let separator_tokens = index.separator_tokens(rtxn)?.unwrap_or_default();
|
||||
let dictionary = index.dictionary(rtxn)?.unwrap_or_default();
|
||||
|
||||
let distinct_field = index.distinct_field(rtxn)?.map(String::from);
|
||||
|
||||
// in milli each word in the synonyms map were split on their separator. Since we lost
|
||||
// this information we are going to put space between words.
|
||||
let synonyms = index
|
||||
.synonyms(rtxn)?
|
||||
.iter()
|
||||
.map(|(key, values)| (key.join(" "), values.iter().map(|value| value.join(" ")).collect()))
|
||||
.collect();
|
||||
let synonyms = index.user_defined_synonyms(rtxn)?;
|
||||
|
||||
let min_typo_word_len = MinWordSizeTyposSetting {
|
||||
one_typo: Setting::Set(index.min_word_len_one_typo(rtxn)?),
|
||||
@@ -520,6 +562,9 @@ pub fn settings(
|
||||
sortable_attributes: Setting::Set(sortable_attributes),
|
||||
ranking_rules: Setting::Set(criteria.iter().map(|c| c.clone().into()).collect()),
|
||||
stop_words: Setting::Set(stop_words),
|
||||
non_separator_tokens: Setting::Set(non_separator_tokens),
|
||||
separator_tokens: Setting::Set(separator_tokens),
|
||||
dictionary: Setting::Set(dictionary),
|
||||
distinct_attribute: match distinct_field {
|
||||
Some(field) => Setting::Set(field),
|
||||
None => Setting::Reset,
|
||||
@@ -642,6 +687,9 @@ pub(crate) mod test {
|
||||
sortable_attributes: Setting::NotSet,
|
||||
ranking_rules: Setting::NotSet,
|
||||
stop_words: Setting::NotSet,
|
||||
non_separator_tokens: Setting::NotSet,
|
||||
separator_tokens: Setting::NotSet,
|
||||
dictionary: Setting::NotSet,
|
||||
synonyms: Setting::NotSet,
|
||||
distinct_attribute: Setting::NotSet,
|
||||
typo_tolerance: Setting::NotSet,
|
||||
@@ -663,6 +711,9 @@ pub(crate) mod test {
|
||||
sortable_attributes: Setting::NotSet,
|
||||
ranking_rules: Setting::NotSet,
|
||||
stop_words: Setting::NotSet,
|
||||
non_separator_tokens: Setting::NotSet,
|
||||
separator_tokens: Setting::NotSet,
|
||||
dictionary: Setting::NotSet,
|
||||
synonyms: Setting::NotSet,
|
||||
distinct_attribute: Setting::NotSet,
|
||||
typo_tolerance: Setting::NotSet,
|
||||
|
||||
@@ -19,7 +19,6 @@ actix-http = { version = "3.3.1", default-features = false, features = [
"compress-gzip",
"rustls",
] }
actix-utils = "3.0.1"
actix-web = { version = "4.3.1", default-features = false, features = [
"macros",
"compress-brotli",
@@ -51,14 +50,13 @@ futures-util = "0.3.28"
http = "0.2.9"
index-scheduler = { path = "../index-scheduler" }
indexmap = { version = "1.9.3", features = ["serde-1"] }
is-terminal = "0.4.8"
itertools = "0.10.5"
jsonwebtoken = "8.3.0"
lazy_static = "1.4.0"
log = "0.4.17"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
mimalloc = { version = "0.1.37", default-features = false }
mimalloc = { version = "0.1.36", default-features = false }
mime = "0.3.17"
num_cpus = "1.15.0"
obkv = "0.2.0"
@@ -104,8 +102,9 @@ uuid = { version = "1.3.1", features = ["serde", "v4"] }
walkdir = "2.3.3"
yaup = "0.2.1"
serde_urlencoded = "0.7.1"
actix-utils = "3.0.1"
atty = "0.2.14"
termcolor = "1.2.0"
zookeeper-client = "0.4.0"

[dev-dependencies]
actix-rt = "2.8.0"
@@ -155,5 +154,5 @@ thai = ["meilisearch-types/thai"]
greek = ["meilisearch-types/greek"]

[package.metadata.mini-dashboard]
assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.11/build.zip"
sha1 = "83cd44ed1e5f97ecb581dc9f958a63f4ccc982d9"
assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.7/build.zip"
sha1 = "28b45bf772c84f9a6e16bc1689b393bfce8da7d6"
@@ -312,7 +312,6 @@ impl From<Opt> for Infos {
|
||||
config_file_path,
|
||||
#[cfg(all(not(debug_assertions), feature = "analytics"))]
|
||||
no_analytics: _,
|
||||
zk_url: _,
|
||||
} = options;
|
||||
|
||||
let schedule_snapshot = match schedule_snapshot {
|
||||
@@ -575,10 +574,6 @@ pub struct SearchAggregator {
|
||||
filter_total_number_of_criteria: usize,
|
||||
used_syntax: HashMap<String, usize>,
|
||||
|
||||
// attributes_to_search_on
|
||||
// every time a search is done using attributes_to_search_on
attributes_to_search_on_total_number_of_uses: usize,

// q
// The maximum number of terms in a q request
max_terms_number: usize,
@@ -652,11 +647,6 @@ impl SearchAggregator {
ret.filter_sum_of_criteria_terms = RE.split(&stringified_filters).count();
}

// attributes_to_search_on
if let Some(_) = query.attributes_to_search_on {
ret.attributes_to_search_on_total_number_of_uses = 1;
}

if let Some(ref q) = query.q {
ret.max_terms_number = q.split_whitespace().count();
}
@@ -730,18 +720,9 @@ impl SearchAggregator {
let used_syntax = self.used_syntax.entry(key).or_insert(0);
*used_syntax = used_syntax.saturating_add(value);
}

// attributes_to_search_on
self.attributes_to_search_on_total_number_of_uses = self
.attributes_to_search_on_total_number_of_uses
.saturating_add(other.attributes_to_search_on_total_number_of_uses);

// q
self.max_terms_number = self.max_terms_number.max(other.max_terms_number);

// vector
self.max_vector_size = self.max_vector_size.max(other.max_vector_size);

// pagination
self.max_limit = self.max_limit.max(other.max_limit);
self.max_offset = self.max_offset.max(other.max_offset);
@@ -780,17 +761,17 @@ impl SearchAggregator {
if self.total_received == 0 {
None
} else {
// the index of the 99th percentage of value
let percentile_99th = 0.99 * (self.total_succeeded as f64 - 1.) + 1.;
// we get all the values in a sorted manner
let time_spent = self.time_spent.into_sorted_vec();
// the index of the 99th percentage of value
let percentile_99th = time_spent.len() * 99 / 100;
// We are only interested by the slowest value of the 99th fastest results
let time_spent = time_spent.get(percentile_99th);
let time_spent = time_spent.get(percentile_99th as usize);

let properties = json!({
"user-agent": self.user_agents,
"requests": {
"99th_response_time": time_spent.map(|t| format!("{:.2}", t)),
"99th_response_time": time_spent.map(|t| format!("{:.2}", t)),
"total_succeeded": self.total_succeeded,
"total_failed": self.total_received.saturating_sub(self.total_succeeded), // just to be sure we never panics
"total_received": self.total_received,
@@ -805,15 +786,9 @@ impl SearchAggregator {
"avg_criteria_number": format!("{:.2}", self.filter_sum_of_criteria_terms as f64 / self.filter_total_number_of_criteria as f64),
"most_used_syntax": self.used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)),
},
"attributes_to_search_on": {
"total_number_of_uses": self.attributes_to_search_on_total_number_of_uses,
},
"q": {
"max_terms_number": self.max_terms_number,
},
"vector": {
"max_vector_size": self.max_vector_size,
},
"pagination": {
"max_limit": self.max_limit,
"max_offset": self.max_offset,
@@ -868,10 +843,6 @@ pub struct MultiSearchAggregator {
// sum of the number of search queries in the requests, use with total_received to compute an average
total_search_count: usize,

// scoring
show_ranking_score: bool,
show_ranking_score_details: bool,

// context
user_agents: HashSet<String>,
}
@@ -885,9 +856,6 @@ impl MultiSearchAggregator {
let distinct_indexes: HashSet<_> =
query.iter().map(|query| query.index_uid.as_str()).collect();

let show_ranking_score = query.iter().any(|query| query.show_ranking_score);
let show_ranking_score_details = query.iter().any(|query| query.show_ranking_score_details);

Self {
timestamp,
total_received: 1,
@@ -895,8 +863,6 @@ impl MultiSearchAggregator {
total_distinct_index_count: distinct_indexes.len(),
total_single_index: if distinct_indexes.len() == 1 { 1 } else { 0 },
total_search_count: query.len(),
show_ranking_score,
show_ranking_score_details,
user_agents,
}
}
@@ -918,9 +884,6 @@ impl MultiSearchAggregator {
this.total_distinct_index_count.saturating_add(other.total_distinct_index_count);
let total_single_index = this.total_single_index.saturating_add(other.total_single_index);
let total_search_count = this.total_search_count.saturating_add(other.total_search_count);
let show_ranking_score = this.show_ranking_score || other.show_ranking_score;
let show_ranking_score_details =
this.show_ranking_score_details || other.show_ranking_score_details;
let mut user_agents = this.user_agents;

for user_agent in other.user_agents.into_iter() {
@@ -936,8 +899,6 @@ impl MultiSearchAggregator {
total_single_index,
total_search_count,
user_agents,
show_ranking_score,
show_ranking_score_details,
// do not add _ or ..Default::default() here
};

@@ -964,10 +925,6 @@ impl MultiSearchAggregator {
"searches": {
"total_search_count": self.total_search_count,
"avg_search_count": (self.total_search_count as f64) / (self.total_received as f64),
},
"scoring": {
"show_ranking_score": self.show_ranking_score,
"show_ranking_score_details": self.show_ranking_score_details,
}
});

@@ -1188,7 +1145,6 @@ pub struct DocumentsDeletionAggregator {
#[serde(rename = "user-agent")]
user_agents: HashSet<String>,

#[serde(rename = "requests.total_received")]
total_received: usize,
per_document_id: bool,
clear_all: bool,
@@ -1339,7 +1295,6 @@ pub struct HealthAggregator {
#[serde(rename = "user-agent")]
user_agents: HashSet<String>,

#[serde(rename = "requests.total_received")]
total_received: usize,
}

@@ -1390,7 +1345,7 @@ pub struct DocumentsFetchAggregator {
#[serde(rename = "user-agent")]
user_agents: HashSet<String>,

#[serde(rename = "requests.total_received")]
#[serde(rename = "requests.max_limit")]
total_received: usize,

// a call on ../documents/:doc_id

@@ -39,7 +39,6 @@ use meilisearch_types::versioning::{check_version_file, create_version_file};
use meilisearch_types::{compression, milli, VERSION_FILE_NAME};
pub use option::Opt;
use option::ScheduleSnapshot;
use zookeeper_client as zk;

use crate::error::MeilisearchHttpError;

@@ -137,17 +136,14 @@ enum OnFailure {
KeepDb,
}

pub async fn setup_meilisearch(
opt: &Opt,
zk: Option<zk::Client>,
) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
let empty_db = is_empty_db(&opt.db_path);
let (index_scheduler, auth_controller) = if let Some(ref snapshot_path) = opt.import_snapshot {
let snapshot_path_exists = snapshot_path.exists();
// the db is empty and the snapshot exists, import it
if empty_db && snapshot_path_exists {
match compression::from_tar_gz(snapshot_path, &opt.db_path) {
Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk).await?,
Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?,
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
return Err(e);
@@ -164,14 +160,14 @@ pub async fn setup_meilisearch(
bail!("snapshot doesn't exist at {}", snapshot_path.display())
// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
} else {
open_or_create_database(opt, empty_db, zk).await?
open_or_create_database(opt, empty_db)?
}
} else if let Some(ref path) = opt.import_dump {
let src_path_exists = path.exists();
// the db is empty and the dump exists, import it
if empty_db && src_path_exists {
let (mut index_scheduler, mut auth_controller) =
open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk).await?;
open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?;
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
Ok(()) => (index_scheduler, auth_controller),
Err(e) => {
@@ -191,10 +187,10 @@ pub async fn setup_meilisearch(
// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
} else {
open_or_create_database(opt, empty_db, zk).await?
open_or_create_database(opt, empty_db)?
}
} else {
open_or_create_database(opt, empty_db, zk).await?
open_or_create_database(opt, empty_db)?
};

// We create a loop in a thread that registers snapshotCreation tasks
@@ -218,14 +214,13 @@ pub async fn setup_meilisearch(
}

/// Try to start the IndexScheduler and AuthController without checking the VERSION file or anything.
async fn open_or_create_database_unchecked(
fn open_or_create_database_unchecked(
opt: &Opt,
on_failure: OnFailure,
zk: Option<zk::Client>,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
// we don't want to create anything in the data.ms yet, thus we
// wrap our two builders in a closure that'll be executed later.
let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, zk);
let auth_controller = AuthController::new(&opt.db_path, &opt.master_key);
let instance_features = opt.to_instance_features();
let index_scheduler_builder = || -> anyhow::Result<_> {
Ok(IndexScheduler::new(IndexSchedulerOptions {
@@ -250,7 +245,7 @@ async fn open_or_create_database_unchecked(

match (
index_scheduler_builder(),
auth_controller.await.map_err(anyhow::Error::from),
auth_controller.map_err(anyhow::Error::from),
create_version_file(&opt.db_path).map_err(anyhow::Error::from),
) {
(Ok(i), Ok(a), Ok(())) => Ok((i, a)),
@@ -264,16 +259,15 @@ async fn open_or_create_database_unchecked(
}

/// Ensure you're in a valid state and open the IndexScheduler + AuthController for you.
async fn open_or_create_database(
fn open_or_create_database(
opt: &Opt,
empty_db: bool,
zk: Option<zk::Client>,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
if !empty_db {
check_version_file(&opt.db_path)?;
}

open_or_create_database_unchecked(opt, OnFailure::KeepDb, zk).await
open_or_create_database_unchecked(opt, OnFailure::KeepDb)
}

fn import_dump(

@@ -1,5 +1,5 @@
use std::env;
use std::io::{stderr, Write};
use std::io::Write;
use std::path::PathBuf;
use std::sync::Arc;

@@ -7,12 +7,10 @@ use actix_web::http::KeepAlive;
use actix_web::web::Data;
use actix_web::HttpServer;
use index_scheduler::IndexScheduler;
use is_terminal::IsTerminal;
use meilisearch::analytics::Analytics;
use meilisearch::{analytics, create_app, prototype_name, setup_meilisearch, Opt};
use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
use zookeeper_client as zk;

#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
@@ -64,11 +62,7 @@ async fn main() -> anyhow::Result<()> {
_ => (),
}

let zk = match opt.zk_url {
Some(ref url) => Some(zk::Client::connect(url).await.unwrap()),
None => None,
};
let (index_scheduler, auth_controller) = setup_meilisearch(&opt, zk).await?;
let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;

#[cfg(all(not(debug_assertions), feature = "analytics"))]
let analytics = if !opt.no_analytics {
@@ -196,7 +190,7 @@ Anonymous telemetry:\t\"Enabled\""
}

eprintln!();
eprintln!("Check out Meilisearch Cloud!\thttps://www.meilisearch.com/cloud?utm_campaign=oss&utm_source=engine&utm_medium=cli");
eprintln!("Check out Meilisearch Cloud!\thttps://cloud.meilisearch.com/login?utm_campaign=oss&utm_source=engine&utm_medium=cli");
eprintln!("Documentation:\t\t\thttps://www.meilisearch.com/docs");
eprintln!("Source code:\t\t\thttps://github.com/meilisearch/meilisearch");
eprintln!("Discord:\t\t\thttps://discord.meilisearch.com");
@@ -207,7 +201,8 @@ const WARNING_BG_COLOR: Option<Color> = Some(Color::Ansi256(178));
const WARNING_FG_COLOR: Option<Color> = Some(Color::Ansi256(0));

fn print_master_key_too_short_warning() {
let choice = if stderr().is_terminal() { ColorChoice::Auto } else { ColorChoice::Never };
let choice =
if atty::is(atty::Stream::Stderr) { ColorChoice::Auto } else { ColorChoice::Never };
let mut stderr = StandardStream::stderr(choice);
stderr
.set_color(
@@ -232,7 +227,8 @@ fn print_master_key_too_short_warning() {
}

fn print_missing_master_key_warning() {
let choice = if stderr().is_terminal() { ColorChoice::Auto } else { ColorChoice::Never };
let choice =
if atty::is(atty::Stream::Stderr) { ColorChoice::Auto } else { ColorChoice::Never };
let mut stderr = StandardStream::stderr(choice);
stderr
.set_color(

@@ -50,10 +50,4 @@ lazy_static! {
&["kind", "value"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_LAST_UPDATE: IntGauge =
register_int_gauge!(opts!("meilisearch_last_update", "Meilisearch Last Update"))
.expect("Can't create a metric");
pub static ref MEILISEARCH_IS_INDEXING: IntGauge =
register_int_gauge!(opts!("meilisearch_is_indexing", "Meilisearch Is Indexing"))
.expect("Can't create a metric");
}

@@ -28,7 +28,6 @@ const MEILI_DB_PATH: &str = "MEILI_DB_PATH";
const MEILI_HTTP_ADDR: &str = "MEILI_HTTP_ADDR";
const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY";
const MEILI_ENV: &str = "MEILI_ENV";
const MEILI_ZK_URL: &str = "MEILI_ZK_URL";
#[cfg(all(not(debug_assertions), feature = "analytics"))]
const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS";
const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT";
@@ -155,11 +154,6 @@ pub struct Opt {
#[serde(default = "default_env")]
pub env: String,

/// Sets the HTTP address and port used to communicate with the zookeeper cluster.
/// If ran locally, the default url is `http://localhost:2181/`.
#[clap(long, env = MEILI_ZK_URL)]
pub zk_url: Option<String>,

/// Deactivates Meilisearch's built-in telemetry when provided.
///
/// Meilisearch automatically collects data from all instances that do not opt out using this flag.
@@ -374,7 +368,6 @@ impl Opt {
http_addr,
master_key,
env,
zk_url,
max_index_size: _,
max_task_db_size: _,
http_payload_size_limit,
@@ -408,9 +401,6 @@ impl Opt {
export_to_env_if_not_present(MEILI_MASTER_KEY, master_key);
}
export_to_env_if_not_present(MEILI_ENV, env);
if let Some(zk_url) = zk_url {
export_to_env_if_not_present(MEILI_ZK_URL, zk_url);
}
#[cfg(all(not(debug_assertions), feature = "analytics"))]
{
export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
@@ -725,10 +715,6 @@ fn default_env() -> String {
DEFAULT_ENV.to_string()
}

pub fn default_zk_url() -> String {
DEFAULT_HTTP_ADDR.to_string()
}

fn default_max_index_size() -> Byte {
Byte::from_bytes(INDEX_SIZE)
}

@@ -41,10 +41,14 @@ pub async fn create_api_key(
_req: HttpRequest,
) -> Result<HttpResponse, ResponseError> {
let v = body.into_inner();
let key = auth_controller.create_key(v).await?;
let key = KeyView::from_key(key, &auth_controller);
let res = tokio::task::spawn_blocking(move || -> Result<_, AuthControllerError> {
let key = auth_controller.create_key(v)?;
Ok(KeyView::from_key(key, &auth_controller))
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;

Ok(HttpResponse::Created().json(key))
Ok(HttpResponse::Created().json(res))
}

#[derive(Deserr, Debug, Clone, Copy)]
@@ -106,11 +110,17 @@ pub async fn patch_api_key(
) -> Result<HttpResponse, ResponseError> {
let key = path.into_inner().key;
let patch_api_key = body.into_inner();
let uid = Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
let key = auth_controller.update_key(uid, patch_api_key).await?;
let key = KeyView::from_key(key, &auth_controller);
let res = tokio::task::spawn_blocking(move || -> Result<_, AuthControllerError> {
let uid =
Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
let key = auth_controller.update_key(uid, patch_api_key)?;

Ok(HttpResponse::Ok().json(key))
Ok(KeyView::from_key(key, &auth_controller))
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;

Ok(HttpResponse::Ok().json(res))
}

pub async fn delete_api_key(
@@ -118,8 +128,13 @@ pub async fn delete_api_key(
path: web::Path<AuthParam>,
) -> Result<HttpResponse, ResponseError> {
let key = path.into_inner().key;
let uid = Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
auth_controller.delete_key(uid).await?;
tokio::task::spawn_blocking(move || {
let uid =
Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
auth_controller.delete_key(uid)
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;

Ok(HttpResponse::NoContent().finish())
}

@@ -64,20 +64,7 @@ async fn patch_features(
vector_store: new_features.0.vector_store.unwrap_or(old_features.vector_store),
};

// explicitly destructure for analytics rather than using the `Serialize` implementation, because
// the it renames to camelCase, which we don't want for analytics.
// **Do not** ignore fields with `..` or `_` here, because we want to add them in the future.
let meilisearch_types::features::RuntimeTogglableFeatures { score_details, vector_store } =
new_features;

analytics.publish(
"Experimental features Updated".to_string(),
json!({
"score_details": score_details,
"vector_store": vector_store,
}),
Some(&req),
);
analytics.publish("Experimental features Updated".to_string(), json!(new_features), Some(&req));
index_scheduler.put_runtime_features(new_features)?;
Ok(HttpResponse::Ok().json(new_features))
}

@@ -35,7 +35,7 @@ pub struct SearchQueryGet {
#[deserr(default, error = DeserrQueryParamError<InvalidSearchQ>)]
q: Option<String>,
#[deserr(default, error = DeserrQueryParamError<InvalidSearchVector>)]
vector: Option<CS<f32>>,
vector: Option<Vec<f32>>,
#[deserr(default = Param(DEFAULT_SEARCH_OFFSET()), error = DeserrQueryParamError<InvalidSearchOffset>)]
offset: Param<usize>,
#[deserr(default = Param(DEFAULT_SEARCH_LIMIT()), error = DeserrQueryParamError<InvalidSearchLimit>)]
@@ -88,7 +88,7 @@ impl From<SearchQueryGet> for SearchQuery {

Self {
q: other.q,
vector: other.vector.map(CS::into_inner),
vector: other.vector,
offset: other.offset.0,
limit: other.limit.0,
page: other.page.as_deref().copied(),

@@ -5,7 +5,6 @@ use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_types::deserr::DeserrJsonError;
use meilisearch_types::error::ResponseError;
use meilisearch_types::facet_values_sort::FacetValuesSort;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::settings::{settings, RankingRuleView, Settings, Unchecked};
use meilisearch_types::tasks::KindWithContent;
@@ -310,6 +309,81 @@ make_setting_route!(
}
);

make_setting_route!(
"/non-separator-tokens",
put,
std::collections::BTreeSet<String>,
meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsNonSeparatorTokens,
>,
non_separator_tokens,
"nonSeparatorTokens",
analytics,
|non_separator_tokens: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;

analytics.publish(
"nonSeparatorTokens Updated".to_string(),
json!({
"non_separator_tokens": {
"total": non_separator_tokens.as_ref().map(|non_separator_tokens| non_separator_tokens.len()),
},
}),
Some(req),
);
}
);

make_setting_route!(
"/separator-tokens",
put,
std::collections::BTreeSet<String>,
meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsSeparatorTokens,
>,
separator_tokens,
"separatorTokens",
analytics,
|separator_tokens: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;

analytics.publish(
"separatorTokens Updated".to_string(),
json!({
"separator_tokens": {
"total": separator_tokens.as_ref().map(|separator_tokens| separator_tokens.len()),
},
}),
Some(req),
);
}
);

make_setting_route!(
"/dictionary",
put,
std::collections::BTreeSet<String>,
meilisearch_types::deserr::DeserrJsonError<
meilisearch_types::error::deserr_codes::InvalidSettingsDictionary,
>,
dictionary,
"dictionary",
analytics,
|dictionary: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;

analytics.publish(
"dictionary Updated".to_string(),
json!({
"dictionary": {
"total": dictionary.as_ref().map(|dictionary| dictionary.len()),
},
}),
Some(req),
);
}
);

make_setting_route!(
"/synonyms",
put,
@@ -551,16 +625,10 @@ pub async fn update_all(
.as_ref()
.set()
.and_then(|s| s.max_values_per_facet.as_ref().set()),
"sort_facet_values_by_star_count": new_settings.faceting
"sort_facet_values_by": new_settings.faceting
.as_ref()
.set()
.and_then(|s| {
s.sort_facet_values_by.as_ref().set().map(|s| s.iter().any(|(k, v)| k == "*" && v == &FacetValuesSort::Count))
}),
"sort_facet_values_by_total": new_settings.faceting
.as_ref()
.set()
.and_then(|s| s.sort_facet_values_by.as_ref().set().map(|s| s.len())),
.and_then(|s| s.sort_facet_values_by.as_ref().set()),
},
"pagination": {
"max_total_hits": new_settings.pagination

@@ -49,11 +49,6 @@ pub async fn get_metrics(
}
}

if let Some(last_update) = response.last_update {
crate::metrics::MEILISEARCH_LAST_UPDATE.set(last_update.unix_timestamp());
}
crate::metrics::MEILISEARCH_IS_INDEXING.set(index_scheduler.is_task_processing()? as i64);

let encoder = TextEncoder::new();
let mut buffer = vec![];
encoder.encode(&prometheus::gather(), &mut buffer).expect("Failed to encode metrics");

@@ -284,6 +284,9 @@ pub fn create_all_stats(
used_database_size += index_scheduler.used_size()?;
database_size += auth_controller.size()?;
used_database_size += auth_controller.used_size()?;
let update_file_size = index_scheduler.compute_update_file_size()?;
database_size += update_file_size;
used_database_size += update_file_size;

let stats = Stats { database_size, used_database_size, last_update: last_task, indexes };
Ok(stats)

@@ -325,7 +325,7 @@ async fn cancel_tasks(

let query = params.into_query();

let (tasks, _) = index_scheduler.get_task_ids_from_authorized_indexes(
let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
&index_scheduler.read_txn()?,
&query,
index_scheduler.filters(),
@@ -370,7 +370,7 @@ async fn delete_tasks(
);
let query = params.into_query();

let (tasks, _) = index_scheduler.get_task_ids_from_authorized_indexes(
let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
&index_scheduler.read_txn()?,
&query,
index_scheduler.filters(),
@@ -387,7 +387,6 @@ async fn delete_tasks(
#[derive(Debug, Serialize)]
pub struct AllTasks {
results: Vec<TaskView>,
total: u64,
limit: u32,
from: Option<u32>,
next: Option<u32>,
@@ -407,17 +406,23 @@ async fn get_tasks(
let limit = params.limit.0;
let query = params.into_query();

let filters = index_scheduler.filters();
let (tasks, total) = index_scheduler.get_tasks_from_authorized_indexes(query, filters)?;
let mut results: Vec<_> = tasks.iter().map(TaskView::from_task).collect();
let mut tasks_results: Vec<TaskView> = index_scheduler
.get_tasks_from_authorized_indexes(query, index_scheduler.filters())?
.into_iter()
.map(|t| TaskView::from_task(&t))
.collect();

// If we were able to fetch the number +1 tasks we asked
// it means that there is more to come.
let next = if results.len() == limit as usize { results.pop().map(|t| t.uid) } else { None };
let next = if tasks_results.len() == limit as usize {
tasks_results.pop().map(|t| t.uid)
} else {
None
};

let from = results.first().map(|t| t.uid);
let tasks = AllTasks { results, limit: limit.saturating_sub(1), total, from, next };
let from = tasks_results.first().map(|t| t.uid);

let tasks = AllTasks { results: tasks_results, limit: limit.saturating_sub(1), from, next };
Ok(HttpResponse::Ok().json(tasks))
}

@@ -439,10 +444,10 @@ async fn get_task(
analytics.publish("Tasks Seen".to_string(), json!({ "per_task_uid": true }), Some(&req));

let query = index_scheduler::Query { uids: Some(vec![task_uid]), ..Query::default() };
let filters = index_scheduler.filters();
let (tasks, _) = index_scheduler.get_tasks_from_authorized_indexes(query, filters)?;

if let Some(task) = tasks.first() {
if let Some(task) =
index_scheduler.get_tasks_from_authorized_indexes(query, index_scheduler.filters())?.first()
{
let task_view = TaskView::from_task(task);
Ok(HttpResponse::Ok().json(task_view))
} else {

@@ -491,6 +491,20 @@ pub fn perform_search(
tokenizer_builder.allow_list(&script_lang_map);
}

let separators = index.allowed_separators(&rtxn)?;
let separators: Option<Vec<_>> =
separators.as_ref().map(|x| x.iter().map(String::as_str).collect());
if let Some(ref separators) = separators {
tokenizer_builder.separators(separators);
}

let dictionary = index.dictionary(&rtxn)?;
let dictionary: Option<Vec<_>> =
dictionary.as_ref().map(|x| x.iter().map(String::as_str).collect());
if let Some(ref dictionary) = dictionary {
tokenizer_builder.words_dict(dictionary);
}

let mut formatter_builder = MatcherBuilder::new(matching_words, tokenizer_builder.build());
formatter_builder.crop_marker(query.crop_marker);
formatter_builder.highlight_prefix(query.highlight_pre_tag);

@@ -61,8 +61,6 @@ pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'
("DELETE", "/keys/mykey/") => hashset!{"keys.delete", "*"},
("POST", "/keys") => hashset!{"keys.create", "*"},
("GET", "/keys") => hashset!{"keys.get", "*"},
("GET", "/experimental-features") => hashset!{"experimental.get", "*"},
("PATCH", "/experimental-features") => hashset!{"experimental.update", "*"},
};

authorizations

@@ -39,7 +39,7 @@ impl Server {

let options = default_settings(dir.path());

let (index_scheduler, auth) = setup_meilisearch(&options, None).await.unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };

Server { service, _dir: Some(dir) }
@@ -54,7 +54,7 @@ impl Server {

options.master_key = Some("MASTER_KEY".to_string());

let (index_scheduler, auth) = setup_meilisearch(&options, None).await.unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };

Server { service, _dir: Some(dir) }
@@ -67,7 +67,7 @@ impl Server {
}

pub async fn new_with_options(options: Opt) -> Result<Self, anyhow::Error> {
let (index_scheduler, auth) = setup_meilisearch(&options, None).await?;
let (index_scheduler, auth) = setup_meilisearch(&options)?;
let service = Service { index_scheduler, auth, options, api_key: None };

Ok(Server { service, _dir: None })
@@ -189,14 +189,6 @@ impl Server {
let url = format!("/tasks/{}", update_id);
self.service.get(url).await
}

pub async fn get_features(&self) -> (Value, StatusCode) {
self.service.get("/experimental-features").await
}

pub async fn set_features(&self, value: Value) -> (Value, StatusCode) {
self.service.patch("/experimental-features", value).await
}
}

pub fn default_settings(dir: impl AsRef<Path>) -> Opt {

File diff suppressed because it is too large
@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:08:54.713227Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:08:54.713227Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:08:54.713227Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:08:54.713227Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:08:54.713227Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:08:54.713227Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:08:54.713227Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:34:25.351927Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:34:25.351927Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -40,7 +40,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:34:48.1719Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -40,7 +40,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:34:48.1719Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -40,7 +40,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:34:48.1719Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -40,7 +40,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:34:48.1719Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -40,7 +40,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:34:48.1719Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -45,7 +45,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:26:57.319083Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:28:46.369971Z"
}
],
"total": 92,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:28:46.369971Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:28:46.369971Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:28:46.369971Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:28:46.369971Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T09:28:46.369971Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:21:54.691484Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:21:54.691484Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -41,7 +41,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:40:28.669652Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 92,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:31:12.304168Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:21:54.691484Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:21:54.691484Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -36,7 +36,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:24:39.812922Z"
}
],
"total": 2,
"limit": 1,
"from": 1,
"next": 0

@@ -41,7 +41,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:40:28.669652Z"
}
],
"total": 1,
"limit": 1,
"from": 0,
"next": null

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 92,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

@@ -20,7 +20,6 @@ source: meilisearch/tests/dumps/mod.rs
"finishedAt": "2021-09-08T08:51:53.095314Z"
}
],
"total": 93,
"limit": 1,
"from": 92,
"next": 91

Some files were not shown because too many files have changed in this diff