Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-15 00:46:56 +00:00)
Compare commits: proper-def...change-net (12 commits)
| SHA1 |
|---|
| 9bd3482230 |
| d6e4e414d7 |
| c0617efe76 |
| 8316c36648 |
| 572bae9da1 |
| 2a330dce83 |
| d62a6b6f0d |
| 58b8630862 |
| 0703767fc6 |
| e0c97325d6 |
| 0f3ef8de73 |
| 7313cefd74 |
Cargo.lock (generated)
@@ -3254,8 +3254,10 @@ dependencies = [
  "enum-iterator",
  "file-store",
  "flate2",
+ "hashbrown 0.15.5",
  "indexmap",
  "insta",
+ "itertools 0.14.0",
  "maplit",
  "meili-snap",
  "meilisearch-auth",
@@ -3270,6 +3272,7 @@ dependencies = [
  "tempfile",
  "thiserror 2.0.16",
  "time",
+ "tokio",
  "tracing",
  "ureq",
  "uuid",
@@ -158,6 +158,10 @@ pub enum KindDump {
     UpgradeDatabase {
         from: (u32, u32, u32),
     },
+    NetworkTopologyChange {
+        network: Option<meilisearch_types::enterprise_edition::network::Network>,
+        origin: Option<meilisearch_types::tasks::Origin>,
+    },
 }

 impl From<Task> for TaskDump {
@@ -240,6 +244,9 @@ impl From<KindWithContent> for KindDump {
             KindWithContent::UpgradeDatabase { from: version } => {
                 KindDump::UpgradeDatabase { from: version }
             }
+            KindWithContent::NetworkTopologyChange { network, origin } => {
+                KindDump::NetworkTopologyChange { network, origin }
+            }
         }
     }
 }
@@ -253,7 +260,7 @@ pub(crate) mod test {
     use big_s::S;
     use maplit::{btreemap, btreeset};
     use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
-    use meilisearch_types::enterprise_edition::network::{Network, Remote};
+    use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
     use meilisearch_types::facet_values_sort::FacetValuesSort;
     use meilisearch_types::features::RuntimeTogglableFeatures;
     use meilisearch_types::index_uid_pattern::IndexUidPattern;
@@ -544,10 +551,10 @@ pub(crate) mod test {
         RuntimeTogglableFeatures::default()
     }

-    fn create_test_network() -> Network {
-        Network {
+    fn create_test_network() -> DbNetwork {
+        DbNetwork {
             local: Some("myself".to_string()),
-            remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
+            remotes: maplit::btreemap! {"other".to_string() => DbRemote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
             sharding: false,
         }
     }
@@ -24,7 +24,7 @@ pub type Batch = meilisearch_types::batches::Batch;
 pub type Key = meilisearch_types::keys::Key;
 pub type ChatCompletionSettings = meilisearch_types::features::ChatCompletionSettings;
 pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;
-pub type Network = meilisearch_types::enterprise_edition::network::Network;
+pub type Network = meilisearch_types::enterprise_edition::network::DbNetwork;
 pub type Webhooks = meilisearch_types::webhooks::WebhooksDumpView;

 // ===== Other types to clarify the code of the compat module
@@ -5,7 +5,7 @@ use std::path::PathBuf;
 use flate2::write::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
-use meilisearch_types::enterprise_edition::network::Network;
+use meilisearch_types::enterprise_edition::network::DbNetwork;
 use meilisearch_types::features::{ChatCompletionSettings, RuntimeTogglableFeatures};
 use meilisearch_types::keys::Key;
 use meilisearch_types::settings::{Checked, Settings};
@@ -72,7 +72,7 @@ impl DumpWriter {
         )?)
     }

-    pub fn create_network(&self, network: Network) -> Result<()> {
+    pub fn create_network(&self, network: DbNetwork) -> Result<()> {
         Ok(std::fs::write(self.dir.path().join("network.json"), serde_json::to_string(&network)?)?)
     }
@@ -23,6 +23,7 @@ dump = { path = "../dump" }
 enum-iterator = "2.1.0"
 file-store = { path = "../file-store" }
 flate2 = "1.1.2"
+hashbrown = "0.15.4"
 indexmap = "2.9.0"
 meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
@@ -45,6 +46,8 @@ tracing = "0.1.41"
 ureq = "2.12.1"
 uuid = { version = "1.17.0", features = ["serde", "v4"] }
 backoff = "0.4.0"
+itertools = "0.14.0"
+tokio = { version = "1.47.1", features = ["full"] }

 [dev-dependencies]
 big_s = "1.0.2"
@@ -234,6 +234,9 @@ impl<'a> Dump<'a> {
                     }
                 }
                 KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
+                KindDump::NetworkTopologyChange { network: new_network, origin } => {
+                    KindWithContent::NetworkTopologyChange { network: new_network, origin }
+                }
             },
         };
@@ -1,6 +1,6 @@
 use std::sync::{Arc, RwLock};

-use meilisearch_types::enterprise_edition::network::Network;
+use meilisearch_types::enterprise_edition::network::DbNetwork;
 use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
 use meilisearch_types::heed::types::{SerdeJson, Str};
 use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
@@ -24,7 +24,7 @@ mod db_keys {
 pub(crate) struct FeatureData {
     persisted: Database<Str, SerdeJson<RuntimeTogglableFeatures>>,
     runtime: Arc<RwLock<RuntimeTogglableFeatures>>,
-    network: Arc<RwLock<Network>>,
+    network: Arc<RwLock<DbNetwork>>,
 }

 #[derive(Debug, Clone, Copy)]
@@ -197,8 +197,8 @@ impl FeatureData {
         }));

         // Once this is stabilized, network should be stored along with webhooks in index-scheduler's persisted database
-        let network_db = runtime_features_db.remap_data_type::<SerdeJson<Network>>();
-        let network: Network = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();
+        let network_db = runtime_features_db.remap_data_type::<SerdeJson<DbNetwork>>();
+        let network: DbNetwork = network_db.get(wtxn, db_keys::NETWORK)?.unwrap_or_default();

         Ok(Self {
             persisted: runtime_features_db,
@@ -234,8 +234,8 @@ impl FeatureData {
         RoFeatures::new(self)
     }

-    pub fn put_network(&self, mut wtxn: RwTxn, new_network: Network) -> Result<()> {
-        self.persisted.remap_data_type::<SerdeJson<Network>>().put(
+    pub fn put_network(&self, mut wtxn: RwTxn, new_network: DbNetwork) -> Result<()> {
+        self.persisted.remap_data_type::<SerdeJson<DbNetwork>>().put(
             &mut wtxn,
             db_keys::NETWORK,
             &new_network,
@@ -247,7 +247,7 @@ impl FeatureData {
         Ok(())
     }

-    pub fn network(&self) -> Network {
-        Network::clone(&*self.network.read().unwrap())
+    pub fn network(&self) -> DbNetwork {
+        DbNetwork::clone(&*self.network.read().unwrap())
     }
 }
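FeatureData keeps the network value twice: persisted as JSON in the heed database under db_keys::NETWORK, and cached in an Arc<RwLock<DbNetwork>> so that network() never touches storage. A minimal sketch of that shape, using a plain HashMap as a stand-in for heed (the update of the cached copy after put is outside the visible hunk, so it is assumed here):

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Illustrative stand-in; the real code serializes DbNetwork as JSON into LMDB via heed.
#[derive(Clone, Default, Debug)]
struct DbNetwork {
    local: Option<String>,
    sharding: bool,
}

struct FeatureData {
    persisted: RwLock<HashMap<&'static str, DbNetwork>>, // plays the role of the persisted database
    runtime: Arc<RwLock<DbNetwork>>,                      // cached copy answering reads
}

impl FeatureData {
    fn put_network(&self, new_network: DbNetwork) {
        // write-through: update durable storage, then the in-memory cache
        self.persisted.write().unwrap().insert("network", new_network.clone());
        *self.runtime.write().unwrap() = new_network;
    }

    fn network(&self) -> DbNetwork {
        // reads are served from the cache, mirroring DbNetwork::clone(&*self.network.read().unwrap())
        self.runtime.read().unwrap().clone()
    }
}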
@@ -36,6 +36,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
         run_loop_iteration: _,
         embedders: _,
         chat_settings: _,
+        runtime: _,
     } = scheduler;

     let rtxn = env.read_txn().unwrap();
@@ -317,6 +318,9 @@ fn snapshot_details(d: &Details) -> String {
         Details::UpgradeDatabase { from, to } => {
             format!("{{ from: {from:?}, to: {to:?} }}")
         }
+        Details::NetworkTopologyChange { network: new_network } => {
+            format!("{{ new_network: {new_network:?} }}")
+        }
     }
 }
@@ -54,7 +54,7 @@ pub use features::RoFeatures;
 use flate2::bufread::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
-use meilisearch_types::enterprise_edition::network::Network;
+use meilisearch_types::enterprise_edition::network::DbNetwork;
 use meilisearch_types::features::{
     ChatCompletionSettings, InstanceTogglableFeatures, RuntimeTogglableFeatures,
 };
@@ -216,6 +216,8 @@ pub struct IndexScheduler {
     /// A counter that is incremented before every call to [`tick`](IndexScheduler::tick)
     #[cfg(test)]
     run_loop_iteration: Arc<RwLock<usize>>,
+
+    runtime: Option<tokio::runtime::Handle>,
 }

 impl IndexScheduler {
@@ -242,6 +244,7 @@ impl IndexScheduler {
             run_loop_iteration: self.run_loop_iteration.clone(),
             features: self.features.clone(),
             chat_settings: self.chat_settings,
+            runtime: self.runtime.clone(),
         }
     }

@@ -260,6 +263,7 @@ impl IndexScheduler {
         options: IndexSchedulerOptions,
         auth_env: Env<WithoutTls>,
         from_db_version: (u32, u32, u32),
+        runtime: Option<tokio::runtime::Handle>,
         #[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
         #[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
     ) -> Result<Self> {
@@ -341,6 +345,7 @@ impl IndexScheduler {
             run_loop_iteration: Arc::new(RwLock::new(0)),
             features,
             chat_settings,
+            runtime,
         };

         this.run();
@@ -892,13 +897,13 @@ impl IndexScheduler {
         Ok(())
     }

-    pub fn put_network(&self, network: Network) -> Result<()> {
+    pub fn put_network(&self, network: DbNetwork) -> Result<()> {
         let wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
         self.features.put_network(wtxn, network)?;
         Ok(())
     }

-    pub fn network(&self) -> Network {
+    pub fn network(&self) -> DbNetwork {
         self.features.network()
     }

@@ -927,9 +932,10 @@ impl IndexScheduler {

     pub fn embedders(
         &self,
-        index_uid: String,
+        index_uid: &str,
         embedding_configs: Vec<IndexEmbeddingConfig>,
     ) -> Result<RuntimeEmbedders> {
+        let err = |err| Error::from_milli(err, Some(index_uid.to_owned()));
         let res: Result<_> = embedding_configs
             .into_iter()
             .map(
@@ -942,7 +948,7 @@ impl IndexScheduler {
                     let document_template = prompt
                         .try_into()
                         .map_err(meilisearch_types::milli::Error::from)
-                        .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?;
+                        .map_err(err)?;

                     let fragments = fragments
                         .into_inner()
@@ -972,9 +978,8 @@ impl IndexScheduler {
                     let embedder = Arc::new(
                         Embedder::new(embedder_options.clone(), self.scheduler.embedding_cache_cap)
                             .map_err(meilisearch_types::milli::vector::Error::from)
-                            .map_err(|err| {
-                                Error::from_milli(err.into(), Some(index_uid.clone()))
-                            })?,
+                            .map_err(milli::Error::from)
+                            .map_err(err)?,
                     );
                     {
                         let mut embedders = self.embedders.write().unwrap();
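Several of these changes replace repeated map_err(|err| Error::from_milli(err, Some(index_uid.clone()))) closures with one reusable err closure that captures the index uid once. A small stand-alone sketch of the pattern, with placeholder types rather than the real meilisearch error types:

#[derive(Debug)]
struct SchedulerError {
    index_uid: Option<String>,
    message: String,
}

fn parse_setting(raw: &str) -> Result<u32, String> {
    raw.parse::<u32>().map_err(|e| e.to_string())
}

fn load(index_uid: &str, raw: &str) -> Result<u32, SchedulerError> {
    // One closure captures the index uid; every fallible step reuses it via map_err(err).
    let err = |message: String| SchedulerError { index_uid: Some(index_uid.to_owned()), message };
    let value = parse_setting(raw).map_err(err)?;
    Ok(value)
}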
@@ -73,6 +73,7 @@ impl From<KindWithContent> for AutobatchKind {
             | KindWithContent::DumpCreation { .. }
             | KindWithContent::Export { .. }
             | KindWithContent::UpgradeDatabase { .. }
+            | KindWithContent::NetworkTopologyChange { .. }
             | KindWithContent::SnapshotCreation => {
                 panic!("The autobatcher should never be called with tasks that don't apply to an index.")
             }
@@ -55,6 +55,9 @@ pub(crate) enum Batch {
     UpgradeDatabase {
         tasks: Vec<Task>,
     },
+    NetworkTopologyChanges {
+        tasks: Vec<Task>,
+    },
 }

 #[derive(Debug)]
@@ -116,7 +119,8 @@ impl Batch {
             Batch::SnapshotCreation(tasks)
             | Batch::TaskDeletions(tasks)
             | Batch::UpgradeDatabase { tasks }
-            | Batch::IndexDeletion { tasks, .. } => {
+            | Batch::IndexDeletion { tasks, .. }
+            | Batch::NetworkTopologyChanges { tasks } => {
                 RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
             }
             Batch::IndexOperation { op, .. } => match op {
@@ -151,6 +155,7 @@ impl Batch {
             | Dump(_)
             | Export { .. }
             | UpgradeDatabase { .. }
+            | NetworkTopologyChanges { .. }
             | IndexSwap { .. } => None,
             IndexOperation { op, .. } => Some(op.index_uid()),
             IndexCreation { index_uid, .. }
@@ -176,6 +181,7 @@ impl fmt::Display for Batch {
             Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
             Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
             Batch::Export { .. } => f.write_str("Export")?,
+            Batch::NetworkTopologyChanges { .. } => f.write_str("NetworkTopologyChange")?,
             Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
         };
         match index_uid {
@@ -545,7 +551,18 @@ impl IndexScheduler {
             return Ok(Some((Batch::Dump(task), current_batch)));
         }

-        // 6. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
+        // 6. We batch the network changes.
+        let to_network = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)? & enqueued;
+        if !to_network.is_empty() {
+            let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_network)?;
+            current_batch.processing(&mut tasks);
+            current_batch.reason(BatchStopReason::TaskKindCannotBeBatched {
+                kind: Kind::NetworkTopologyChange,
+            });
+            return Ok(Some((Batch::NetworkTopologyChanges { tasks }, current_batch)));
+        }
+
+        // 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
         let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
         let mut task =
             self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
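The new batching step relies on RoaringBitmap set operations: get_kind returns the uids of every task of one kind and enqueued holds the uids still waiting, so their intersection is exactly the set of network-topology tasks to batch. A minimal sketch of that intersection (the function name here is illustrative, not the scheduler's API):

use roaring::RoaringBitmap;

// Illustrative only: intersect "tasks of this kind" with "tasks still enqueued".
fn network_tasks_to_batch(of_kind: &RoaringBitmap, enqueued: &RoaringBitmap) -> RoaringBitmap {
    // `&` on RoaringBitmap references is a set intersection producing a new bitmap.
    of_kind & enqueued
}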
@@ -0,0 +1,6 @@
+// Copyright © 2025 Meilisearch Some Rights Reserved
+// This file is part of Meilisearch Enterprise Edition (EE).
+// Use of this source code is governed by the Business Source License 1.1,
+// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
+
+mod process_network;
@@ -0,0 +1,362 @@
+// Copyright © 2025 Meilisearch Some Rights Reserved
+// This file is part of Meilisearch Enterprise Edition (EE).
+// Use of this source code is governed by the Business Source License 1.1,
+// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
+
+use std::collections::BTreeMap;
+use std::time::Duration;
+
+use bumpalo::Bump;
+use itertools::{EitherOrBoth, Itertools};
+use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote, Network, Remote};
+use meilisearch_types::milli::documents::PrimaryKey;
+use meilisearch_types::milli::progress::{EmbedderStats, Progress};
+use meilisearch_types::milli::update::new::indexer;
+use meilisearch_types::milli::update::Setting;
+use meilisearch_types::milli::{self};
+use meilisearch_types::tasks::{KindWithContent, Status, Task};
+use roaring::RoaringBitmap;
+
+use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
+use crate::{Error, IndexScheduler};
+
+impl IndexScheduler {
+    pub(crate) fn process_network_changes(
+        &self,
+        progress: Progress,
+        mut tasks: Vec<Task>,
+    ) -> crate::Result<Vec<Task>> {
+        let old_network = self.network();
+        let mut current_network = Some(old_network.clone());
+        for task in &tasks {
+            let KindWithContent::NetworkTopologyChange { network, origin } = &task.kind else {
+                continue;
+            };
+            current_network = match (current_network, network) {
+                (None, None) => None,
+                (None, Some(network)) => Some(accumulate(DbNetwork::default(), network.clone())?),
+                (Some(current_network), None) => Some(current_network),
+                (Some(current_network), Some(new_network)) => {
+                    Some(accumulate(current_network, new_network.clone())?)
+                }
+            };
+        }
+
+        'network: {
+            let mut new_network = current_network.unwrap_or_default();
+            if old_network == new_network {
+                // no change, exit
+                break 'network;
+            }
+
+            /// TODO: only do this if the task originates with an end-user
+            let must_replicate = old_network.sharding || new_network.sharding;
+
+            if !must_replicate {
+                self.put_network(new_network)?;
+                break 'network;
+            }
+
+            let must_stop_processing = &self.scheduler.must_stop_processing;
+
+            /// FIXME: make it mandatory for `self` to be part of the network
+            let old_this = old_network.local.as_deref();
+            /// FIXME: error here
+            let new_this = new_network.local.unwrap();
+
+            // in network replication, we need to tell old nodes that they are no longer part of the network.
+            // This is made difficult by "node aliasing": Meilisearch has no way of knowing if two nodes with different names
+            // or even different URLs actually refer to the same machine in two different versions of the network.
+            //
+            // This implementation ignores aliasing: a node is the same when it has the same name.
+            //
+            // To defeat aliasing, we iterate a first time to collect all deletions and additions, then we make sure to process the deletions
+            // first, rather than processing the tasks in the alphalexical order of remotes.
+            let mut node_deletions = Vec::new();
+            let mut node_additions = Vec::new();
+            for eob in old_network
+                .remotes
+                .iter()
+                .merge_join_by(new_network.remotes.iter(), |(left, _), (right, _)| left.cmp(right))
+            {
+                match eob {
+                    EitherOrBoth::Both((to_update_name, _), (_, new_node)) => {
+                        if to_update_name.as_str() == new_this {
+                            continue; // skip `self`
+                        }
+                        node_additions.push((to_update_name, new_node));
+                    }
+                    EitherOrBoth::Left((to_delete_name, to_delete_node)) => {
+                        if Some(to_delete_name.as_str()) == old_this {
+                            continue; // skip `self`
+                        }
+                        node_deletions.push((to_delete_name, to_delete_node));
+                    }
+                    EitherOrBoth::Right((to_add_name, to_add_node)) => {
+                        if to_add_name.as_str() == new_this {
+                            continue; // skip `self`
+                        }
+                        node_additions.push((to_add_name, to_add_node));
+                    }
+                }
+            }
+
+            let runtime = self.runtime.clone().unwrap();
+            let mut in_flight = Vec::new();
+            // process deletions
+            for (to_delete_name, to_delete) in node_deletions {
+                // set `self` to None so that this node is forgotten about
+                new_network.local = None;
+                in_flight.push(proxy_network(&runtime, to_delete.url.as_str(), &new_network)?);
+            }
+
+            runtime.block_on(async {
+                for task in in_flight.drain(..) {
+                    // TODO: log and ignore errors during deletion
+                    let res = task.await;
+                }
+            });
+
+            // process additions
+            for (to_add_name, to_add) in node_additions {
+                new_network.local = Some(to_add_name.clone());
+                in_flight.push(proxy_network(&runtime, to_add.url.as_str(), &new_network)?);
+            }
+
+            runtime.block_on(async {
+                for task in in_flight.drain(..) {
+                    // TODO: handle errors during addition
+                    let res = task.await;
+                }
+            });
+
+            // balance documents
+            new_network.local = Some(new_this);
+
+            self.balance_documents(&new_network, &progress, &must_stop_processing)?;
+
+            self.put_network(new_network)?;
+        }
+
+        for task in &mut tasks {
+            task.status = Status::Succeeded;
+        }
+        Ok(tasks)
+    }
+
+    fn balance_documents(
+        &self,
+        new_network: &DbNetwork,
+        progress: &Progress,
+        must_stop_processing: &crate::scheduler::MustStopProcessing,
+    ) -> crate::Result<()> {
+        /// FIXME unwrap
+        let new_shards = new_network.shards().unwrap();
+
+        // TECHDEBT: this spawns a `ureq` agent additionally to `reqwest`. We probably want to harmonize all of this.
+        let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
+
+        let mut indexer_alloc = Bump::new();
+
+        // process by batches of 20MiB. Allow for compression? Don't forget about embeddings
+        let _: Vec<()> = self.try_for_each_index(|index_uid, index| -> crate::Result<()> {
+            indexer_alloc.reset();
+            let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
+            let index_rtxn = index.read_txn()?;
+            let all_docids = index.external_documents_ids();
+            let mut documents_to_move_to: hashbrown::HashMap<String, RoaringBitmap> =
+                hashbrown::HashMap::new();
+            let mut documents_to_delete = RoaringBitmap::new();
+
+            for res in all_docids.iter(&index_rtxn)? {
+                let (external_docid, docid) = res?;
+                match new_shards.processing_shard(external_docid) {
+                    Some(shard) if shard.is_own => continue,
+                    Some(shard) => {
+                        documents_to_move_to
+                            .entry_ref(shard.name.as_str())
+                            .or_default()
+                            .insert(docid);
+                    }
+                    None => {
+                        documents_to_delete.insert(docid);
+                    }
+                }
+            }
+
+            let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
+
+            for (remote, documents_to_move) in documents_to_move_to {
+                /// TODO: justify the unwrap
+                let remote = new_network.remotes.get(&remote).unwrap();
+
+                let target = TargetInstance {
+                    base_url: &remote.url,
+                    api_key: remote.write_api_key.as_deref(),
+                };
+                let options = ExportOptions {
+                    index_uid,
+                    payload_size: None,
+                    override_settings: false,
+                    extra_headers: &Default::default(),
+                };
+                let ctx = ExportContext {
+                    index,
+                    index_rtxn: &index_rtxn,
+                    universe: &documents_to_move,
+                    progress,
+                    agent: &agent,
+                    must_stop_processing,
+                };
+
+                self.export_one_index(target, options, ctx)?;
+
+                documents_to_delete |= documents_to_move;
+            }
+
+            if documents_to_delete.is_empty() {
+                return Ok(());
+            }
+
+            let mut new_fields_ids_map = fields_ids_map.clone();
+
+            // candidates not empty => index not empty => a primary key is set
+            let primary_key = index.primary_key(&index_rtxn)?.unwrap();
+
+            let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
+                .map_err(milli::Error::from)
+                .map_err(err)?;
+
+            let mut index_wtxn = index.write_txn()?;
+
+            let mut indexer = indexer::DocumentDeletion::new();
+            indexer.delete_documents_by_docids(documents_to_delete);
+            let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
+            let embedders = index
+                .embedding_configs()
+                .embedding_configs(&index_wtxn)
+                .map_err(milli::Error::from)
+                .map_err(err)?;
+            let embedders = self.embedders(index_uid, embedders)?;
+            let indexer_config = self.index_mapper.indexer_config();
+            let pool = &indexer_config.thread_pool;
+
+            indexer::index(
+                &mut index_wtxn,
+                index,
+                pool,
+                indexer_config.grenad_parameters(),
+                &fields_ids_map,
+                new_fields_ids_map,
+                None, // document deletion never changes primary key
+                &document_changes,
+                embedders,
+                &|| must_stop_processing.get(),
+                &progress,
+                &EmbedderStats::default(),
+            )
+            .map_err(err)?;
+
+            index_wtxn.commit()?;
+
+            Ok(())
+        })?;
+        Ok(())
+    }
+}
+
+fn proxy_network(
+    runtime: &tokio::runtime::Handle,
+    url: &str,
+    network: &DbNetwork,
+) -> crate::Result<tokio::task::JoinHandle<()>> {
+    todo!()
+}
+
+fn accumulate(old_network: DbNetwork, new_network: Network) -> crate::Result<DbNetwork> {
+    let err = |err| Err(Error::from_milli(milli::Error::UserError(err), None));
+
+    let merged_local = match new_network.local {
+        Setting::Set(new_self) => Some(new_self),
+        Setting::Reset => None,
+        Setting::NotSet => old_network.local,
+    };
+
+    let merged_sharding = match new_network.sharding {
+        Setting::Set(new_sharding) => new_sharding,
+        Setting::Reset => false,
+        Setting::NotSet => old_network.sharding,
+    };
+
+    if merged_sharding && merged_local.is_none() {
+        return err(milli::UserError::NetworkShardingWithoutSelf);
+    }
+
+    let merged_remotes = match new_network.remotes {
+        Setting::Set(new_remotes) => {
+            let mut merged_remotes = BTreeMap::new();
+            for either_or_both in old_network
+                .remotes
+                .into_iter()
+                .merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
+            {
+                match either_or_both {
+                    EitherOrBoth::Both((name, old), (_, Some(new))) => {
+                        let DbRemote {
+                            url: old_url,
+                            search_api_key: old_search_api_key,
+                            write_api_key: old_write_api_key,
+                        } = old;
+
+                        let Remote {
+                            url: new_url,
+                            search_api_key: new_search_api_key,
+                            write_api_key: new_write_api_key,
+                        } = new;
+
+                        let merged = DbRemote {
+                            url: match new_url {
+                                Setting::Set(new_url) => new_url,
+                                Setting::Reset => {
+                                    return err(milli::UserError::NetworkMissingUrl(name))
+                                }
+                                Setting::NotSet => old_url,
+                            },
+                            search_api_key: match new_search_api_key {
+                                Setting::Set(new_search_api_key) => Some(new_search_api_key),
+                                Setting::Reset => None,
+                                Setting::NotSet => old_search_api_key,
+                            },
+                            write_api_key: match new_write_api_key {
+                                Setting::Set(new_write_api_key) => Some(new_write_api_key),
+                                Setting::Reset => None,
+                                Setting::NotSet => old_write_api_key,
+                            },
+                        };
+                        merged_remotes.insert(name, merged);
+                    }
+                    EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
+                    EitherOrBoth::Left((name, node)) => {
+                        merged_remotes.insert(name, node);
+                    }
+                    EitherOrBoth::Right((name, Some(node))) => {
+                        let Some(url) = node.url.set() else {
+                            return err(milli::UserError::NetworkMissingUrl(name));
+                        };
+                        let node = DbRemote {
+                            url,
+                            search_api_key: node.search_api_key.set(),
+                            write_api_key: node.write_api_key.set(),
+                        };
+                        merged_remotes.insert(name, node);
+                    }
+                }
+            }
+            merged_remotes
+        }
+        Setting::Reset => BTreeMap::new(),
+        Setting::NotSet => old_network.remotes,
+    };
+
+    Ok(DbNetwork { local: merged_local, remotes: merged_remotes, sharding: merged_sharding })
+}
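The accumulate helper above folds a partial Network update into the stored DbNetwork using three-state Setting semantics: Set overrides, Reset clears, NotSet keeps the old value. A minimal, self-contained sketch of that merge rule, using a stand-in Setting enum rather than the real meilisearch type:

// Illustrative only: a stand-in for meilisearch's three-state update type.
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

// Merge one optional field the way `accumulate` does for `local`, `sharding`
// and the per-remote API keys.
fn merge<T>(old: Option<T>, update: Setting<T>) -> Option<T> {
    match update {
        Setting::Set(new) => Some(new), // an explicit new value wins
        Setting::Reset => None,         // an explicit reset clears the stored value
        Setting::NotSet => old,         // untouched fields keep their old value
    }
}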
@@ -2,6 +2,7 @@ mod autobatcher;
 #[cfg(test)]
 mod autobatcher_test;
 mod create_batch;
+mod enterprise_edition;
 mod process_batch;
 mod process_dump_creation;
 mod process_export;
|||||||
@@ -135,6 +135,9 @@ impl IndexScheduler {
|
|||||||
Batch::Dump(task) => self
|
Batch::Dump(task) => self
|
||||||
.process_dump_creation(progress, task)
|
.process_dump_creation(progress, task)
|
||||||
.map(|tasks| (tasks, ProcessBatchInfo::default())),
|
.map(|tasks| (tasks, ProcessBatchInfo::default())),
|
||||||
|
Batch::NetworkTopologyChanges { tasks } => self
|
||||||
|
.process_network_changes(progress, tasks)
|
||||||
|
.map(|tasks| (tasks, ProcessBatchInfo::default())),
|
||||||
Batch::IndexOperation { op, must_create_index } => {
|
Batch::IndexOperation { op, must_create_index } => {
|
||||||
let index_uid = op.index_uid().to_string();
|
let index_uid = op.index_uid().to_string();
|
||||||
let index = if must_create_index {
|
let index = if must_create_index {
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOr
|
|||||||
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
|
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
|
||||||
use meilisearch_types::settings::{self, SecretPolicy};
|
use meilisearch_types::settings::{self, SecretPolicy};
|
||||||
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
|
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
|
||||||
|
use roaring::RoaringBitmap;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use ureq::{json, Response};
|
use ureq::{json, Response};
|
||||||
|
|
||||||
@@ -50,6 +51,7 @@ impl IndexScheduler {
|
|||||||
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
|
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
|
||||||
let must_stop_processing = self.scheduler.must_stop_processing.clone();
|
let must_stop_processing = self.scheduler.must_stop_processing.clone();
|
||||||
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
|
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
|
||||||
|
let err = |err| Error::from_milli(err, Some(uid.to_string()));
|
||||||
if must_stop_processing.get() {
|
if must_stop_processing.get() {
|
||||||
return Err(Error::AbortedTask);
|
return Err(Error::AbortedTask);
|
||||||
}
|
}
|
||||||
@@ -61,104 +63,31 @@ impl IndexScheduler {
|
|||||||
));
|
));
|
||||||
|
|
||||||
let ExportIndexSettings { filter, override_settings } = export_settings;
|
let ExportIndexSettings { filter, override_settings } = export_settings;
|
||||||
|
|
||||||
let index = self.index(uid)?;
|
let index = self.index(uid)?;
|
||||||
let index_rtxn = index.read_txn()?;
|
let index_rtxn = index.read_txn()?;
|
||||||
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
|
let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
|
||||||
|
let filter_universe =
|
||||||
// First, check if the index already exists
|
filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
|
||||||
let url = format!("{base_url}/indexes/{uid}");
|
let whole_universe =
|
||||||
let response = retry(&must_stop_processing, || {
|
index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
|
||||||
let mut request = agent.get(&url);
|
|
||||||
if let Some(bearer) = &bearer {
|
|
||||||
request = request.set("Authorization", bearer);
|
|
||||||
}
|
|
||||||
|
|
||||||
request.send_bytes(Default::default()).map_err(into_backoff_error)
|
|
||||||
});
|
|
||||||
let index_exists = match response {
|
|
||||||
Ok(response) => response.status() == 200,
|
|
||||||
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
Err(e) => return Err(e),
|
|
||||||
};
|
|
||||||
|
|
||||||
let primary_key = index
|
|
||||||
.primary_key(&index_rtxn)
|
|
||||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
|
||||||
|
|
||||||
// Create the index
|
|
||||||
if !index_exists {
|
|
||||||
let url = format!("{base_url}/indexes");
|
|
||||||
retry(&must_stop_processing, || {
|
|
||||||
let mut request = agent.post(&url);
|
|
||||||
if let Some(bearer) = &bearer {
|
|
||||||
request = request.set("Authorization", bearer);
|
|
||||||
}
|
|
||||||
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
|
|
||||||
request.send_json(&index_param).map_err(into_backoff_error)
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Patch the index primary key
|
|
||||||
if index_exists && *override_settings {
|
|
||||||
let url = format!("{base_url}/indexes/{uid}");
|
|
||||||
retry(&must_stop_processing, || {
|
|
||||||
let mut request = agent.patch(&url);
|
|
||||||
if let Some(bearer) = &bearer {
|
|
||||||
request = request.set("Authorization", bearer);
|
|
||||||
}
|
|
||||||
let index_param = json!({ "primaryKey": primary_key });
|
|
||||||
request.send_json(&index_param).map_err(into_backoff_error)
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send the index settings
|
|
||||||
if !index_exists || *override_settings {
|
|
||||||
let mut settings =
|
|
||||||
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
|
|
||||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
|
||||||
// Remove the experimental chat setting if not enabled
|
|
||||||
if self.features().check_chat_completions("exporting chat settings").is_err() {
|
|
||||||
settings.chat = Setting::NotSet;
|
|
||||||
}
|
|
||||||
// Retry logic for sending settings
|
|
||||||
let url = format!("{base_url}/indexes/{uid}/settings");
|
|
||||||
retry(&must_stop_processing, || {
|
|
||||||
let mut request = agent.patch(&url);
|
|
||||||
if let Some(bearer) = bearer.as_ref() {
|
|
||||||
request = request.set("Authorization", bearer);
|
|
||||||
}
|
|
||||||
request.send_json(settings.clone()).map_err(into_backoff_error)
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let filter = filter
|
|
||||||
.as_ref()
|
|
||||||
.map(Filter::from_json)
|
|
||||||
.transpose()
|
|
||||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
|
|
||||||
.flatten();
|
|
||||||
|
|
||||||
let filter_universe = filter
|
|
||||||
.map(|f| f.evaluate(&index_rtxn, &index))
|
|
||||||
.transpose()
|
|
||||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
|
||||||
let whole_universe = index
|
|
||||||
.documents_ids(&index_rtxn)
|
|
||||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
|
||||||
let universe = filter_universe.unwrap_or(whole_universe);
|
let universe = filter_universe.unwrap_or(whole_universe);
|
||||||
|
let target = TargetInstance { base_url, api_key };
|
||||||
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
|
let ctx = ExportContext {
|
||||||
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
|
index: &index,
|
||||||
|
index_rtxn: &index_rtxn,
|
||||||
// We don't need to keep this one alive as we will
|
universe: &universe,
|
||||||
// spawn many threads to process the documents
|
progress: &progress,
|
||||||
drop(index_rtxn);
|
agent: &agent,
|
||||||
|
must_stop_processing: &must_stop_processing,
|
||||||
let total_documents = universe.len() as u32;
|
};
|
||||||
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
|
let options = ExportOptions {
|
||||||
progress.update_progress(progress_step);
|
index_uid: uid,
|
||||||
|
payload_size,
|
||||||
|
override_settings: *override_settings,
|
||||||
|
extra_headers: &Default::default(),
|
||||||
|
};
|
||||||
|
let total_documents = self.export_one_index(target, options, ctx)?;
|
||||||
|
|
||||||
output.insert(
|
output.insert(
|
||||||
IndexUidPattern::new_unchecked(uid.clone()),
|
IndexUidPattern::new_unchecked(uid.clone()),
|
||||||
@@ -167,155 +96,217 @@ impl IndexScheduler {
|
|||||||
matched_documents: Some(total_documents as u64),
|
matched_documents: Some(total_documents as u64),
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
|
|
||||||
let documents_url = format!("{base_url}/indexes/{uid}/documents");
|
|
||||||
|
|
||||||
let results = request_threads()
|
|
||||||
.broadcast(|ctx| {
|
|
||||||
let index_rtxn = index
|
|
||||||
.read_txn()
|
|
||||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
|
||||||
|
|
||||||
let mut buffer = Vec::new();
|
|
||||||
let mut tmp_buffer = Vec::new();
|
|
||||||
let mut compressed_buffer = Vec::new();
|
|
||||||
for (i, docid) in universe.iter().enumerate() {
|
|
||||||
if i % ctx.num_threads() != ctx.index() {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let document = index
|
|
||||||
.document(&index_rtxn, docid)
|
|
||||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
|
||||||
|
|
||||||
let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
|
|
||||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
|
||||||
|
|
||||||
// TODO definitely factorize this code
|
|
||||||
'inject_vectors: {
|
|
||||||
let embeddings = index
|
|
||||||
.embeddings(&index_rtxn, docid)
|
|
||||||
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
|
|
||||||
|
|
||||||
if embeddings.is_empty() {
|
|
||||||
break 'inject_vectors;
|
|
||||||
}
|
|
||||||
|
|
||||||
let vectors = document
|
|
||||||
.entry(RESERVED_VECTORS_FIELD_NAME)
|
|
||||||
.or_insert(serde_json::Value::Object(Default::default()));
|
|
||||||
|
|
||||||
let serde_json::Value::Object(vectors) = vectors else {
|
|
||||||
return Err(Error::from_milli(
|
|
||||||
milli::Error::UserError(
|
|
||||||
milli::UserError::InvalidVectorsMapType {
|
|
||||||
document_id: {
|
|
||||||
if let Ok(Some(Ok(index))) = index
|
|
||||||
.external_id_of(
|
|
||||||
&index_rtxn,
|
|
||||||
std::iter::once(docid),
|
|
||||||
)
|
|
||||||
.map(|it| it.into_iter().next())
|
|
||||||
{
|
|
||||||
index
|
|
||||||
} else {
|
|
||||||
format!("internal docid={docid}")
|
|
||||||
}
|
|
||||||
},
|
|
||||||
value: vectors.clone(),
|
|
||||||
},
|
|
||||||
),
|
|
||||||
Some(uid.to_string()),
|
|
||||||
));
|
|
||||||
};
|
|
||||||
|
|
||||||
for (
|
|
||||||
embedder_name,
|
|
||||||
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
|
|
||||||
) in embeddings
|
|
||||||
{
|
|
||||||
let embeddings = ExplicitVectors {
|
|
||||||
embeddings: Some(
|
|
||||||
VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
|
|
||||||
),
|
|
||||||
regenerate: regenerate &&
|
|
||||||
// Meilisearch does not handle well dumps with fragments, because as the fragments
|
|
||||||
// are marked as user-provided,
|
|
||||||
// all embeddings would be regenerated on any settings change or document update.
|
|
||||||
// To prevent this, we mark embeddings has non regenerate in this case.
|
|
||||||
!has_fragments,
|
|
||||||
};
|
|
||||||
vectors.insert(
|
|
||||||
embedder_name,
|
|
||||||
serde_json::to_value(embeddings).unwrap(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tmp_buffer.clear();
|
|
||||||
serde_json::to_writer(&mut tmp_buffer, &document)
|
|
||||||
.map_err(milli::InternalError::from)
|
|
||||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
|
|
||||||
|
|
||||||
// Make sure we put at least one document in the buffer even
|
|
||||||
// though we might go above the buffer limit before sending
|
|
||||||
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
|
|
||||||
// We compress the documents before sending them
|
|
||||||
let mut encoder =
|
|
||||||
GzEncoder::new(&mut compressed_buffer, Compression::default());
|
|
||||||
encoder
|
|
||||||
.write_all(&buffer)
|
|
||||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
|
|
||||||
encoder
|
|
||||||
.finish()
|
|
||||||
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
|
|
||||||
|
|
||||||
retry(&must_stop_processing, || {
|
|
||||||
let mut request = agent.post(&documents_url);
|
|
||||||
request = request.set("Content-Type", "application/x-ndjson");
|
|
||||||
request = request.set("Content-Encoding", "gzip");
|
|
||||||
if let Some(bearer) = &bearer {
|
|
||||||
request = request.set("Authorization", bearer);
|
|
||||||
}
|
|
||||||
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
|
|
||||||
})?;
|
|
||||||
buffer.clear();
|
|
||||||
compressed_buffer.clear();
|
|
||||||
}
|
|
||||||
buffer.extend_from_slice(&tmp_buffer);
|
|
||||||
|
|
||||||
if i > 0 && i % 100 == 0 {
|
|
||||||
step.fetch_add(100, atomic::Ordering::Relaxed);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
retry(&must_stop_processing, || {
|
|
||||||
let mut request = agent.post(&documents_url);
|
|
||||||
request = request.set("Content-Type", "application/x-ndjson");
|
|
||||||
if let Some(bearer) = &bearer {
|
|
||||||
request = request.set("Authorization", bearer);
|
|
||||||
}
|
|
||||||
request.send_bytes(&buffer).map_err(into_backoff_error)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
.map_err(|e| {
|
|
||||||
Error::from_milli(
|
|
||||||
milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
|
|
||||||
Some(uid.to_string()),
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
for result in results {
|
|
||||||
result?;
|
|
||||||
}
|
|
||||||
|
|
||||||
step.store(total_documents, atomic::Ordering::Relaxed);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(output)
|
Ok(output)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(super) fn export_one_index(
|
||||||
|
&self,
|
||||||
|
target: TargetInstance<'_>,
|
||||||
|
options: ExportOptions<'_>,
|
||||||
|
ctx: ExportContext<'_>,
|
||||||
|
) -> Result<u64, Error> {
|
||||||
|
let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
|
||||||
|
|
||||||
|
let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
|
||||||
|
let url = format!(
|
||||||
|
"{base_url}/indexes/{index_uid}",
|
||||||
|
base_url = target.base_url,
|
||||||
|
index_uid = options.index_uid
|
||||||
|
);
|
||||||
|
let response = retry(ctx.must_stop_processing, || {
|
||||||
|
let mut request = ctx.agent.get(&url);
|
||||||
|
if let Some(bearer) = &bearer {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
|
||||||
|
request.send_bytes(Default::default()).map_err(into_backoff_error)
|
||||||
|
});
|
||||||
|
let index_exists = match response {
|
||||||
|
Ok(response) => response.status() == 200,
|
||||||
|
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => false,
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
let primary_key =
|
||||||
|
ctx.index.primary_key(&ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
|
||||||
|
if !index_exists {
|
||||||
|
let url = format!("{base_url}/indexes", base_url = target.base_url);
|
||||||
|
retry(ctx.must_stop_processing, || {
|
||||||
|
let mut request = ctx.agent.post(&url);
|
||||||
|
if let Some(bearer) = &bearer {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
|
||||||
|
request.send_json(&index_param).map_err(into_backoff_error)
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
if index_exists && options.override_settings {
|
||||||
|
retry(ctx.must_stop_processing, || {
|
||||||
|
let mut request = ctx.agent.patch(&url);
|
||||||
|
if let Some(bearer) = &bearer {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
let index_param = json!({ "primaryKey": primary_key });
|
||||||
|
request.send_json(&index_param).map_err(into_backoff_error)
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
if !index_exists || options.override_settings {
|
||||||
|
let mut settings =
|
||||||
|
settings::settings(&ctx.index, &ctx.index_rtxn, SecretPolicy::RevealSecrets)
|
||||||
|
.map_err(err)?;
|
||||||
|
// Remove the experimental chat setting if not enabled
|
||||||
|
if self.features().check_chat_completions("exporting chat settings").is_err() {
|
||||||
|
settings.chat = Setting::NotSet;
|
||||||
|
}
|
||||||
|
// Retry logic for sending settings
|
||||||
|
let url = format!(
|
||||||
|
"{base_url}/indexes/{index_uid}/settings",
|
||||||
|
base_url = target.base_url,
|
||||||
|
index_uid = options.index_uid
|
||||||
|
);
|
||||||
|
retry(ctx.must_stop_processing, || {
|
||||||
|
let mut request = ctx.agent.patch(&url);
|
||||||
|
if let Some(bearer) = bearer.as_ref() {
|
||||||
|
request = request.set("Authorization", bearer);
|
||||||
|
}
|
||||||
|
request.send_json(settings.clone()).map_err(into_backoff_error)
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let fields_ids_map = ctx.index.fields_ids_map(&ctx.index_rtxn)?;
|
||||||
|
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
|
||||||
|
let total_documents = ctx.universe.len() as u32;
|
||||||
|
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
|
||||||
|
ctx.progress.update_progress(progress_step);
|
||||||
|
|
||||||
|
let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
|
||||||
|
let documents_url = format!(
|
||||||
|
"{base_url}/indexes/{index_uid}/documents",
|
||||||
|
base_url = target.base_url,
|
||||||
|
index_uid = options.index_uid
|
||||||
|
);
|
||||||
|
let results = request_threads()
|
||||||
|
.broadcast(|broadcast| {
|
||||||
|
let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;
|
||||||
|
|
||||||
|
let mut buffer = Vec::new();
|
||||||
|
let mut tmp_buffer = Vec::new();
|
||||||
|
+        let mut compressed_buffer = Vec::new();
+        for (i, docid) in ctx.universe.iter().enumerate() {
+            if i % broadcast.num_threads() != broadcast.index() {
+                continue;
+            }
+
+            let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;
+
+            let mut document =
+                obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;
+
+            // TODO definitely factorize this code
+            'inject_vectors: {
+                let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;
+
+                if embeddings.is_empty() {
+                    break 'inject_vectors;
+                }
+
+                let vectors = document
+                    .entry(RESERVED_VECTORS_FIELD_NAME)
+                    .or_insert(serde_json::Value::Object(Default::default()));
+
+                let serde_json::Value::Object(vectors) = vectors else {
+                    return Err(err(milli::Error::UserError(
+                        milli::UserError::InvalidVectorsMapType {
+                            document_id: {
+                                if let Ok(Some(Ok(index))) = ctx
+                                    .index
+                                    .external_id_of(&index_rtxn, std::iter::once(docid))
+                                    .map(|it| it.into_iter().next())
+                                {
+                                    index
+                                } else {
+                                    format!("internal docid={docid}")
+                                }
+                            },
+                            value: vectors.clone(),
+                        },
+                    )));
+                };
+
+                for (
+                    embedder_name,
+                    EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
+                ) in embeddings
+                {
+                    let embeddings = ExplicitVectors {
+                        embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
+                            embeddings,
+                        )),
+                        regenerate: regenerate &&
+                            // Meilisearch does not handle dumps with fragments well: because the
+                            // fragments are marked as user-provided, all embeddings would be
+                            // regenerated on any settings change or document update.
+                            // To prevent this, we mark the embeddings as non-regenerate in this case.
+                            !has_fragments,
+                    };
+                    vectors
+                        .insert(embedder_name, serde_json::to_value(embeddings).unwrap());
+                }
+            }
+
+            tmp_buffer.clear();
+            serde_json::to_writer(&mut tmp_buffer, &document)
+                .map_err(milli::InternalError::from)
+                .map_err(milli::Error::from)
+                .map_err(err)?;
+
+            // Make sure we put at least one document in the buffer even
+            // though we might go above the buffer limit before sending
+            if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
+                // We compress the documents before sending them
+                let mut encoder =
+                    GzEncoder::new(&mut compressed_buffer, Compression::default());
+                encoder.write_all(&buffer).map_err(milli::Error::from).map_err(err)?;
+                encoder.finish().map_err(milli::Error::from).map_err(err)?;
+
+                retry(ctx.must_stop_processing, || {
+                    let mut request = ctx.agent.post(&documents_url);
+                    request = request.set("Content-Type", "application/x-ndjson");
+                    request = request.set("Content-Encoding", "gzip");
+                    if let Some(bearer) = &bearer {
+                        request = request.set("Authorization", bearer);
+                    }
+                    request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
+                })?;
+                buffer.clear();
+                compressed_buffer.clear();
+            }
+            buffer.extend_from_slice(&tmp_buffer);
+
+            if i > 0 && i % 100 == 0 {
+                step.fetch_add(100, atomic::Ordering::Relaxed);
+            }
+        }
+
+        retry(ctx.must_stop_processing, || {
+            let mut request = ctx.agent.post(&documents_url);
+            request = request.set("Content-Type", "application/x-ndjson");
+            if let Some(bearer) = &bearer {
+                request = request.set("Authorization", bearer);
+            }
+            request.send_bytes(&buffer).map_err(into_backoff_error)
+        })?;
+
+        Ok(())
+    })
+    .map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
+    for result in results {
+        result?;
+    }
+    step.store(total_documents, atomic::Ordering::Relaxed);
+    Ok(total_documents as u64)
+}
 }
 }

 fn retry<F>(must_stop_processing: &MustStopProcessing, send_request: F) -> Result<ureq::Response>
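Aside (not part of the diff): the loop above accumulates NDJSON-encoded documents in a buffer, gzips each full batch, and POSTs it before starting the next one. Below is a minimal, self-contained sketch of that batching strategy; `send` and `limit` are hypothetical stand-ins for the retry-wrapped HTTP call and the payload-size limit used in the real code.

// Sketch of the batch-then-gzip strategy, under the assumptions stated above.
use std::io::Write;

use flate2::write::GzEncoder;
use flate2::Compression;

fn send(_payload: &[u8]) -> std::io::Result<()> {
    // Placeholder for the HTTP POST performed by `retry` in the real code.
    Ok(())
}

fn send_batched(docs: &[serde_json::Value], limit: usize) -> std::io::Result<()> {
    let mut buffer = Vec::new();
    let mut tmp_buffer = Vec::new();
    for doc in docs {
        tmp_buffer.clear();
        serde_json::to_writer(&mut tmp_buffer, doc)?;
        tmp_buffer.push(b'\n'); // NDJSON: one document per line
        // Flush the current batch first if this document would push it over the
        // limit, but always keep at least one document per batch.
        if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
            let mut compressed = Vec::new();
            let mut encoder = GzEncoder::new(&mut compressed, Compression::default());
            encoder.write_all(&buffer)?;
            encoder.finish()?;
            send(&compressed)?;
            buffer.clear();
        }
        buffer.extend_from_slice(&tmp_buffer);
    }
    // Final batch, sent uncompressed, as in the trailing `retry` call above.
    send(&buffer)
}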
@@ -374,4 +365,27 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
 }
 }

+// export_one_index arguments
+pub(super) struct TargetInstance<'a> {
+    pub(super) base_url: &'a str,
+    pub(super) api_key: Option<&'a str>,
+}
+
+pub(super) struct ExportOptions<'a> {
+    pub(super) index_uid: &'a str,
+    pub(super) payload_size: Option<&'a Byte>,
+    pub(super) override_settings: bool,
+    pub(super) extra_headers: &'a hashbrown::HashMap<String, String>,
+}
+
+pub(super) struct ExportContext<'a> {
+    pub(super) index: &'a meilisearch_types::milli::Index,
+    pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
+    pub(super) universe: &'a RoaringBitmap,
+    pub(super) progress: &'a Progress,
+    pub(super) agent: &'a ureq::Agent,
+    pub(super) must_stop_processing: &'a MustStopProcessing,
+}
+
+// progress related
 enum ExportIndex {}
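Aside (not part of the diff): the three structs above simply group the arguments of `export_one_index` by concern. A call site could build them roughly like this; every binding on the right-hand side (`index`, `rtxn`, `universe`, `progress`, `agent`, `must_stop_processing`, `headers`) is hypothetical and only illustrates which piece of state feeds which struct.

// let target = TargetInstance { base_url: "http://localhost:7701", api_key: Some("apiKey") };
// let options = ExportOptions {
//     index_uid: "movies",
//     payload_size: None,
//     override_settings: false,
//     extra_headers: &headers,
// };
// let ctx = ExportContext {
//     index: &index,
//     index_rtxn: &rtxn,
//     universe: &universe,
//     progress: &progress,
//     agent: &agent,
//     must_stop_processing: &must_stop_processing,
// };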
@@ -97,7 +97,7 @@ impl IndexScheduler {
             .embedding_configs()
             .embedding_configs(index_wtxn)
             .map_err(|e| Error::from_milli(e.into(), Some(index_uid.clone())))?;
-        let embedders = self.embedders(index_uid.clone(), embedders)?;
+        let embedders = self.embedders(&index_uid, embedders)?;
         for operation in operations {
             match operation {
                 DocumentOperation::Replace(_content_uuid) => {
@@ -284,7 +284,7 @@ impl IndexScheduler {
             .embedding_configs()
             .embedding_configs(index_wtxn)
             .map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
-        let embedders = self.embedders(index_uid.clone(), embedders)?;
+        let embedders = self.embedders(&index_uid, embedders)?;

         progress.update_progress(DocumentEditionProgress::Indexing);
         congestion = Some(
@@ -434,7 +434,7 @@ impl IndexScheduler {
             .embedding_configs()
             .embedding_configs(index_wtxn)
             .map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;
-        let embedders = self.embedders(index_uid.clone(), embedders)?;
+        let embedders = self.embedders(&index_uid, embedders)?;

         progress.update_progress(DocumentDeletionProgress::Indexing);
         congestion = Some(
@@ -722,7 +722,7 @@ fn basic_get_stats() {
     let kind = index_creation_task("whalo", "fish");
     let _task = index_scheduler.register(kind, None, false).unwrap();

-    snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
+    snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
     {
       "indexes": {
         "catto": 1,
@@ -746,6 +746,7 @@ fn basic_get_stats() {
         "indexDeletion": 0,
         "indexSwap": 0,
         "indexUpdate": 0,
+        "networkTopologyChange": 0,
         "settingsUpdate": 0,
         "snapshotCreation": 0,
         "taskCancelation": 0,
@@ -753,7 +754,7 @@ fn basic_get_stats() {
         "upgradeDatabase": 0
       }
     }
-    "#);
+    "###);

     handle.advance_till([Start, BatchCreated]);
     snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
@@ -121,7 +121,7 @@ fn import_vectors() {
     insta::assert_json_snapshot!(simple_hf_config.embedder_options);
     let simple_hf_name = name.clone();

-    let configs = index_scheduler.embedders("doggos".to_string(), configs).unwrap();
+    let configs = index_scheduler.embedders("doggos", configs).unwrap();
     let hf_runtime = configs.get(&simple_hf_name).unwrap();
     let hf_embedder = &hf_runtime.embedder;
     let beagle_embed = hf_embedder
@@ -126,7 +126,7 @@ impl IndexScheduler {
         std::fs::create_dir_all(&options.auth_path).unwrap();
         let auth_env = open_auth_store_env(&options.auth_path).unwrap();
         let index_scheduler =
-            Self::new(options, auth_env, version, sender, planned_failures).unwrap();
+            Self::new(options, auth_env, version, None, sender, planned_failures).unwrap();

         // To be 100% consistent between all test we're going to start the scheduler right now
         // and ensure it's in the expected starting state.
@@ -285,6 +285,7 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
         | K::DumpCreation { .. }
         | K::Export { .. }
         | K::UpgradeDatabase { .. }
+        | K::NetworkTopologyChange { .. }
         | K::SnapshotCreation => (),
     };
     if let Some(Details::IndexSwap { swaps }) = &mut task.details {
@@ -618,6 +619,9 @@ impl crate::IndexScheduler {
             Details::UpgradeDatabase { from: _, to: _ } => {
                 assert_eq!(kind.as_kind(), Kind::UpgradeDatabase);
             }
+            Details::NetworkTopologyChange { .. } => {
+                assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
+            }
         }
     }

@@ -5,31 +5,85 @@

 use std::collections::BTreeMap;

-use milli::update::new::indexer::enterprise_edition::sharding::Shards;
+use deserr::Deserr;
+use milli::update::new::indexer::enterprise_edition::sharding::{Shard, Shards};
+use milli::update::Setting;
 use serde::{Deserialize, Serialize};
+use utoipa::ToSchema;
+
+use crate::deserr::DeserrJsonError;
+use crate::error::deserr_codes::{
+    InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
+    InvalidNetworkUrl, InvalidNetworkWriteApiKey,
+};
+
+#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
+#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
+#[serde(rename_all = "camelCase")]
+#[schema(rename_all = "camelCase")]
+pub struct Network {
+    #[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
+    #[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
+    #[serde(default)]
+    pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
+    #[schema(value_type = Option<String>, example = json!("ms-00"), rename = "self")]
+    #[serde(default, rename = "self")]
+    #[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
+    pub local: Setting<String>,
+    #[schema(value_type = Option<bool>, example = json!(true))]
+    #[serde(default)]
+    #[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
+    pub sharding: Setting<bool>,
+}
+
+#[derive(Clone, Debug, Deserr, ToSchema, Serialize, Deserialize, PartialEq, Eq)]
+#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
+#[serde(rename_all = "camelCase")]
+#[schema(rename_all = "camelCase")]
+pub struct Remote {
+    #[schema(value_type = Option<String>, example = json!({
+        "ms-0": Remote { url: Setting::Set("http://localhost:7700".into()), search_api_key: Setting::Reset, write_api_key: Setting::Reset },
+        "ms-1": Remote { url: Setting::Set("http://localhost:7701".into()), search_api_key: Setting::Set("foo".into()), write_api_key: Setting::Set("bar".into()) },
+        "ms-2": Remote { url: Setting::Set("http://localhost:7702".into()), search_api_key: Setting::Set("bar".into()), write_api_key: Setting::Set("foo".into()) },
+    }))]
+    #[deserr(default, error = DeserrJsonError<InvalidNetworkUrl>)]
+    #[serde(default)]
+    pub url: Setting<String>,
+    #[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
+    #[deserr(default, error = DeserrJsonError<InvalidNetworkSearchApiKey>)]
+    #[serde(default)]
+    pub search_api_key: Setting<String>,
+    #[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
+    #[deserr(default, error = DeserrJsonError<InvalidNetworkWriteApiKey>)]
+    #[serde(default)]
+    pub write_api_key: Setting<String>,
+}

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
 #[serde(rename_all = "camelCase")]
-pub struct Network {
+pub struct DbNetwork {
     #[serde(default, rename = "self")]
     pub local: Option<String>,
     #[serde(default)]
-    pub remotes: BTreeMap<String, Remote>,
+    pub remotes: BTreeMap<String, DbRemote>,
     #[serde(default)]
     pub sharding: bool,
 }

-impl Network {
+impl DbNetwork {
     pub fn shards(&self) -> Option<Shards> {
         if self.sharding {
-            let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
-            let others = self
-                .remotes
-                .keys()
-                .filter(|name| name.as_str() != this)
-                .map(|name| name.to_owned())
-                .collect();
-            Some(Shards { own: vec![this.to_owned()], others })
+            let this = self.local.as_deref();
+            Some(Shards(
+                self.remotes
+                    .keys()
+                    .map(|name| Shard {
+                        is_own: Some(name.as_str()) == this,
+                        name: name.to_owned(),
+                    })
+                    .collect(),
+            ))
         } else {
             None
         }
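Aside (not part of the diff): with the new shape, every remote becomes a `Shard` and `is_own` marks the one matching `self`, instead of splitting names into `own`/`others`. The following self-contained mirror (not the real milli API) shows how such a list can decide whether a document stays on the local instance; the hash-based shard choice and the `owns` method are assumptions for illustration only.

// Standalone mirror of the Shard/Shards shape built above.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Shard {
    is_own: bool,
    name: String,
}

struct Shards(Vec<Shard>);

impl Shards {
    // Pick a shard for a document id deterministically (illustrative policy).
    fn owns(&self, docid: &str) -> bool {
        let mut hasher = DefaultHasher::new();
        docid.hash(&mut hasher);
        let idx = (hasher.finish() % self.0.len() as u64) as usize;
        self.0[idx].is_own
    }
}

fn main() {
    let shards = Shards(vec![
        Shard { is_own: true, name: "ms-0".into() },
        Shard { is_own: false, name: "ms-1".into() },
    ]);
    for shard in &shards.0 {
        println!("shard {} is_own={}", shard.name, shard.is_own);
    }
    println!("keep \"doc-42\" locally: {}", shards.owns("doc-42"));
}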
@@ -38,7 +92,7 @@ impl Network {

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
-pub struct Remote {
+pub struct DbRemote {
     pub url: String,
     #[serde(default)]
     pub search_api_key: Option<String>,
@@ -529,6 +529,8 @@ impl ErrorCode for milli::Error {
             | UserError::DocumentEditionCompilationError(_) => {
                 Code::EditDocumentsByFunctionError
             }
+            UserError::NetworkShardingWithoutSelf => Code::InvalidNetworkSharding,
+            UserError::NetworkMissingUrl(_) => Code::MissingNetworkUrl,
             UserError::CelluliteError(err) => match err {
                 cellulite::Error::BuildCanceled
                 | cellulite::Error::VersionMismatchOnBuild(_)
@@ -7,6 +7,7 @@ use time::{Duration, OffsetDateTime};
 use utoipa::ToSchema;

 use crate::batches::BatchId;
+use crate::enterprise_edition::network::Network;
 use crate::error::ResponseError;
 use crate::settings::{Settings, Unchecked};
 use crate::tasks::{
@@ -142,6 +143,9 @@ pub struct DetailsView {
     pub old_index_uid: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub new_index_uid: Option<String>,
+    // network
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network: Option<Network>,
 }

 impl DetailsView {
@@ -314,6 +318,10 @@ impl DetailsView {
                 // We should never be able to batch multiple renames at the same time.
                 (Some(left), Some(_right)) => Some(left),
             },
+            network: match (&self.network, &other.network) {
+                (None, None) => None,
+                (_, Some(network)) | (Some(network), None) => Some(network.clone()),
+            },
         }
     }
 }
@@ -415,6 +423,9 @@ impl From<Details> for DetailsView {
                 upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)),
                 ..Default::default()
             },
+            Details::NetworkTopologyChange { network: new_network } => {
+                DetailsView { network: new_network, ..Default::default() }
+            }
         }
     }
 }
@@ -15,6 +15,7 @@ use utoipa::{schema, ToSchema};
 use uuid::Uuid;

 use crate::batches::BatchId;
+use crate::enterprise_edition::network::Network;
 use crate::error::ResponseError;
 use crate::index_uid_pattern::IndexUidPattern;
 use crate::keys::Key;
@@ -58,6 +59,7 @@ impl Task {
             | TaskDeletion { .. }
             | Export { .. }
             | UpgradeDatabase { .. }
+            | NetworkTopologyChange { .. }
             | IndexSwap { .. } => None,
             DocumentAdditionOrUpdate { index_uid, .. }
             | DocumentEdition { index_uid, .. }
@@ -94,7 +96,8 @@ impl Task {
             | KindWithContent::DumpCreation { .. }
             | KindWithContent::SnapshotCreation
             | KindWithContent::Export { .. }
-            | KindWithContent::UpgradeDatabase { .. } => None,
+            | KindWithContent::UpgradeDatabase { .. }
+            | KindWithContent::NetworkTopologyChange { .. } => None,
         }
     }
 }
@@ -170,6 +173,10 @@ pub enum KindWithContent {
     UpgradeDatabase {
         from: (u32, u32, u32),
     },
+    NetworkTopologyChange {
+        network: Option<Network>,
+        origin: Option<Origin>,
+    },
 }

 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
@@ -206,6 +213,7 @@ impl KindWithContent {
             KindWithContent::SnapshotCreation => Kind::SnapshotCreation,
             KindWithContent::Export { .. } => Kind::Export,
             KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
+            KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
         }
     }

@@ -218,6 +226,7 @@ impl KindWithContent {
             | TaskCancelation { .. }
             | TaskDeletion { .. }
             | Export { .. }
+            | NetworkTopologyChange { .. }
             | UpgradeDatabase { .. } => vec![],
             DocumentAdditionOrUpdate { index_uid, .. }
             | DocumentEdition { index_uid, .. }
@@ -325,6 +334,9 @@ impl KindWithContent {
                     versioning::VERSION_PATCH,
                 ),
             }),
+            KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
+                Some(Details::NetworkTopologyChange { network: new_network.clone() })
+            }
         }
     }

@@ -407,6 +419,9 @@ impl KindWithContent {
                     versioning::VERSION_PATCH,
                 ),
             }),
+            KindWithContent::NetworkTopologyChange { network: new_network, origin: _s } => {
+                Some(Details::NetworkTopologyChange { network: new_network.clone() })
+            }
         }
     }
 }
@@ -469,6 +484,9 @@ impl From<&KindWithContent> for Option<Details> {
                     versioning::VERSION_PATCH,
                 ),
             }),
+            KindWithContent::NetworkTopologyChange { network: new_network, origin: _ } => {
+                Some(Details::NetworkTopologyChange { network: new_network.clone() })
+            }
         }
     }
 }
@@ -579,6 +597,7 @@ pub enum Kind {
     SnapshotCreation,
     Export,
     UpgradeDatabase,
+    NetworkTopologyChange,
 }

 impl Kind {
@@ -597,7 +616,8 @@ impl Kind {
             | Kind::DumpCreation
             | Kind::Export
             | Kind::UpgradeDatabase
-            | Kind::SnapshotCreation => false,
+            | Kind::SnapshotCreation
+            | Kind::NetworkTopologyChange => false,
         }
     }
 }
@@ -618,6 +638,7 @@ impl Display for Kind {
             Kind::SnapshotCreation => write!(f, "snapshotCreation"),
             Kind::Export => write!(f, "export"),
             Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
+            Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
         }
     }
 }
@@ -653,6 +674,8 @@ impl FromStr for Kind {
             Ok(Kind::Export)
         } else if kind.eq_ignore_ascii_case("upgradeDatabase") {
             Ok(Kind::UpgradeDatabase)
+        } else if kind.eq_ignore_ascii_case("networkTopologyChange") {
+            Ok(Kind::NetworkTopologyChange)
         } else {
             Err(ParseTaskKindError(kind.to_owned()))
         }
@@ -738,6 +761,9 @@ pub enum Details {
         from: (u32, u32, u32),
         to: (u32, u32, u32),
     },
+    NetworkTopologyChange {
+        network: Option<Network>,
+    },
 }

 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
@@ -805,6 +831,7 @@ impl Details {
             | Self::Dump { .. }
             | Self::Export { .. }
             | Self::UpgradeDatabase { .. }
+            | Self::NetworkTopologyChange { .. }
             | Self::IndexSwap { .. } => (),
         }

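Aside (not part of the diff): the new kind follows the existing convention that task kinds display as camelCase strings and parse case-insensitively. The self-contained mirror below (not the meilisearch_types code) shows the round-trip for "networkTopologyChange" using the same Display/FromStr pattern.

// Minimal mirror of the camelCase Display/FromStr convention used above.
use std::fmt;
use std::str::FromStr;

#[derive(Debug, PartialEq)]
enum Kind {
    UpgradeDatabase,
    NetworkTopologyChange,
}

impl fmt::Display for Kind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
            Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
        }
    }
}

impl FromStr for Kind {
    type Err = String;

    fn from_str(kind: &str) -> Result<Self, Self::Err> {
        if kind.eq_ignore_ascii_case("upgradeDatabase") {
            Ok(Kind::UpgradeDatabase)
        } else if kind.eq_ignore_ascii_case("networkTopologyChange") {
            Ok(Kind::NetworkTopologyChange)
        } else {
            Err(format!("unknown task kind `{kind}`"))
        }
    }
}

fn main() {
    let kind: Kind = "networkTopologyChange".parse().unwrap();
    assert_eq!(kind, Kind::NetworkTopologyChange);
    assert_eq!(kind.to_string(), "networkTopologyChange");
}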
@@ -216,7 +216,10 @@ enum OnFailure {
     KeepDb,
 }

-pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
+pub fn setup_meilisearch(
+    opt: &Opt,
+    handle: tokio::runtime::Handle,
+) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
     let index_scheduler_opt = IndexSchedulerOptions {
         version_file_path: opt.db_path.join(VERSION_FILE_NAME),
         auth_path: opt.db_path.join("auth"),
@@ -256,6 +259,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
                 index_scheduler_opt,
                 OnFailure::RemoveDb,
                 binary_version, // the db is empty
+                handle,
             )?,
             Err(e) => {
                 std::fs::remove_dir_all(&opt.db_path)?;
@@ -273,7 +277,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
             bail!("snapshot doesn't exist at {}", snapshot_path.display())
         // the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
         } else {
-            open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
+            open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
         }
     } else if let Some(ref path) = opt.import_dump {
         let src_path_exists = path.exists();
@@ -284,6 +288,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
             index_scheduler_opt,
             OnFailure::RemoveDb,
             binary_version, // the db is empty
+            handle,
         )?;
         match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
             Ok(()) => (index_scheduler, auth_controller),
@@ -304,10 +309,10 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
         // the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
         // or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
         } else {
-            open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
+            open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
         }
     } else {
-        open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
+        open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
     };

     // We create a loop in a thread that registers snapshotCreation tasks
@@ -338,6 +343,7 @@ fn open_or_create_database_unchecked(
     index_scheduler_opt: IndexSchedulerOptions,
     on_failure: OnFailure,
     version: (u32, u32, u32),
+    handle: tokio::runtime::Handle,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     // we don't want to create anything in the data.ms yet, thus we
     // wrap our two builders in a closure that'll be executed later.
@@ -345,7 +351,7 @@ fn open_or_create_database_unchecked(
     let auth_env = open_auth_store_env(&index_scheduler_opt.auth_path).unwrap();
     let auth_controller = AuthController::new(auth_env.clone(), &opt.master_key);
     let index_scheduler_builder = || -> anyhow::Result<_> {
-        Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version)?)
+        Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version, Some(handle))?)
     };

     match (
@@ -452,6 +458,7 @@ fn open_or_create_database(
     index_scheduler_opt: IndexSchedulerOptions,
     empty_db: bool,
     binary_version: (u32, u32, u32),
+    handle: tokio::runtime::Handle,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     let version = if !empty_db {
         check_version(opt, &index_scheduler_opt, binary_version)?
@@ -459,7 +466,7 @@ fn open_or_create_database(
         binary_version
     };

-    open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version)
+    open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version, handle)
 }

 fn import_dump(
@@ -584,7 +591,7 @@ fn import_dump(
             let reader = DocumentsBatchReader::from_reader(reader)?;

             let embedder_configs = index.embedding_configs().embedding_configs(&wtxn)?;
-            let embedders = index_scheduler.embedders(uid.to_string(), embedder_configs)?;
+            let embedders = index_scheduler.embedders(&uid, embedder_configs)?;

             let builder = milli::update::IndexDocuments::new(
                 &mut wtxn,
@@ -612,7 +619,7 @@ fn import_dump(

             let mut indexer = indexer::DocumentOperation::new();
             let embedders = index.embedding_configs().embedding_configs(&rtxn)?;
-            let embedders = index_scheduler.embedders(uid.clone(), embedders)?;
+            let embedders = index_scheduler.embedders(&uid, embedders)?;

             let mmap = unsafe { memmap2::Mmap::map(index_reader.documents_file())? };

@@ -76,7 +76,10 @@ fn on_panic(info: &std::panic::PanicHookInfo) {

 #[actix_web::main]
 async fn main() -> anyhow::Result<()> {
-    try_main().await.inspect_err(|error| {
+    // won't panic inside of tokio::main
+    let runtime = tokio::runtime::Handle::current();
+
+    try_main(runtime).await.inspect_err(|error| {
         tracing::error!(%error);
         let mut current = error.source();
         let mut depth = 0;
@@ -88,7 +91,7 @@ async fn main() -> anyhow::Result<()> {
     })
 }

-async fn try_main() -> anyhow::Result<()> {
+async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
     let (opt, config_read_from) = Opt::try_build()?;

     std::panic::set_hook(Box::new(on_panic));
@@ -122,7 +125,7 @@ async fn try_main() -> anyhow::Result<()> {
         _ => (),
     }

-    let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;
+    let (index_scheduler, auth_controller) = setup_meilisearch(&opt, runtime)?;

     let analytics =
         analytics::Analytics::new(&opt, index_scheduler.clone(), auth_controller.clone()).await;
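Aside (not part of the diff): the pattern above grabs a `tokio::runtime::Handle` inside the async entry point and threads it down into a synchronous constructor, so non-async code can later schedule async work. A minimal sketch of that pattern, assuming the tokio crate with its default macros and runtime features; `Scheduler` and `notify` are illustrative names, not Meilisearch APIs.

use std::time::Duration;

struct Scheduler {
    handle: tokio::runtime::Handle,
}

impl Scheduler {
    // Synchronous constructor that stores a handle to the async runtime,
    // mirroring the extra argument passed to IndexScheduler::new above.
    fn new(handle: tokio::runtime::Handle) -> Self {
        Self { handle }
    }

    // Non-async code can still push async work onto the runtime it was given.
    fn notify(&self) {
        self.handle.spawn(async { println!("notified from sync code") });
    }
}

#[tokio::main]
async fn main() {
    let scheduler = Scheduler::new(tokio::runtime::Handle::current());
    scheduler.notify();
    tokio::time::sleep(Duration::from_millis(10)).await; // let the spawned task run
}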
@@ -282,8 +282,7 @@ async fn process_search_request(
     if let Some(search_rules) = auth_filter.get_index_search_rules(&index_uid) {
         add_search_rules(&mut query.filter, search_rules);
     }
-    let search_kind =
-        search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
+    let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;

     let permit = search_queue.try_get_search_permit().await?;
     let features = index_scheduler.features();
@@ -300,7 +299,7 @@ async fn process_search_request(
         let (search, _is_finite_pagination, _max_total_hits, _offset) =
             prepare_search(&index_cloned, &rtxn, &query, &search_kind, time_budget, features)?;

-        match search_from_kind(index_uid, search_kind, search) {
+        match search_from_kind(&index_uid, search_kind, search) {
             Ok((search_results, _)) => Ok((rtxn, Ok(search_results))),
             Err(MeilisearchHttpError::Milli {
                 error: meilisearch_types::milli::Error::UserError(user_error),
@@ -45,7 +45,7 @@ use crate::extractors::authentication::policies::*;
 use crate::extractors::authentication::GuardedData;
 use crate::extractors::payload::Payload;
 use crate::extractors::sequential_extractor::SeqHandler;
-use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
+use crate::routes::indexes::enterprise_edition::proxy::{check_leader, proxy, Body};
 use crate::routes::indexes::search::fix_sort_query_parameters;
 use crate::routes::{
     get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
@@ -340,6 +340,7 @@ pub async fn delete_document(
     let DocumentParam { index_uid, document_id } = path.into_inner();
     let index_uid = IndexUid::try_from(index_uid)?;
     let network = index_scheduler.network();
+    let origin = check_leader(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -363,7 +364,7 @@ pub async fn delete_document(
     };

     if network.sharding && !dry_run {
-        proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
+        proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -946,6 +947,7 @@ async fn document_addition(
 ) -> Result<SummarizedTaskView, MeilisearchHttpError> {
     let mime_type = extract_mime_type(req)?;
     let network = index_scheduler.network();
+    let origin = check_leader(&req, &network)?;

     let format = match (
         mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),
@@ -1081,6 +1083,7 @@ async fn document_addition(
             &index_scheduler,
             &index_uid,
             req,
+            origin,
             network,
             Body::with_ndjson_payload(file),
             &task,
@@ -1168,6 +1171,7 @@ pub async fn delete_documents_batch(
     debug!(parameters = ?body, "Delete documents by batch");
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let network = index_scheduler.network();
+    let origin = check_leader(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -1194,7 +1198,8 @@ pub async fn delete_documents_batch(
     };

     if network.sharding && !dry_run {
-        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
+        proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(body), &task)
+            .await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -1254,6 +1259,7 @@ pub async fn delete_documents_by_filter(
     let index_uid = index_uid.into_inner();
     let filter = body.into_inner();
     let network = index_scheduler.network();
+    let origin = check_leader(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -1286,7 +1292,8 @@ pub async fn delete_documents_by_filter(
     };

     if network.sharding && !dry_run {
-        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
+        proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(filter), &task)
+            .await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -1384,6 +1391,7 @@ pub async fn edit_documents_by_function(
         .check_edit_documents_by_function("Using the documents edit route")?;

     let network = index_scheduler.network();
+    let origin = check_leader(&req, &network)?;

     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let index_uid = index_uid.into_inner();
@@ -1436,7 +1444,8 @@ pub async fn edit_documents_by_function(
     };

     if network.sharding && !dry_run {
-        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(params), &task).await?;
+        proxy(&index_scheduler, &index_uid, &req, origin, network, Body::Inline(params), &task)
+            .await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -1483,6 +1492,7 @@ pub async fn clear_all_documents(
 ) -> Result<HttpResponse, ResponseError> {
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let network = index_scheduler.network();
+    let origin = check_leader(&req, &network)?;

     analytics.publish(
         DocumentsDeletionAggregator {
@@ -1505,7 +1515,7 @@ pub async fn clear_all_documents(
     };

     if network.sharding && !dry_run {
-        proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
+        proxy(&index_scheduler, &index_uid, &req, origin, network, Body::none(), &task).await?;
     }

     let task: SummarizedTaskView = task.into();
@@ -38,6 +38,27 @@ impl Body<()> {
     }
 }

+pub fn check_leader(
+    req: &HttpRequest,
+    network: &meilisearch_types::enterprise_edition::network::DbNetwork,
+) -> Result<Option<Origin>, MeilisearchHttpError> {
+    match origin_from_req(req)? {
+        Some(origin) => Ok(Some(origin)),
+        None => {
+            let this = network
+                .local
+                .as_deref()
+                .expect("inconsistent `network.sharding` and `network.self`");
+
+            let is_leader = this == todo!();
+            if !is_leader {
+                return Err(MeilisearchHttpError::NotLeader { leader: todo!() });
+            }
+            Ok(None)
+        }
+    }
+}

 /// If necessary, proxies the passed request to the network and update the task description.
 ///
 /// This function reads the custom headers from the request to determine if must proxy the request or if the request
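Aside (not part of the diff): `check_leader` above still leaves the actual leader lookup as `todo!()`. The standalone sketch below only illustrates the intended shape of the check: requests carrying an origin header are accepted as proxied, while direct requests are rejected unless the local instance is the leader. The rule "the leader is the lexicographically smallest remote name" is purely an assumption for the example.

use std::collections::BTreeMap;

#[derive(Debug)]
enum Error {
    NotLeader { leader: String },
}

fn check_leader_sketch(
    origin: Option<String>,              // parsed from custom request headers
    local: &str,                         // `network.self`
    remotes: &BTreeMap<String, String>,  // remote name -> url
) -> Result<Option<String>, Error> {
    match origin {
        // Proxied requests carry their origin and are always accepted.
        Some(origin) => Ok(Some(origin)),
        None => {
            // Assumption for the sketch only: the first remote name acts as the leader.
            let leader =
                remotes.keys().next().cloned().unwrap_or_else(|| local.to_string());
            if leader != local {
                return Err(Error::NotLeader { leader });
            }
            Ok(None)
        }
    }
}

fn main() {
    let mut remotes = BTreeMap::new();
    remotes.insert("ms-0".to_string(), "http://localhost:7700".to_string());
    remotes.insert("ms-1".to_string(), "http://localhost:7701".to_string());
    println!("{:?}", check_leader_sketch(None, "ms-0", &remotes)); // Ok(None): we are the leader
    println!("{:?}", check_leader_sketch(None, "ms-1", &remotes)); // Err(NotLeader { leader: "ms-0" })
}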
@@ -52,11 +73,12 @@ pub async fn proxy<T: serde::Serialize>(
     index_scheduler: &IndexScheduler,
     index_uid: &str,
     req: &HttpRequest,
-    network: meilisearch_types::enterprise_edition::network::Network,
+    origin: Option<Origin>,
+    network: meilisearch_types::enterprise_edition::network::DbNetwork,
     body: Body<T>,
     task: &meilisearch_types::tasks::Task,
 ) -> Result<(), MeilisearchHttpError> {
-    match origin_from_req(req)? {
+    match origin {
         Some(origin) => {
             index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
         }
@@ -260,7 +260,7 @@ pub async fn search(
     }

     let index = index_scheduler.index(&index_uid)?;
-    let search_kind = search_kind(&search_query, &index_scheduler, index_uid.to_string(), &index)?;
+    let search_kind = search_kind(&search_query, &index_scheduler, &index_uid, &index)?;
     let permit = search_queue.try_get_search_permit().await?;
     let search_result = tokio::task::spawn_blocking(move || {
         perform_facet_search(
@@ -339,13 +339,12 @@ pub async fn search_with_url_query(

     let index = index_scheduler.index(&index_uid)?;

-    let search_kind =
-        search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
+    let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
     let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
     let permit = search_queue.try_get_search_permit().await?;
     let search_result = tokio::task::spawn_blocking(move || {
         perform_search(
-            index_uid.to_string(),
+            &index_uid,
             &index,
             query,
             search_kind,
@@ -445,14 +444,13 @@ pub async fn search_with_post(

     let index = index_scheduler.index(&index_uid)?;

-    let search_kind =
-        search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
+    let search_kind = search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)?;
     let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);

     let permit = search_queue.try_get_search_permit().await?;
     let search_result = tokio::task::spawn_blocking(move || {
         perform_search(
-            index_uid.to_string(),
+            &index_uid,
             &index,
             query,
             search_kind,
@@ -480,7 +478,7 @@ pub fn search_kind(
 pub fn search_kind(
     query: &SearchQuery,
     index_scheduler: &IndexScheduler,
-    index_uid: String,
+    index_uid: &str,
     index: &milli::Index,
 ) -> Result<SearchKind, ResponseError> {
     let is_placeholder_query =
@@ -227,7 +227,7 @@ async fn similar(

     let (embedder_name, embedder, quantized) = SearchKind::embedder(
         &index_scheduler,
-        index_uid.to_string(),
+        index_uid.as_str(),
         &index,
         &query.embedder,
         None,
@@ -39,7 +39,6 @@ use crate::routes::features::RuntimeTogglableFeatures;
 use crate::routes::indexes::documents::{DocumentDeletionByFilter, DocumentEditionByFunction};
 use crate::routes::indexes::IndexView;
 use crate::routes::multi_search::SearchResults;
-use crate::routes::network::{Network, Remote};
 use crate::routes::swap_indexes::SwapIndexesPayload;
 use crate::routes::webhooks::{WebhookResults, WebhookSettings, WebhookWithMetadata};
 use crate::search::{
@@ -102,7 +101,7 @@ mod webhooks;
         url = "/",
         description = "Local server",
     )),
-    components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata, meilisearch_types::milli::vector::VectorStoreBackend))
+    components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, meilisearch_types::enterprise_edition::network::Network, meilisearch_types::enterprise_edition::network::Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata, meilisearch_types::milli::vector::VectorStoreBackend))
 )]
 pub struct MeilisearchApi;

@@ -239,26 +239,23 @@ pub async fn multi_search_with_post(
                     })
                     .with_index(query_index)?;

-                let index_uid_str = index_uid.to_string();
-
-                let search_kind = search_kind(
-                    &query,
-                    index_scheduler.get_ref(),
-                    index_uid_str.clone(),
-                    &index,
-                )
-                .with_index(query_index)?;
+                let search_kind =
+                    search_kind(&query, index_scheduler.get_ref(), &index_uid, &index)
+                        .with_index(query_index)?;

                 let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);

-                let search_result = tokio::task::spawn_blocking(move || {
-                    perform_search(
-                        index_uid_str.clone(),
-                        &index,
-                        query,
-                        search_kind,
-                        retrieve_vector,
-                        features,
-                    )
+                let search_result = tokio::task::spawn_blocking({
+                    let index_uid = index_uid.clone();
+                    move || {
+                        perform_search(
+                            &index_uid,
+                            &index,
+                            query,
+                            search_kind,
+                            retrieve_vector,
+                            features,
+                        )
+                    }
                 })
                 .await
                 .with_index(query_index)?;
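Aside (not part of the diff): the rewritten call above uses the common clone-then-move pattern so the blocking closure owns its own copy of `index_uid` while the original stays usable. A minimal runnable sketch, assuming the tokio crate with its default macros and runtime features:

#[tokio::main]
async fn main() {
    let index_uid = String::from("movies");
    let result = tokio::task::spawn_blocking({
        // Clone inside the block expression; only the clone is moved into the closure.
        let index_uid = index_uid.clone();
        move || format!("searching in {index_uid}")
    })
    .await
    .unwrap();
    println!("{result} (original still usable: {index_uid})");
}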
@@ -1,28 +1,21 @@
-use std::collections::BTreeMap;

 use actix_web::web::{self, Data};
 use actix_web::{HttpRequest, HttpResponse};
 use deserr::actix_web::AwebJson;
-use deserr::Deserr;
 use index_scheduler::IndexScheduler;
-use itertools::{EitherOrBoth, Itertools};
 use meilisearch_types::deserr::DeserrJsonError;
-use meilisearch_types::enterprise_edition::network::{Network as DbNetwork, Remote as DbRemote};
+use meilisearch_types::enterprise_edition::network::{Network, Remote};
-use meilisearch_types::error::deserr_codes::{
-    InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
-    InvalidNetworkUrl, InvalidNetworkWriteApiKey,
-};
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::keys::actions;
 use meilisearch_types::milli::update::Setting;
 use serde::Serialize;
 use tracing::debug;
-use utoipa::{OpenApi, ToSchema};
+use utoipa::OpenApi;

 use crate::analytics::{Aggregate, Analytics};
 use crate::extractors::authentication::policies::ActionPolicy;
 use crate::extractors::authentication::GuardedData;
 use crate::extractors::sequential_extractor::SeqHandler;
+use crate::routes::SummarizedTaskView;

 #[derive(OpenApi)]
 #[openapi(
@@ -31,7 +24,7 @@ use crate::extractors::sequential_extractor::SeqHandler;
     name = "Network",
     description = "The `/network` route allows you to describe the topology of a network of Meilisearch instances.

-This route is **synchronous**. This means that no task object will be returned, and any change to the network will be made available immediately.",
+This route is **asynchronous**. A task uid will be returned, and any change to the network will be effective after the corresponding task has been processed.",
     external_docs(url = "https://www.meilisearch.com/docs/reference/api/network"),
     )),
 )]
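Aside (not part of the diff): since `/network` is now asynchronous, a PATCH request is answered with a summarized task instead of the merged network object. Assuming the usual summarized task response shape, a reply could look roughly like this (all values illustrative):

{
  "taskUid": 4,
  "indexUid": null,
  "status": "enqueued",
  "type": "networkTopologyChange",
  "enqueuedAt": "2025-01-01T00:00:00Z"
}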
@@ -83,73 +76,6 @@ async fn get_network(
     Ok(HttpResponse::Ok().json(network))
 }

-#[derive(Debug, Deserr, ToSchema, Serialize)]
-#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
-#[serde(rename_all = "camelCase")]
-#[schema(rename_all = "camelCase")]
-pub struct Remote {
-    #[schema(value_type = Option<String>, example = json!({
-        "ms-0": Remote { url: Setting::Set("http://localhost:7700".into()), search_api_key: Setting::Reset, write_api_key: Setting::Reset },
-        "ms-1": Remote { url: Setting::Set("http://localhost:7701".into()), search_api_key: Setting::Set("foo".into()), write_api_key: Setting::Set("bar".into()) },
-        "ms-2": Remote { url: Setting::Set("http://localhost:7702".into()), search_api_key: Setting::Set("bar".into()), write_api_key: Setting::Set("foo".into()) },
-    }))]
-    #[deserr(default, error = DeserrJsonError<InvalidNetworkUrl>)]
-    #[serde(default)]
-    pub url: Setting<String>,
-    #[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
-    #[deserr(default, error = DeserrJsonError<InvalidNetworkSearchApiKey>)]
-    #[serde(default)]
-    pub search_api_key: Setting<String>,
-    #[schema(value_type = Option<String>, example = json!("XWnBI8QHUc-4IlqbKPLUDuhftNq19mQtjc6JvmivzJU"))]
-    #[deserr(default, error = DeserrJsonError<InvalidNetworkWriteApiKey>)]
-    #[serde(default)]
-    pub write_api_key: Setting<String>,
-}
-
-#[derive(Debug, Deserr, ToSchema, Serialize)]
-#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
-#[serde(rename_all = "camelCase")]
-#[schema(rename_all = "camelCase")]
-pub struct Network {
-    #[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
-    #[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
-    #[serde(default)]
-    pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
-    #[schema(value_type = Option<String>, example = json!("ms-00"), rename = "self")]
-    #[serde(default, rename = "self")]
-    #[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
-    pub local: Setting<String>,
-    #[schema(value_type = Option<bool>, example = json!(true))]
-    #[serde(default)]
-    #[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
-    pub sharding: Setting<bool>,
-}
-
-impl Remote {
-    pub fn try_into_db_node(self, name: &str) -> Result<DbRemote, ResponseError> {
-        Ok(DbRemote {
-            url: self
-                .url
-                .set()
-                .ok_or(ResponseError::from_msg(
-                    format!("Missing field `.remotes.{name}.url`"),
-                    meilisearch_types::error::Code::MissingNetworkUrl,
-                ))
-                .and_then(|url| {
-                    if let Err(error) = url::Url::parse(&url) {
-                        return Err(ResponseError::from_msg(
-                            format!("Invalid `.remotes.{name}.url` (`{url}`): {error}"),
-                            meilisearch_types::error::Code::InvalidNetworkUrl,
-                        ));
-                    }
-                    Ok(url)
-                })?,
-            search_api_key: self.search_api_key.set(),
-            write_api_key: self.write_api_key.set(),
-        })
-    }
-}
-
 #[derive(Serialize)]
 pub struct PatchNetworkAnalytics {
     network_size: usize,
@@ -208,111 +134,58 @@ async fn patch_network(
index_scheduler.features().check_network("Using the /network route")?;

let new_network = new_network.0;
-let old_network = index_scheduler.network();
debug!(parameters = ?new_network, "Patch network");

-let merged_self = match new_network.local {
-Setting::Set(new_self) => Some(new_self),
-Setting::Reset => None,
-Setting::NotSet => old_network.local,
-};
-
-let merged_sharding = match new_network.sharding {
-Setting::Set(new_sharding) => new_sharding,
-Setting::Reset => false,
-Setting::NotSet => old_network.sharding,
-};
-
-if merged_sharding && merged_self.is_none() {
-return Err(ResponseError::from_msg(
-"`.sharding`: enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.".into(),
-meilisearch_types::error::Code::InvalidNetworkSharding,
-));
-}
-
-let merged_remotes = match new_network.remotes {
-Setting::Set(new_remotes) => {
-let mut merged_remotes = BTreeMap::new();
-for either_or_both in old_network
-.remotes
-.into_iter()
-.merge_join_by(new_remotes.into_iter(), |left, right| left.0.cmp(&right.0))
-{
-match either_or_both {
-EitherOrBoth::Both((key, old), (_, Some(new))) => {
-let DbRemote {
-url: old_url,
-search_api_key: old_search_api_key,
-write_api_key: old_write_api_key,
-} = old;
-
-let Remote {
-url: new_url,
-search_api_key: new_search_api_key,
-write_api_key: new_write_api_key,
-} = new;
-
-let merged = DbRemote {
-url: match new_url {
-Setting::Set(new_url) => {
-if let Err(error) = url::Url::parse(&new_url) {
-return Err(ResponseError::from_msg(
-format!("Invalid `.remotes.{key}.url` (`{new_url}`): {error}"),
-meilisearch_types::error::Code::InvalidNetworkUrl,
-));
-}
-new_url
-}
-Setting::Reset => {
-return Err(ResponseError::from_msg(
-format!(
-"Field `.remotes.{key}.url` cannot be set to `null`"
-),
-meilisearch_types::error::Code::InvalidNetworkUrl,
-))
-}
-Setting::NotSet => old_url,
-},
-search_api_key: match new_search_api_key {
-Setting::Set(new_search_api_key) => Some(new_search_api_key),
-Setting::Reset => None,
-Setting::NotSet => old_search_api_key,
-},
-write_api_key: match new_write_api_key {
-Setting::Set(new_write_api_key) => Some(new_write_api_key),
-Setting::Reset => None,
-Setting::NotSet => old_write_api_key,
-},
-};
-merged_remotes.insert(key, merged);
-}
-EitherOrBoth::Both((_, _), (_, None)) | EitherOrBoth::Right((_, None)) => {}
-EitherOrBoth::Left((key, node)) => {
-merged_remotes.insert(key, node);
-}
-EitherOrBoth::Right((key, Some(node))) => {
-let node = node.try_into_db_node(&key)?;
-merged_remotes.insert(key, node);
-}
-}
-}
-merged_remotes
-}
-Setting::Reset => BTreeMap::new(),
-Setting::NotSet => old_network.remotes,
-};
+// check the URLs of all remotes
+if let Setting::Set(remotes) = &new_network.remotes {
+for (remote_name, remote) in remotes.iter() {
+let Some(remote) = remote else {
+continue;
+};
+match &remote.url {
+Setting::Set(new_url) => {
+if let Err(error) = url::Url::parse(&new_url) {
+return Err(ResponseError::from_msg(
+format!("Invalid `.remotes.{remote_name}.url` (`{new_url}`): {error}"),
+meilisearch_types::error::Code::InvalidNetworkUrl,
+));
+}
+}
+Setting::Reset => {
+return Err(ResponseError::from_msg(
+format!("Field `.remotes.{remote_name}.url` cannot be set to `null`"),
+meilisearch_types::error::Code::InvalidNetworkUrl,
+))
+}
+Setting::NotSet => (),
+}
+}
+}

analytics.publish(
PatchNetworkAnalytics {
-network_size: merged_remotes.len(),
-network_has_self: merged_self.is_some(),
+network_size: new_network
+.remotes
+.as_ref()
+.set()
+.map(|remotes| remotes.len())
+.unwrap_or_default(),
+network_has_self: new_network.local.as_ref().set().is_some(),
},
&req,
);

-let merged_network =
-DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
-index_scheduler.put_network(merged_network.clone())?;
-debug!(returns = ?merged_network, "Patch network");
-Ok(HttpResponse::Ok().json(merged_network))
+let task = index_scheduler.register(
+meilisearch_types::tasks::KindWithContent::NetworkTopologyChange {
+network: Some(new_network),
+origin: None,
+},
+None,
+false,
+)?;
+debug!(returns = ?task, "Patch network");
+
+let task: SummarizedTaskView = task.into();
+
+return Ok(HttpResponse::Accepted().json(task));
}
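
The inline merge removed here applied the usual `Setting` update rules (`Set` overrides, `Reset` clears, `NotSet` keeps the old value); after this change the same rules have to be applied when the `networkTopologyChange` task is processed rather than in the route handler. A minimal, self-contained sketch of that rule with a simplified stand-in enum (not the real `Setting` type):

    // Simplified stand-in for the `Setting<T>` used above; sketch only.
    #[derive(Debug)]
    enum Setting<T> {
        Set(T),
        Reset,
        NotSet,
    }

    // `Set` overrides the previous value, `Reset` clears it, `NotSet` keeps it.
    fn merge<T>(new: Setting<T>, old: Option<T>) -> Option<T> {
        match new {
            Setting::Set(value) => Some(value),
            Setting::Reset => None,
            Setting::NotSet => old,
        }
    }

    fn main() {
        assert_eq!(merge(Setting::Set("ms1"), Some("ms0")), Some("ms1"));
        assert_eq!(merge(Setting::Reset, Some("ms0")), None);
        assert_eq!(merge(Setting::NotSet, Some("ms0")), Some("ms0"));
        println!("merge rules hold");
    }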
@@ -226,14 +226,14 @@ mod tests {
{
let params = "types=createIndex";
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
-snapshot!(meili_snap::json_string!(err), @r#"
+snapshot!(meili_snap::json_string!(err), @r###"
{
-"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
+"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
}
-"#);
+"###);
}
}
#[test]
@@ -9,7 +9,7 @@ use std::vec::{IntoIter, Vec};
use actix_http::StatusCode;
use index_scheduler::{IndexScheduler, RoFeatures};
use itertools::Itertools;
-use meilisearch_types::enterprise_edition::network::{Network, Remote};
+use meilisearch_types::enterprise_edition::network::{DbNetwork, DbRemote};
use meilisearch_types::error::ResponseError;
use meilisearch_types::milli::order_by_map::OrderByMap;
use meilisearch_types::milli::score_details::{ScoreDetails, WeightedScoreValue};
@@ -456,7 +456,7 @@ fn merge_metadata(
}

type LocalQueriesByIndex = BTreeMap<String, Vec<QueryByIndex>>;
-type RemoteQueriesByHost = BTreeMap<String, (Remote, Vec<SearchQueryWithIndex>)>;
+type RemoteQueriesByHost = BTreeMap<String, (DbRemote, Vec<SearchQueryWithIndex>)>;

struct PartitionedQueries {
local_queries_by_index: LocalQueriesByIndex,
@@ -477,7 +477,7 @@ impl PartitionedQueries {
&mut self,
federated_query: SearchQueryWithIndex,
query_index: usize,
-network: &Network,
+network: &DbNetwork,
features: RoFeatures,
) -> Result<(), ResponseError> {
if let Some(pagination_field) = federated_query.has_pagination() {
@@ -672,7 +672,7 @@ struct SearchByIndexParams<'a> {
features: RoFeatures,
is_proxy: bool,
has_remote: bool,
-network: &'a Network,
+network: &'a DbNetwork,
}

struct SearchByIndex {
@@ -755,8 +755,7 @@ impl SearchByIndex {
// use an immediately invoked lambda to capture the result without returning from the function

let res: Result<(), ResponseError> = (|| {
-let search_kind =
-search_kind(&query, params.index_scheduler, index_uid.to_string(), &index)?;
+let search_kind = search_kind(&query, params.index_scheduler, &index_uid, &index)?;

let canonicalization_kind = match (&search_kind, &query.q) {
(SearchKind::SemanticOnly { .. }, _) => {
@@ -806,11 +805,11 @@ impl SearchByIndex {
{
Some((previous_ranking_rules, previous_query_index, previous_index_uid))
} else {
-Some((ranking_rules, query_index, index_uid.clone()))
+Some((ranking_rules, query_index, index_uid.to_string()))
};
} else {
self.previous_query_data =
-Some((ranking_rules, query_index, index_uid.clone()));
+Some((ranking_rules, query_index, index_uid.to_string()));
}

match search_kind {
@@ -839,7 +838,7 @@ impl SearchByIndex {
search.limit(params.required_hit_count);

let (result, _semantic_hit_count) =
-super::super::search_from_kind(index_uid.to_string(), search_kind, search)?;
+super::super::search_from_kind(&index_uid, search_kind, search)?;
let format = AttributesFormat {
attributes_to_retrieve: query.attributes_to_retrieve,
retrieve_vectors,
@@ -1,6 +1,6 @@
pub use error::ProxySearchError;
use error::ReqwestErrorWithoutUrl;
-use meilisearch_types::enterprise_edition::network::Remote;
+use meilisearch_types::enterprise_edition::network::DbRemote;
use rand::Rng as _;
use reqwest::{Client, Response, StatusCode};
use serde::de::DeserializeOwned;
@@ -94,7 +94,7 @@ pub struct ProxySearchParams {

/// Performs a federated search on a remote host and returns the results
pub async fn proxy_search(
-node: &Remote,
+node: &DbRemote,
queries: Vec<SearchQueryWithIndex>,
federation: Federation,
params: &ProxySearchParams,
@@ -362,7 +362,7 @@ pub enum SearchKind {
impl SearchKind {
pub(crate) fn semantic(
index_scheduler: &index_scheduler::IndexScheduler,
-index_uid: String,
+index_uid: &str,
index: &Index,
embedder_name: &str,
vector_len: Option<usize>,
@@ -380,7 +380,7 @@ impl SearchKind {

pub(crate) fn hybrid(
index_scheduler: &index_scheduler::IndexScheduler,
-index_uid: String,
+index_uid: &str,
index: &Index,
embedder_name: &str,
semantic_ratio: f32,
@@ -399,7 +399,7 @@ impl SearchKind {

pub(crate) fn embedder(
index_scheduler: &index_scheduler::IndexScheduler,
-index_uid: String,
+index_uid: &str,
index: &Index,
embedder_name: &str,
vector_len: Option<usize>,
@@ -1114,7 +1114,7 @@ pub fn prepare_search<'t>(
}

pub fn perform_search(
-index_uid: String,
+index_uid: &str,
index: &Index,
query: SearchQuery,
search_kind: SearchKind,
@@ -1299,27 +1299,24 @@ fn compute_facet_distribution_stats<S: AsRef<str>>(
}

pub fn search_from_kind(
-index_uid: String,
+index_uid: &str,
search_kind: SearchKind,
search: milli::Search<'_>,
) -> Result<(milli::SearchResult, Option<u32>), MeilisearchHttpError> {
+let err = |e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string()));
let (milli_result, semantic_hit_count) = match &search_kind {
SearchKind::KeywordOnly => {
-let results = search
-.execute()
-.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string())))?;
+let results = search.execute().map_err(err)?;
(results, None)
}
SearchKind::SemanticOnly { .. } => {
-let results = search
-.execute()
-.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid.to_string())))?;
+let results = search.execute().map_err(err)?;
let semantic_hit_count = results.document_scores.len() as u32;
(results, Some(semantic_hit_count))
}
-SearchKind::Hybrid { semantic_ratio, .. } => search
-.execute_hybrid(*semantic_ratio)
-.map_err(|e| MeilisearchHttpError::from_milli(e, Some(index_uid)))?,
+SearchKind::Hybrid { semantic_ratio, .. } => {
+search.execute_hybrid(*semantic_ratio).map_err(err)?
+}
};
Ok((milli_result, semantic_hit_count))
}
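
The refactor above replaces three copies of the same `map_err` with a single closure that captures the borrowed `index_uid`. A small, self-contained sketch of that pattern under illustrative names (these are not the actual Meilisearch types):

    // Illustrative error type standing in for MeilisearchHttpError; sketch only.
    #[derive(Debug)]
    struct HttpError(String);

    fn from_low_level(e: std::num::ParseIntError, index_uid: Option<String>) -> HttpError {
        HttpError(format!("{e} (index: {index_uid:?})"))
    }

    fn parse_all(index_uid: &str, inputs: &[&str]) -> Result<Vec<i64>, HttpError> {
        // One closure builds the error context once, like `err` now does in `search_from_kind`.
        let err = |e| from_low_level(e, Some(index_uid.to_string()));
        inputs.iter().map(|s| s.parse::<i64>().map_err(err)).collect()
    }

    fn main() {
        println!("{:?}", parse_all("movies", &["1", "2"]));
        println!("{:?}", parse_all("movies", &["1", "oops"]));
    }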
@@ -40,14 +40,14 @@ async fn batch_bad_types() {

let (response, code) = server.batches_filter("types=doggo").await;
snapshot!(code, @"400 Bad Request");
-snapshot!(json_string!(response), @r#"
+snapshot!(json_string!(response), @r###"
{
-"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
+"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
}
-"#);
+"###);
}

#[actix_rt::test]
@@ -49,8 +49,8 @@ impl Server<Owned> {
}

let options = default_settings(dir.path());
-let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
+let handle = tokio::runtime::Handle::current();
+let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };

Server { service, _dir: Some(dir), _marker: PhantomData }
@@ -65,7 +65,9 @@ impl Server<Owned> {

options.master_key = Some("MASTER_KEY".to_string());

-let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
+let handle = tokio::runtime::Handle::current();
+
+let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };

Server { service, _dir: Some(dir), _marker: PhantomData }
@@ -78,7 +80,9 @@ impl Server<Owned> {
}

pub async fn new_with_options(options: Opt) -> Result<Self, anyhow::Error> {
-let (index_scheduler, auth) = setup_meilisearch(&options)?;
+let handle = tokio::runtime::Handle::current();
+
+let (index_scheduler, auth) = setup_meilisearch(&options, handle)?;
let service = Service { index_scheduler, auth, options, api_key: None };

Ok(Server { service, _dir: None, _marker: PhantomData })
@@ -217,8 +221,9 @@ impl Server<Shared> {
}

let options = default_settings(dir.path());
+let handle = tokio::runtime::Handle::current();

-let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
+let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let service = Service { index_scheduler, auth, api_key: None, options };

Server { service, _dir: Some(dir), _marker: PhantomData }
@@ -127,7 +127,7 @@ async fn remote_sharding() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -136,7 +136,7 @@ async fn remote_sharding() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -145,7 +145,7 @@ async fn remote_sharding() {
}
"###);
let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms2",
@@ -192,11 +192,11 @@ async fn remote_sharding() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms2.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -442,7 +442,7 @@ async fn remote_sharding_retrieve_vectors() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -451,7 +451,7 @@ async fn remote_sharding_retrieve_vectors() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -460,7 +460,7 @@ async fn remote_sharding_retrieve_vectors() {
}
"###);
let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms2",
@@ -542,11 +542,11 @@ async fn remote_sharding_retrieve_vectors() {
}});

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms2.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// multi vector search: one query per remote

@@ -935,7 +935,7 @@ async fn error_unregistered_remote() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -944,7 +944,7 @@ async fn error_unregistered_remote() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -983,9 +983,9 @@ async fn error_unregistered_remote() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -1055,7 +1055,7 @@ async fn error_no_weighted_score() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1064,7 +1064,7 @@ async fn error_no_weighted_score() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1107,7 +1107,7 @@ async fn error_no_weighted_score() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -1190,7 +1190,7 @@ async fn error_bad_response() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1199,7 +1199,7 @@ async fn error_bad_response() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1245,7 +1245,7 @@ async fn error_bad_response() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -1329,7 +1329,7 @@ async fn error_bad_request() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1338,7 +1338,7 @@ async fn error_bad_request() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1377,7 +1377,7 @@ async fn error_bad_request() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -1461,7 +1461,7 @@ async fn error_bad_request_facets_by_index() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1470,7 +1470,7 @@ async fn error_bad_request_facets_by_index() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1510,7 +1510,7 @@ async fn error_bad_request_facets_by_index() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -1604,7 +1604,7 @@ async fn error_bad_request_facets_by_index_facet() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1613,7 +1613,7 @@ async fn error_bad_request_facets_by_index_facet() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1656,7 +1656,7 @@ async fn error_bad_request_facets_by_index_facet() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -1756,7 +1756,7 @@ async fn error_remote_does_not_answer() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1765,7 +1765,7 @@ async fn error_remote_does_not_answer() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -1807,9 +1807,9 @@ async fn error_remote_does_not_answer() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -1959,7 +1959,7 @@ async fn error_remote_404() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -1968,7 +1968,7 @@ async fn error_remote_404() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2007,9 +2007,9 @@ async fn error_remote_404() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -2156,7 +2156,7 @@ async fn error_remote_sharding_auth() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2165,7 +2165,7 @@ async fn error_remote_sharding_auth() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2211,7 +2211,7 @@ async fn error_remote_sharding_auth() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -2318,7 +2318,7 @@ async fn remote_sharding_auth() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2327,7 +2327,7 @@ async fn remote_sharding_auth() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2372,7 +2372,7 @@ async fn remote_sharding_auth() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -2475,7 +2475,7 @@ async fn error_remote_500() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2484,7 +2484,7 @@ async fn error_remote_500() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2527,9 +2527,9 @@ async fn error_remote_500() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -2656,7 +2656,7 @@ async fn error_remote_500_once() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2665,7 +2665,7 @@ async fn error_remote_500_once() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2708,9 +2708,9 @@ async fn error_remote_500_once() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -2841,7 +2841,7 @@ async fn error_remote_timeout() {
// set self

let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
@@ -2849,7 +2849,7 @@ async fn error_remote_timeout() {
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
-snapshot!(code, @"200 OK");
+snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
@@ -2891,9 +2891,9 @@ async fn error_remote_timeout() {
println!("{}", serde_json::to_string_pretty(&network).unwrap());

let (_response, status_code) = ms0.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");
let (_response, status_code) = ms1.set_network(network.clone()).await;
-snapshot!(status_code, @"200 OK");
+snapshot!(status_code, @"202 Accepted");

// perform multi-search
let query = "badman returns";
@@ -95,14 +95,14 @@ async fn task_bad_types() {

let (response, code) = server.tasks_filter("types=doggo").await;
snapshot!(code, @"400 Bad Request");
-snapshot!(json_string!(response), @r#"
+snapshot!(json_string!(response), @r###"
{
-"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`.",
+"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `networkTopologyChange`.",
"code": "invalid_task_types",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
}
-"#);
+"###);

let (response, code) = server.cancel_tasks("types=doggo").await;
snapshot!(code, @"400 Bad Request");
@@ -432,6 +432,10 @@ and can not be more than 511 bytes.", .document_id.to_string()
InvalidChatSettingsDocumentTemplateMaxBytes,
#[error("{0}")]
DocumentEmbeddingError(String),
+#[error("enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.")]
+NetworkShardingWithoutSelf,
+#[error("Field `.remotes.{0}.url` cannot be set to `null`")]
+NetworkMissingUrl(String),
}

impl From<crate::vector::Error> for Error {
@@ -5,18 +5,24 @@

use std::hash::{BuildHasher as _, BuildHasherDefault};

-pub struct Shards {
-pub own: Vec<String>,
-pub others: Vec<String>,
+pub struct Shards(pub Vec<Shard>);
+
+pub struct Shard {
+pub is_own: bool,
+pub name: String,
}

impl Shards {
pub fn must_process(&self, docid: &str) -> bool {
+self.processing_shard(docid).map(|shard| shard.is_own).unwrap_or_default()
+}
+
+pub fn processing_shard<'a>(&'a self, docid: &str) -> Option<&'a Shard> {
let hasher = BuildHasherDefault::<twox_hash::XxHash3_64>::new();
-let to_hash = |shard: &String| hasher.hash_one((shard, docid));
+let to_hash = |shard: &'a Shard| (shard, hasher.hash_one((&shard.name, docid)));

-let max_hash = self.others.iter().map(to_hash).max().unwrap_or_default();
-self.own.iter().map(to_hash).any(|hash| hash > max_hash)
+let shard =
+self.0.iter().map(to_hash).max_by_key(|(_, hash)| *hash).map(|(shard, _)| shard);
+shard
}
}
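
The reworked `Shards` type above picks the owner of a document by rendezvous (highest-random-weight) hashing: every shard hashes `(shard name, docid)` and the shard with the largest hash wins, which is why a single list of named shards can replace the separate `own`/`others` vectors. A minimal, self-contained sketch of the same selection rule, using the standard library hasher instead of `twox_hash` (all names here are illustrative, not the crate's API):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{BuildHasher, BuildHasherDefault};

    // Illustrative stand-ins for the real `Shard`/`Shards` types.
    struct Shard {
        is_own: bool,
        name: String,
    }

    struct Shards(Vec<Shard>);

    impl Shards {
        // Rendezvous hashing: the shard whose hash of (name, docid) is largest owns the docid.
        fn processing_shard(&self, docid: &str) -> Option<&Shard> {
            let hasher = BuildHasherDefault::<DefaultHasher>::default();
            self.0.iter().max_by_key(|shard| hasher.hash_one((&shard.name, docid)))
        }

        fn must_process(&self, docid: &str) -> bool {
            self.processing_shard(docid).map(|s| s.is_own).unwrap_or_default()
        }
    }

    fn main() {
        let shards = Shards(vec![
            Shard { is_own: true, name: "ms0".into() },
            Shard { is_own: false, name: "ms1".into() },
        ]);
        // Every document deterministically maps to exactly one shard.
        println!("{}", shards.must_process("document-42"));
    }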