mirror of https://github.com/meilisearch/meilisearch.git
synced 2025-12-09 22:25:44 +00:00

Compare commits: change-net...openapi-co
9 commits:

- bb69b9029c
- 3e4eae1227
- 98439617a4
- 64da9e1ea2
- e774e7080d
- b1543fb477
- 6d0c58c7df
- 5bf56279ed
- 876cb17835
.github/workflows/bench-manual.yml (vendored, 2 changes)

```diff
@@ -18,7 +18,7 @@ jobs:
     timeout-minutes: 180 # 3h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/bench-pr.yml (vendored, 2 changes)

```diff
@@ -66,7 +66,7 @@ jobs:
           fetch-depth: 0 # fetch full history to be able to get main commit sha
           ref: ${{ steps.comment-branch.outputs.head_ref }}
 
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
 
       - name: Run benchmarks on PR ${{ github.event.issue.id }}
         run: |
```
.github/workflows/bench-push-indexing.yml (vendored, 2 changes)

```diff
@@ -12,7 +12,7 @@ jobs:
     timeout-minutes: 180 # 3h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
 
       # Run benchmarks
       - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
```
.github/workflows/benchmarks-manual.yml (vendored, 2 changes)

```diff
@@ -18,7 +18,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
      - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/benchmarks-pr.yml (vendored, 2 changes)

```diff
@@ -44,7 +44,7 @@ jobs:
             exit 1
           fi
 
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
```diff
@@ -16,7 +16,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           profile: minimal
 
```
.github/workflows/flaky-tests.yml (vendored, 4 changes)

```diff
@@ -3,7 +3,7 @@ name: Look for flaky tests
 on:
   workflow_dispatch:
   schedule:
-    - cron: "0 4 * * *" # Every day at 4:00AM
+    - cron: '0 4 * * *' # Every day at 4:00AM
 
 jobs:
   flaky:
@@ -23,7 +23,7 @@ jobs:
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
       - name: Run cargo flaky in the dumps
```
.github/workflows/fuzzer-indexing.yml (vendored, 2 changes)

```diff
@@ -12,7 +12,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
 
       # Run benchmarks
       - name: Run the fuzzer
```
.github/workflows/publish-apt-brew-pkg.yml (vendored, 2 changes)

```diff
@@ -31,7 +31,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Install cargo-deb
         run: cargo install cargo-deb
       - uses: actions/checkout@v5
```
.github/workflows/publish-release-assets.yml (vendored, 6 changes)

```diff
@@ -76,7 +76,7 @@ jobs:
     needs: check-version
     steps:
       - uses: actions/checkout@v5
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Build
         run: cargo build --release --locked ${{ matrix.feature-flag }} ${{ matrix.extra-args }}
       # No need to upload binaries for dry run (cron or workflow_dispatch)
@@ -104,13 +104,13 @@ jobs:
       - name: Generate OpenAPI file
         run: |
           cd crates/openapi-generator
-          cargo run --release -- --pretty --output ../../meilisearch.json
+          cargo run --release -- --pretty --output ../../meilisearch-openapi.json
       - name: Upload OpenAPI to Release
         # No need to upload for dry run (cron or workflow_dispatch)
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.11.2
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          file: ./meilisearch.json
+          file: ./meilisearch-openapi.json
           asset_name: meilisearch-openapi.json
           tag: ${{ github.ref }}
```
.github/workflows/test-suite.yml (vendored, 16 changes)

```diff
@@ -34,7 +34,7 @@ jobs:
       - name: check free space after
         run: df -h
       - name: Setup test with Rust stable
-        uses: dtolnay/rust-toolchain@1.91.1
+        uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
         with:
@@ -63,7 +63,7 @@ jobs:
       - uses: actions/checkout@v5
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo build without any default features
         uses: actions-rs/cargo@v1
         with:
@@ -87,7 +87,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo build with almost all features
         run: |
           cargo build --workspace --locked --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
@@ -145,7 +145,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Run cargo tree without default features and check lindera is not present
         run: |
           if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
@@ -167,7 +167,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
       - name: Build
@@ -187,7 +187,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           components: clippy
       - name: Cache dependencies
@@ -209,7 +209,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           components: rustfmt
       - name: Cache dependencies
@@ -235,7 +235,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Cache dependencies
         uses: Swatinem/rust-cache@v2.8.0
       - name: Run declarative tests
```
```diff
@@ -24,7 +24,7 @@ jobs:
           sudo rm -rf "/usr/share/dotnet" || true
           sudo rm -rf "/usr/local/lib/android" || true
           sudo rm -rf "/usr/local/share/boost" || true
-      - uses: dtolnay/rust-toolchain@1.91.1
+      - uses: dtolnay/rust-toolchain@1.89
       - name: Install sd
         run: cargo install sd
       - name: Update Cargo.toml file
```
.gitignore (vendored, 3 changes)

```diff
@@ -29,3 +29,6 @@ crates/meilisearch/db.snapshot
 
 # Fuzzcheck data for the facet indexing fuzz test
 crates/milli/fuzz/update::facet::incremental::fuzz::fuzz/
+
+# OpenAPI generator
+**/meilisearch-openapi.json
```
````diff
@@ -117,7 +117,7 @@ With swagger:
 With the internal crate:
 ```bash
 cd crates/openapi-generator
-cargo run --release -- --pretty --output meilisearch.json
+cargo run --release -- --pretty
 ```
 
 ### Logging
````
Cargo.lock (generated, 1017 changes)

File diff suppressed because it is too large.
```diff
@@ -23,7 +23,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.29.0"
+version = "1.28.2"
 authors = [
     "Quentin de Quelen <quentin@dequelen.me>",
     "Clément Renault <clement@meilisearch.com>",
```
|||||||
@@ -9,9 +9,8 @@ use meilisearch_types::error::ResponseError;
|
|||||||
use meilisearch_types::keys::Key;
|
use meilisearch_types::keys::Key;
|
||||||
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
||||||
use meilisearch_types::settings::Unchecked;
|
use meilisearch_types::settings::Unchecked;
|
||||||
use meilisearch_types::tasks::network::{DbTaskNetwork, NetworkTopologyChange};
|
|
||||||
use meilisearch_types::tasks::{
|
use meilisearch_types::tasks::{
|
||||||
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId,
|
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId, TaskNetwork,
|
||||||
};
|
};
|
||||||
use meilisearch_types::InstanceUid;
|
use meilisearch_types::InstanceUid;
|
||||||
use roaring::RoaringBitmap;
|
use roaring::RoaringBitmap;
|
||||||
@@ -96,7 +95,7 @@ pub struct TaskDump {
|
|||||||
)]
|
)]
|
||||||
pub finished_at: Option<OffsetDateTime>,
|
pub finished_at: Option<OffsetDateTime>,
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub network: Option<DbTaskNetwork>,
|
pub network: Option<TaskNetwork>,
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub custom_metadata: Option<String>,
|
pub custom_metadata: Option<String>,
|
||||||
}
|
}
|
||||||
@@ -164,7 +163,6 @@ pub enum KindDump {
|
|||||||
IndexCompaction {
|
IndexCompaction {
|
||||||
index_uid: String,
|
index_uid: String,
|
||||||
},
|
},
|
||||||
NetworkTopologyChange(NetworkTopologyChange),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<Task> for TaskDump {
|
impl From<Task> for TaskDump {
|
||||||
@@ -251,9 +249,6 @@ impl From<KindWithContent> for KindDump {
|
|||||||
KindWithContent::IndexCompaction { index_uid } => {
|
KindWithContent::IndexCompaction { index_uid } => {
|
||||||
KindDump::IndexCompaction { index_uid }
|
KindDump::IndexCompaction { index_uid }
|
||||||
}
|
}
|
||||||
KindWithContent::NetworkTopologyChange(network_topology_change) => {
|
|
||||||
KindDump::NetworkTopologyChange(network_topology_change)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -565,8 +560,7 @@ pub(crate) mod test {
|
|||||||
Network {
|
Network {
|
||||||
local: Some("myself".to_string()),
|
local: Some("myself".to_string()),
|
||||||
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
|
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
|
||||||
leader: None,
|
sharding: false,
|
||||||
version: Default::default(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
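The `#[serde(default, skip_serializing_if = "Option::is_none")]` attributes on `TaskDump`'s optional fields are what keep dumps readable on both sides of a change like this. A minimal self-contained sketch of their effect, assuming `serde` (with the derive feature) and `serde_json` as dependencies; the struct and field names here are illustrative, not the real `TaskDump`:

```rust
use serde::{Deserialize, Serialize};

// Illustrative stand-in for TaskDump's optional fields.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct TaskDumpSketch {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    network: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    custom_metadata: Option<String>,
}

fn main() {
    // `skip_serializing_if` drops `None` fields from the output entirely...
    let dump = TaskDumpSketch { network: None, custom_metadata: None };
    assert_eq!(serde_json::to_string(&dump).unwrap(), "{}");

    // ...and `default` lets a payload without those fields still deserialize.
    let parsed: TaskDumpSketch = serde_json::from_str("{}").unwrap();
    assert_eq!(parsed, dump);
}
```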
```diff
@@ -107,14 +107,19 @@ impl Settings<Unchecked> {
     }
 }
 
-#[derive(Default, Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub const fn is_not_set(&self) -> bool {
         matches!(self, Self::NotSet)
```
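A likely motivation for this recurring `Setting<T>` change (the diff itself does not state one): `#[derive(Default)]` adds an implicit `T: Default` bound to the generated impl, whereas the hand-written impl makes `Setting<T>: Default` for every `T`. A minimal sketch:

```rust
// `NotSet` is the default variant, as in the hunk above.
#[allow(dead_code)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

// The manual impl carries no `T: Default` bound, unlike the derive.
impl<T> Default for Setting<T> {
    fn default() -> Self {
        Self::NotSet
    }
}

// A type that deliberately does not implement `Default`.
struct NoDefault;

fn main() {
    // Compiles even though `NoDefault: Default` does not hold;
    // with `#[derive(Default)]` on `Setting`, it would not.
    let s: Setting<NoDefault> = Default::default();
    assert!(matches!(s, Setting::NotSet));
}
```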
```diff
@@ -161,14 +161,19 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }
 
-#[derive(Default, Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn map<U, F>(self, f: F) -> Setting<U>
     where
```
```diff
@@ -1,7 +1,9 @@
 use std::fmt::{self, Display, Formatter};
+use std::marker::PhantomData;
 use std::str::FromStr;
 
-use serde::Deserialize;
+use serde::de::Visitor;
+use serde::{Deserialize, Deserializer};
 use uuid::Uuid;
 
 use super::settings::{Settings, Unchecked};
@@ -80,3 +82,59 @@ impl Display for IndexUidFormatError {
 }
 
 impl std::error::Error for IndexUidFormatError {}
+
+/// A type that tries to match either a star (*) or
+/// any other thing that implements `FromStr`.
+#[derive(Debug)]
+#[cfg_attr(test, derive(serde::Serialize))]
+pub enum StarOr<T> {
+    Star,
+    Other(T),
+}
+
+impl<'de, T, E> Deserialize<'de> for StarOr<T>
+where
+    T: FromStr<Err = E>,
+    E: Display,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        /// Serde can't differentiate between `StarOr::Star` and `StarOr::Other` without a tag.
+        /// Simply using `#[serde(untagged)]` + `#[serde(rename="*")]` will lead to attempting to
+        /// deserialize everything as a `StarOr::Other`, including "*".
+        /// [`#[serde(other)]`](https://serde.rs/variant-attrs.html#other) might have helped but is
+        /// not supported on untagged enums.
+        struct StarOrVisitor<T>(PhantomData<T>);
+
+        impl<T, FE> Visitor<'_> for StarOrVisitor<T>
+        where
+            T: FromStr<Err = FE>,
+            FE: Display,
+        {
+            type Value = StarOr<T>;
+
+            fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result {
+                formatter.write_str("a string")
+            }
+
+            fn visit_str<SE>(self, v: &str) -> Result<Self::Value, SE>
+            where
+                SE: serde::de::Error,
+            {
+                match v {
+                    "*" => Ok(StarOr::Star),
+                    v => {
+                        let other = FromStr::from_str(v).map_err(|e: T::Err| {
+                            SE::custom(format!("Invalid `other` value: {}", e))
+                        })?;
+                        Ok(StarOr::Other(other))
+                    }
+                }
+            }
+        }
+
+        deserializer.deserialize_str(StarOrVisitor(PhantomData))
+    }
+}
```
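For reference, a self-contained sketch of how the `StarOr` deserializer introduced above behaves end to end, assuming `serde` and `serde_json` as dependencies (the `main` usage is illustrative, not part of the diff):

```rust
use std::fmt::{self, Display, Formatter};
use std::marker::PhantomData;
use std::str::FromStr;

use serde::de::Visitor;
use serde::{Deserialize, Deserializer};

#[derive(Debug)]
pub enum StarOr<T> {
    Star,
    Other(T),
}

impl<'de, T, E> Deserialize<'de> for StarOr<T>
where
    T: FromStr<Err = E>,
    E: Display,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Same visitor shape as the hunk above: "*" is special-cased,
        // everything else is parsed with the inner type's FromStr.
        struct StarOrVisitor<T>(PhantomData<T>);

        impl<T, FE> Visitor<'_> for StarOrVisitor<T>
        where
            T: FromStr<Err = FE>,
            FE: Display,
        {
            type Value = StarOr<T>;

            fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
                formatter.write_str("a string")
            }

            fn visit_str<SE>(self, v: &str) -> Result<Self::Value, SE>
            where
                SE: serde::de::Error,
            {
                match v {
                    "*" => Ok(StarOr::Star),
                    v => {
                        let other = FromStr::from_str(v).map_err(|e: T::Err| {
                            SE::custom(format!("Invalid `other` value: {}", e))
                        })?;
                        Ok(StarOr::Other(other))
                    }
                }
            }
        }

        deserializer.deserialize_str(StarOrVisitor(PhantomData))
    }
}

fn main() {
    // "*" maps to the dedicated Star variant...
    let star: StarOr<u32> = serde_json::from_str(r#""*""#).unwrap();
    assert!(matches!(star, StarOr::Star));

    // ...while any other string goes through `FromStr` for the inner type.
    let other: StarOr<u32> = serde_json::from_str(r#""42""#).unwrap();
    assert!(matches!(other, StarOr::Other(42)));
}
```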
```diff
@@ -192,14 +192,19 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }
 
-#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn set(self) -> Option<T> {
         match self {
```
```diff
@@ -47,15 +47,20 @@ pub struct Settings<T> {
     pub _kind: PhantomData<T>,
 }
 
-#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }
 
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn set(self) -> Option<T> {
         match self {
```
```diff
@@ -322,7 +322,7 @@ impl From<Task> for TaskView {
             _ => None,
         });
 
-        let duration = finished_at.zip(started_at).map(|(tf, ts)| tf - ts);
+        let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));
 
         Self {
             uid: id,
```
```diff
@@ -24,7 +24,6 @@ dump = { path = "../dump" }
 enum-iterator = "2.3.0"
 file-store = { path = "../file-store" }
 flate2 = "1.1.5"
-hashbrown = "0.15.5"
 indexmap = "2.12.0"
 meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
@@ -48,13 +47,9 @@ tracing = "0.1.41"
 ureq = "2.12.1"
 uuid = { version = "1.18.1", features = ["serde", "v4"] }
 backoff = "0.4.0"
-reqwest = { version = "0.12.24", features = [
-    "rustls-tls",
-    "http2",
-], default-features = false }
+reqwest = { version = "0.12.24", features = ["rustls-tls", "http2"], default-features = false }
 rusty-s3 = "0.8.1"
 tokio = { version = "1.48.0", features = ["full"] }
-urlencoding = "2.1.3"
 
 [dev-dependencies]
 big_s = "1.0.2"
@@ -63,6 +58,3 @@ crossbeam-channel = "0.5.15"
 insta = { version = "=1.39.0", features = ["json", "redactions"] }
 maplit = "1.0.2"
 meili-snap = { path = "../meili-snap" }
-
-[features]
-enterprise = ["meilisearch-types/enterprise"]
```
```diff
@@ -238,9 +238,6 @@ impl<'a> Dump<'a> {
             KindDump::IndexCompaction { index_uid } => {
                 KindWithContent::IndexCompaction { index_uid }
             }
-            KindDump::NetworkTopologyChange(network_topology_change) => {
-                KindWithContent::NetworkTopologyChange(network_topology_change)
-            }
         },
     };
 
```
```diff
@@ -3,13 +3,10 @@ use std::fmt::Display;
 use meilisearch_types::batches::BatchId;
 use meilisearch_types::error::{Code, ErrorCode};
 use meilisearch_types::milli::index::RollbackOutcome;
-use meilisearch_types::milli::DocumentId;
-use meilisearch_types::tasks::network::ReceiveTaskError;
 use meilisearch_types::tasks::{Kind, Status};
 use meilisearch_types::{heed, milli};
 use reqwest::StatusCode;
 use thiserror::Error;
-use uuid::Uuid;
 
 use crate::TaskId;
 
@@ -194,17 +191,6 @@ pub enum Error {
     #[error(transparent)]
     HeedTransaction(heed::Error),
 
-    #[error("No network topology change task is currently enqueued or processing")]
-    ImportTaskWithoutNetworkTask,
-    #[error("The network task version (`{network_task}`) does not match the import task version (`{import_task}`)")]
-    NetworkVersionMismatch { network_task: Uuid, import_task: Uuid },
-    #[error("The import task emanates from an unknown remote `{0}`")]
-    ImportTaskUnknownRemote(String),
-    #[error("The import task with key `{0}` was already received")]
-    ImportTaskAlreadyReceived(DocumentId),
-    #[error("{action} requires the Enterprise Edition")]
-    RequiresEnterpriseEdition { action: &'static str },
-
     #[cfg(test)]
     #[error("Planned failure for tests.")]
     PlannedFailure,
@@ -262,11 +248,6 @@ impl Error {
             | Error::Persist(_)
             | Error::FeatureNotEnabled(_)
             | Error::Export(_)
-            | Error::ImportTaskWithoutNetworkTask
-            | Error::NetworkVersionMismatch { .. }
-            | Error::ImportTaskAlreadyReceived(_)
-            | Error::ImportTaskUnknownRemote(_)
-            | Error::RequiresEnterpriseEdition { .. }
             | Error::Anyhow(_) => true,
             Error::CreateBatch(_)
             | Error::CorruptedTaskQueue
@@ -326,11 +307,6 @@ impl ErrorCode for Error {
             Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
             Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
             Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
-            Error::ImportTaskWithoutNetworkTask => Code::ImportTaskWithoutNetworkTask,
-            Error::NetworkVersionMismatch { .. } => Code::NetworkVersionMismatch,
-            Error::ImportTaskAlreadyReceived(_) => Code::ImportTaskAlreadyReceived,
-            Error::ImportTaskUnknownRemote(_) => Code::ImportTaskUnknownRemote,
-            Error::RequiresEnterpriseEdition { .. } => Code::RequiresEnterpriseEdition,
             Error::S3Error { status, .. } if status.is_client_error() => {
                 Code::InvalidS3SnapshotRequest
             }
@@ -369,12 +345,3 @@ impl ErrorCode for Error {
         }
     }
 }
-
-impl From<ReceiveTaskError> for Error {
-    fn from(value: ReceiveTaskError) -> Self {
-        match value {
-            ReceiveTaskError::UnknownRemote(unknown) => Error::ImportTaskUnknownRemote(unknown),
-            ReceiveTaskError::DuplicateTask(dup) => Error::ImportTaskAlreadyReceived(dup),
-        }
-    }
-}
```
```diff
@@ -38,10 +38,6 @@ impl RoFeatures {
         Self { runtime }
     }
 
-    pub fn from_runtime_features(features: RuntimeTogglableFeatures) -> Self {
-        Self { runtime: features }
-    }
-
     pub fn runtime_features(&self) -> RuntimeTogglableFeatures {
         self.runtime
     }
```
```diff
@@ -361,12 +361,6 @@ impl IndexMapper {
         Ok(())
     }
 
-    /// The number of indexes in the database
-    #[cfg(feature = "enterprise")] // only used in enterprise edition for now
-    pub fn index_count(&self, rtxn: &RoTxn) -> Result<u64> {
-        Ok(self.index_mapping.len(rtxn)?)
-    }
-
     /// Return an index, may open it if it wasn't already opened.
     pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
         if let Some((current_name, current_index)) =
```
```diff
@@ -27,7 +27,6 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
         queue,
         scheduler,
         persisted,
-        export_default_payload_size_bytes: _,
 
         index_mapper,
         features: _,
@@ -330,9 +329,6 @@ fn snapshot_details(d: &Details) -> String {
         Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
             format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
         }
-        Details::NetworkTopologyChange { moved_documents, message } => {
-            format!("{{ moved_documents: {moved_documents:?}, message: {message:?}")
-        }
     }
 }
```
```diff
@@ -48,7 +48,6 @@ use std::path::{Path, PathBuf};
 use std::sync::{Arc, RwLock};
 use std::time::Duration;
 
-use byte_unit::Byte;
 use dump::Dump;
 pub use error::Error;
 pub use features::RoFeatures;
@@ -69,12 +68,10 @@ use meilisearch_types::milli::vector::{
 use meilisearch_types::milli::{self, Index};
 use meilisearch_types::network::Network;
 use meilisearch_types::task_view::TaskView;
-use meilisearch_types::tasks::network::{
-    DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
-};
-use meilisearch_types::tasks::{KindWithContent, Task};
+use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
 use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
 use milli::vector::db::IndexEmbeddingConfig;
+use processing::ProcessingTasks;
 pub use queue::Query;
 use queue::Queue;
 use roaring::RoaringBitmap;
@@ -85,7 +82,6 @@ use uuid::Uuid;
 use versioning::Versioning;
 
 use crate::index_mapper::IndexMapper;
-use crate::processing::ProcessingTasks;
 use crate::utils::clamp_to_page_size;
 
 pub(crate) type BEI128 = I128<BE>;
@@ -148,11 +144,9 @@ pub struct IndexSchedulerOptions {
     /// If the autobatcher is allowed to automatically batch tasks
     /// it will only batch this defined maximum size (in bytes) of tasks at once.
     pub batched_tasks_size_limit: u64,
-    /// The maximum size of the default payload for exporting documents, in bytes
-    pub export_default_payload_size_bytes: Byte,
     /// The experimental features enabled for this instance.
     pub instance_features: InstanceTogglableFeatures,
-    /// Whether the index scheduler is able to auto upgrade or not.
+    /// The experimental features enabled for this instance.
     pub auto_upgrade: bool,
     /// The maximal number of entries in the search query cache of an embedder.
     ///
@@ -205,9 +199,6 @@ pub struct IndexScheduler {
     /// to the same embeddings for the same input text.
     embedders: Arc<RwLock<HashMap<EmbedderOptions, Arc<Embedder>>>>,
 
-    /// The maximum size of the default payload for exporting documents, in bytes
-    pub export_default_payload_size_bytes: Byte,
-
     // ================= test
     // The next entry is dedicated to the tests.
     /// Provide a way to set a breakpoint in multiple part of the scheduler.
@@ -243,7 +234,6 @@ impl IndexScheduler {
             cleanup_enabled: self.cleanup_enabled,
             experimental_no_edition_2024_for_dumps: self.experimental_no_edition_2024_for_dumps,
             persisted: self.persisted,
-            export_default_payload_size_bytes: self.export_default_payload_size_bytes,
 
             webhooks: self.webhooks.clone(),
             embedders: self.embedders.clone(),
@@ -355,7 +345,6 @@ impl IndexScheduler {
             persisted,
             webhooks: Arc::new(webhooks),
             embedders: Default::default(),
-            export_default_payload_size_bytes: options.export_default_payload_size_bytes,
 
             #[cfg(test)] // Will be replaced in `new_tests` in test environments
             test_breakpoint_sdr: crossbeam_channel::bounded(0).0,
@@ -711,14 +700,14 @@ impl IndexScheduler {
         self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
     }
 
-    pub fn set_task_network(&self, task_id: TaskId, network: DbTaskNetwork) -> Result<Task> {
+    pub fn set_task_network(&self, task_id: TaskId, network: TaskNetwork) -> Result<()> {
         let mut wtxn = self.env.write_txn()?;
         let mut task =
             self.queue.tasks.get_task(&wtxn, task_id)?.ok_or(Error::TaskNotFound(task_id))?;
         task.network = Some(network);
         self.queue.tasks.all_tasks.put(&mut wtxn, &task_id, &task)?;
         wtxn.commit()?;
-        Ok(task)
+        Ok(())
     }
 
     /// Return the batches matching the query from the user's point of view along
@@ -768,30 +757,18 @@ impl IndexScheduler {
         task_id: Option<TaskId>,
         dry_run: bool,
     ) -> Result<Task> {
-        self.register_with_custom_metadata(kind, task_id, None, dry_run, None)
+        self.register_with_custom_metadata(kind, task_id, None, dry_run)
     }
 
     /// Register a new task in the scheduler, with metadata.
     ///
     /// If it fails and data was associated with the task, it tries to delete the associated data.
-    ///
-    /// # Parameters
-    ///
-    /// - task_network: network of the task to check.
-    ///
-    /// If the task is an import task, only accept it if:
-    ///
-    /// 1. There is an ongoing network topology change task
-    /// 2. The task to register matches the network version of the network topology change task
-    ///
-    /// Always accept the task if it is not an import task.
     pub fn register_with_custom_metadata(
         &self,
         kind: KindWithContent,
         task_id: Option<TaskId>,
         custom_metadata: Option<String>,
         dry_run: bool,
-        task_network: Option<TaskNetwork>,
     ) -> Result<Task> {
         // if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
         if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
@@ -802,19 +779,7 @@ impl IndexScheduler {
         }
 
         let mut wtxn = self.env.write_txn()?;
-        if let Some(TaskNetwork::Import { import_from, network_change, metadata }) = &task_network {
-            self.update_network_task(&mut wtxn, import_from, network_change, metadata)?;
-        }
-
-        let task = self.queue.register(
-            &mut wtxn,
-            &kind,
-            task_id,
-            custom_metadata,
-            dry_run,
-            task_network.map(DbTaskNetwork::from),
-        )?;
+        let task = self.queue.register(&mut wtxn, &kind, task_id, custom_metadata, dry_run)?;
 
         // If the registered task is a task cancelation
         // we inform the processing tasks to stop (if necessary).
@@ -836,91 +801,6 @@ impl IndexScheduler {
         Ok(task)
     }
 
-    pub fn network_no_index_for_remote(
-        &self,
-        remote_name: String,
-        origin: Origin,
-    ) -> Result<(), Error> {
-        let mut wtxn = self.env.write_txn()?;
-
-        self.update_network_task(
-            &mut wtxn,
-            &ImportData { remote_name, index_name: None, document_count: 0 },
-            &origin,
-            &ImportMetadata { index_count: 0, task_key: None, total_index_documents: 0 },
-        )?;
-
-        wtxn.commit()?;
-
-        // wake up the scheduler as the task state has changed
-        self.scheduler.wake_up.signal();
-
-        Ok(())
-    }
-
-    fn update_network_task(
-        &self,
-        wtxn: &mut heed::RwTxn<'_>,
-        import_from: &ImportData,
-        network_change: &Origin,
-        metadata: &ImportMetadata,
-    ) -> Result<(), Error> {
-        let mut network_tasks = self
-            .queue
-            .tasks
-            .get_kind(&*wtxn, meilisearch_types::tasks::Kind::NetworkTopologyChange)?;
-        if network_tasks.is_empty() {
-            return Err(Error::ImportTaskWithoutNetworkTask);
-        }
-        let network_task = {
-            let processing = self.processing_tasks.read().unwrap().processing.clone();
-            if processing.is_disjoint(&network_tasks) {
-                let enqueued = self
-                    .queue
-                    .tasks
-                    .get_status(&*wtxn, meilisearch_types::tasks::Status::Enqueued)?;
-
-                network_tasks &= enqueued;
-                if let Some(network_task) = network_tasks.into_iter().next() {
-                    network_task
-                } else {
-                    return Err(Error::ImportTaskWithoutNetworkTask);
-                }
-            } else {
-                network_tasks &= &*processing;
-                network_tasks.into_iter().next().unwrap()
-            }
-        };
-        let mut network_task = self.queue.tasks.get_task(&*wtxn, network_task)?.unwrap();
-        let network_task_version = network_task
-            .network
-            .as_ref()
-            .map(|network| network.network_version())
-            .unwrap_or_default();
-        if network_task_version != network_change.network_version {
-            return Err(Error::NetworkVersionMismatch {
-                network_task: network_task_version,
-                import_task: network_change.network_version,
-            });
-        }
-        let KindWithContent::NetworkTopologyChange(network_topology_change) =
-            &mut network_task.kind
-        else {
-            tracing::error!("unexpected network kind for network task while registering task");
-            return Err(Error::CorruptedTaskQueue);
-        };
-        network_topology_change.receive_remote_task(
-            &import_from.remote_name,
-            import_from.index_name.as_deref(),
-            metadata.task_key,
-            import_from.document_count,
-            metadata.index_count,
-            metadata.total_index_documents,
-        )?;
-        self.queue.tasks.update_task(wtxn, &mut network_task)?;
-        Ok(())
-    }
-
     /// Register a new task coming from a dump in the scheduler.
     /// By taking a mutable ref we're pretty sure no one will ever import a dump while actix is running.
     pub fn register_dumped_task(&mut self) -> Result<Dump<'_>> {
```
```diff
@@ -42,10 +42,12 @@ impl ProcessingTasks {
 
     /// Set the processing tasks to an empty list
     pub fn stop_processing(&mut self) -> Self {
+        self.progress = None;
+
         Self {
             batch: std::mem::take(&mut self.batch),
             processing: std::mem::take(&mut self.processing),
-            progress: std::mem::take(&mut self.progress),
+            progress: None,
         }
     }
 
```
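One behavioral nuance in this change: `std::mem::take` moves the old value out and leaves the type's default behind, so the left-hand version returned whatever progress was in flight, while the right-hand version clears it and always returns `None`. A tiny standalone sketch of `mem::take`'s semantics (not the scheduler's actual types):

```rust
fn main() {
    // `std::mem::take` swaps in the type's default (`None` for `Option`)
    // and hands back the previous value.
    let mut progress: Option<&str> = Some("indexing");
    let taken = std::mem::take(&mut progress);
    assert_eq!(taken, Some("indexing"));
    assert_eq!(progress, None);
}
```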
```diff
@@ -15,7 +15,6 @@ use file_store::FileStore;
 use meilisearch_types::batches::BatchId;
 use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, BEU32};
-use meilisearch_types::tasks::network::DbTaskNetwork;
 use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
 use roaring::RoaringBitmap;
 use time::format_description::well_known::Rfc3339;
@@ -260,7 +259,6 @@ impl Queue {
         task_id: Option<TaskId>,
         custom_metadata: Option<String>,
         dry_run: bool,
-        network: Option<DbTaskNetwork>,
     ) -> Result<Task> {
         let next_task_id = self.tasks.next_task_id(wtxn)?;
 
@@ -282,7 +280,7 @@ impl Queue {
             details: kind.default_details(),
             status: Status::Enqueued,
             kind: kind.clone(),
-            network,
+            network: None,
             custom_metadata,
         };
         // For deletion and cancelation tasks, we want to make extra sure that they
@@ -350,7 +348,6 @@ impl Queue {
             None,
             None,
             false,
-            None,
         )?;
 
         Ok(())
```
```diff
@@ -3,8 +3,7 @@ use std::ops::{Bound, RangeBounds};
 use meilisearch_types::heed::types::{DecodeIgnore, SerdeBincode, SerdeJson, Str};
 use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
-use meilisearch_types::tasks::network::DbTaskNetwork;
-use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
+use meilisearch_types::tasks::{Kind, Status, Task};
 use roaring::{MultiOps, RoaringBitmap};
 use time::OffsetDateTime;
 
@@ -115,15 +114,14 @@ impl TaskQueue {
     /// - CorruptedTaskQueue: The task doesn't exist in the database
     pub(crate) fn update_task(&self, wtxn: &mut RwTxn, task: &mut Task) -> Result<()> {
         let old_task = self.get_task(wtxn, task.uid)?.ok_or(Error::CorruptedTaskQueue)?;
-        // network topology tasks may be processed multiple times.
-        let maybe_reprocessing = old_task.status != Status::Enqueued
-            || task.kind.as_kind() == Kind::NetworkTopologyChange;
+        let reprocessing = old_task.status != Status::Enqueued;
 
+        debug_assert!(old_task != *task);
         debug_assert_eq!(old_task.uid, task.uid);
 
         // If we're processing a task that failed it may already contains a batch_uid
         debug_assert!(
-            maybe_reprocessing || (old_task.batch_uid.is_none() && task.batch_uid.is_some()),
+            reprocessing || (old_task.batch_uid.is_none() && task.batch_uid.is_some()),
             "\n==> old: {old_task:?}\n==> new: {task:?}"
         );
 
@@ -145,24 +143,13 @@ impl TaskQueue {
             })?;
         }
 
-        // Avoids rewriting part of the network topology change because of TOCTOU errors
-        if let (
-            KindWithContent::NetworkTopologyChange(old_state),
-            KindWithContent::NetworkTopologyChange(new_state),
-        ) = (old_task.kind, &mut task.kind)
-        {
-            new_state.merge(old_state);
-            // the state possibly just changed, rewrite the details
-            task.details = Some(new_state.to_details());
-        }
-
         assert_eq!(
             old_task.enqueued_at, task.enqueued_at,
             "Cannot update a task's enqueued_at time"
         );
         if old_task.started_at != task.started_at {
             assert!(
-                maybe_reprocessing || old_task.started_at.is_none(),
+                reprocessing || old_task.started_at.is_none(),
                 "Cannot update a task's started_at time"
             );
             if let Some(started_at) = old_task.started_at {
@@ -174,7 +161,7 @@ impl TaskQueue {
         }
         if old_task.finished_at != task.finished_at {
             assert!(
-                maybe_reprocessing || old_task.finished_at.is_none(),
+                reprocessing || old_task.finished_at.is_none(),
                 "Cannot update a task's finished_at time"
             );
             if let Some(finished_at) = old_task.finished_at {
@@ -188,16 +175,7 @@ impl TaskQueue {
         task.network = match (old_task.network, task.network.take()) {
             (None, None) => None,
             (None, Some(network)) | (Some(network), None) => Some(network),
-            (Some(left), Some(right)) => Some(match (left, right) {
-                (
-                    DbTaskNetwork::Remotes { remote_tasks: mut left, network_version: _ },
-                    DbTaskNetwork::Remotes { remote_tasks: mut right, network_version },
-                ) => {
-                    left.append(&mut right);
-                    DbTaskNetwork::Remotes { remote_tasks: left, network_version }
-                }
-                (_, right) => right,
-            }),
+            (Some(_), Some(network)) => Some(network),
         };
 
         self.all_tasks.put(wtxn, &task.uid, task)?;
```
@@ -203,30 +203,26 @@ fn test_disable_auto_deletion_of_tasks() {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
{
|
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let tasks = index_scheduler
|
let tasks =
|
||||||
.queue
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
|
||||||
.unwrap();
|
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
|
||||||
}
|
drop(rtxn);
|
||||||
|
drop(proc);
|
||||||
|
|
||||||
// now we're above the max number of tasks
|
// now we're above the max number of tasks
|
||||||
// and if we try to advance in the tick function no new task deletion should be enqueued
|
// and if we try to advance in the tick function no new task deletion should be enqueued
|
||||||
handle.advance_till([Start, BatchCreated]);
|
handle.advance_till([Start, BatchCreated]);
|
||||||
{
|
|
||||||
let rtxn = index_scheduler.env.read_txn().unwrap();
|
let rtxn = index_scheduler.env.read_txn().unwrap();
|
||||||
let proc = index_scheduler.processing_tasks.read().unwrap();
|
let proc = index_scheduler.processing_tasks.read().unwrap();
|
||||||
let tasks = index_scheduler
|
let tasks =
|
||||||
.queue
|
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
|
||||||
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
|
|
||||||
.unwrap();
|
|
||||||
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
|
||||||
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
|
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
|
||||||
}
|
drop(rtxn);
|
||||||
|
drop(proc);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -271,69 +267,59 @@ fn test_auto_deletion_of_tasks() {
         )
         .unwrap();

-    {
     let rtxn = index_scheduler.env.read_txn().unwrap();
     let proc = index_scheduler.processing_tasks.read().unwrap();
-    let tasks = index_scheduler
-        .queue
-        .get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
-        .unwrap();
+    let tasks =
+        index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
     let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
     snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
-    }
+    drop(rtxn);
+    drop(proc);

-    {
     // now we're above the max number of tasks
     // and if we try to advance in the tick function a new task deletion should be enqueued
     handle.advance_till([Start, BatchCreated]);
     let rtxn = index_scheduler.env.read_txn().unwrap();
     let proc = index_scheduler.processing_tasks.read().unwrap();
-    let tasks = index_scheduler
-        .queue
-        .get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
-        .unwrap();
+    let tasks =
+        index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
     let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
     snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
-    }
+    drop(rtxn);
+    drop(proc);

-    {
     handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
     let rtxn = index_scheduler.env.read_txn().unwrap();
     let proc = index_scheduler.processing_tasks.read().unwrap();
-    let tasks = index_scheduler
-        .queue
-        .get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
-        .unwrap();
+    let tasks =
+        index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
     let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
     snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
-    }
+    drop(rtxn);
+    drop(proc);

     handle.advance_one_failed_batch();
     // a new task deletion has been enqueued
     handle.advance_one_successful_batch();
-    {
     let rtxn = index_scheduler.env.read_txn().unwrap();
     let proc = index_scheduler.processing_tasks.read().unwrap();
-    let tasks = index_scheduler
-        .queue
-        .get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
-        .unwrap();
+    let tasks =
+        index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
     let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
     snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
-    }
+    drop(rtxn);
+    drop(proc);

     handle.advance_one_failed_batch();
     handle.advance_one_successful_batch();
-    {
     let rtxn = index_scheduler.env.read_txn().unwrap();
     let proc = index_scheduler.processing_tasks.read().unwrap();
-    let tasks = index_scheduler
-        .queue
-        .get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
-        .unwrap();
+    let tasks =
+        index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
     let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
     snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
-    }
+    drop(rtxn);
+    drop(proc);
 }

 #[test]
@@ -74,7 +74,6 @@ impl From<KindWithContent> for AutobatchKind {
             | KindWithContent::DumpCreation { .. }
             | KindWithContent::Export { .. }
             | KindWithContent::UpgradeDatabase { .. }
-            | KindWithContent::NetworkTopologyChange(_)
             | KindWithContent::SnapshotCreation => {
                 panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
             }
@@ -1,27 +0,0 @@
-use meilisearch_types::milli::progress::Progress;
-use meilisearch_types::tasks::Task;
-
-use super::create_batch::Batch;
-use crate::scheduler::process_batch::ProcessBatchInfo;
-use crate::utils::ProcessingBatch;
-use crate::{Error, IndexScheduler, Result};
-
-impl IndexScheduler {
-    pub(super) fn process_network_index_batch(
-        &self,
-        _network_task: Task,
-        _inner_batch: Box<Batch>,
-        _current_batch: &mut ProcessingBatch,
-        _progress: Progress,
-    ) -> Result<(Vec<Task>, ProcessBatchInfo)> {
-        Err(Error::RequiresEnterpriseEdition { action: "processing a network task" })
-    }
-
-    pub(super) fn process_network_ready(
-        &self,
-        _task: Task,
-        _progress: Progress,
-    ) -> Result<(Vec<Task>, ProcessBatchInfo)> {
-        Err(Error::RequiresEnterpriseEdition { action: "processing a network task" })
-    }
-}
@@ -4,7 +4,6 @@ use std::io::ErrorKind;
 use meilisearch_types::heed::RoTxn;
 use meilisearch_types::milli::update::IndexDocumentsMethod;
 use meilisearch_types::settings::{Settings, Unchecked};
-use meilisearch_types::tasks::network::NetworkTopologyState;
 use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
 use roaring::RoaringBitmap;
 use uuid::Uuid;
@@ -60,14 +59,6 @@ pub(crate) enum Batch {
         index_uid: String,
         task: Task,
     },
-    #[allow(clippy::enum_variant_names)] // warranted because we are executing an inner index batch
-    NetworkIndexBatch {
-        network_task: Task,
-        inner_batch: Box<Batch>,
-    },
-    NetworkReady {
-        task: Task,
-    },
 }

 #[derive(Debug)]
@@ -149,14 +140,9 @@ impl Batch {
                     ..
                 } => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
             },
-            Batch::IndexSwap { task } | Batch::NetworkReady { task } => {
+            Batch::IndexSwap { task } => {
                 RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
             }
-            Batch::NetworkIndexBatch { network_task, inner_batch } => {
-                let mut tasks = inner_batch.ids();
-                tasks.insert(network_task.uid);
-                tasks
-            }
         }
     }

@@ -170,14 +156,12 @@ impl Batch {
             | Dump(_)
             | Export { .. }
             | UpgradeDatabase { .. }
-            | NetworkReady { .. }
             | IndexSwap { .. } => None,
             IndexOperation { op, .. } => Some(op.index_uid()),
             IndexCreation { index_uid, .. }
             | IndexUpdate { index_uid, .. }
             | IndexDeletion { index_uid, .. }
             | IndexCompaction { index_uid, .. } => Some(index_uid),
-            NetworkIndexBatch { network_task: _, inner_batch } => inner_batch.index_uid(),
         }
     }
 }
@@ -200,8 +184,6 @@ impl fmt::Display for Batch {
             Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
             Batch::Export { .. } => f.write_str("Export")?,
             Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
-            Batch::NetworkIndexBatch { .. } => f.write_str("NetworkTopologyChange")?,
-            Batch::NetworkReady { .. } => f.write_str("NetworkTopologyChange")?,
         };
         match index_uid {
             Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
@@ -470,7 +452,6 @@ impl IndexScheduler {
     pub(crate) fn create_next_batch(
         &self,
         rtxn: &RoTxn,
-        processing_network_tasks: &RoaringBitmap,
     ) -> Result<Option<(Batch, ProcessingBatch)>> {
         #[cfg(test)]
         self.maybe_fail(crate::test_utils::FailureLocation::InsideCreateBatch)?;
@@ -479,6 +460,7 @@ impl IndexScheduler {
         let mut current_batch = ProcessingBatch::new(batch_id);

         let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
+        let count_total_enqueued = enqueued.len();
         let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;

         // 0. we get the last task to cancel.
@@ -527,15 +509,7 @@ impl IndexScheduler {
             )));
         }

-        // 2. Check for enqueued network topology changes
-        let network_changes = self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)?
-            & (enqueued | processing_network_tasks);
-        if let Some(task_id) = network_changes.iter().next() {
-            let task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
-            return self.start_processing_network(rtxn, task, enqueued, current_batch);
-        }
-
-        // 3. we get the next task to delete
+        // 2. we get the next task to delete
         let to_delete = self.queue.tasks.get_kind(rtxn, Kind::TaskDeletion)? & enqueued;
         if !to_delete.is_empty() {
             let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_delete)?;
@@ -545,7 +519,7 @@ impl IndexScheduler {
             return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
         }

-        // 4. we get the next task to compact
+        // 3. we get the next task to compact
         let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
         if let Some(task_id) = to_compact.min() {
             let mut task =
@@ -560,7 +534,7 @@ impl IndexScheduler {
             return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
         }

-        // 5. we batch the export.
+        // 4. we batch the export.
         let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
         if !to_export.is_empty() {
             let task_id = to_export.iter().next().expect("There must be at least one export task");
@@ -571,7 +545,7 @@ impl IndexScheduler {
             return Ok(Some((Batch::Export { task }, current_batch)));
         }

-        // 6. we batch the snapshot.
+        // 5. we batch the snapshot.
         let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
         if !to_snapshot.is_empty() {
             let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
@@ -581,7 +555,7 @@ impl IndexScheduler {
             return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
         }

-        // 7. we batch the dumps.
+        // 6. we batch the dumps.
         let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
         if let Some(to_dump) = to_dump.min() {
             let mut task =
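These renumbered comments trace a fixed drain order: cancelation first, then deletion, compaction, export, snapshot, dump, and only then regular index work. An illustrative sketch of that selection order (names and the slice-based queue are stand-ins; the real scheduler intersects `RoaringBitmap`s of task ids per kind):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Kind {
    TaskDeletion,
    IndexCompaction,
    Export,
    SnapshotCreation,
    DumpCreation,
    Other,
}

// The first non-empty prioritised category wins and forms the whole batch,
// mirroring the numbered blocks in create_next_batch.
fn next_prioritized(enqueued: &[Kind]) -> Option<Kind> {
    const ORDER: [Kind; 5] = [
        Kind::TaskDeletion,
        Kind::IndexCompaction,
        Kind::Export,
        Kind::SnapshotCreation,
        Kind::DumpCreation,
    ];
    ORDER.into_iter().find(|kind| enqueued.contains(kind))
}

fn main() {
    let queue = [Kind::Other, Kind::DumpCreation, Kind::TaskDeletion];
    // Task deletion outranks dump creation even though it was enqueued later.
    assert_eq!(next_prioritized(&queue), Some(Kind::TaskDeletion));
}
```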
@@ -594,66 +568,25 @@ impl IndexScheduler {
             return Ok(Some((Batch::Dump(task), current_batch)));
         }

-        let network = self.network();
-
-        // 8. We make a batch from the unprioritised tasks.
-        let (batch, current_batch) =
-            self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
-                // We want to execute all tasks, except those that have a version strictly higher than the network version
-
-                let Some(task_version) =
-                    task.network.as_ref().map(|tastk_network| tastk_network.network_version())
-                else {
-                    // do not skip tasks that have no network version, otherwise we will never execute them
-                    return false;
-                };
-
-                // skip tasks with a version strictly higher than the network version
-                task_version > network.version
-            })?;
-        Ok(batch.map(|batch| (batch, current_batch)))
-    }
-
-    fn create_next_batch_unprioritized<F>(
-        &self,
-        rtxn: &RoTxn,
-        enqueued: &RoaringBitmap,
-        mut current_batch: ProcessingBatch,
-        mut skip_if: F,
-    ) -> Result<(Option<Batch>, ProcessingBatch)>
-    where
-        F: FnMut(&Task) -> bool,
-    {
-        let count_total_enqueued = enqueued.len();
-
-        let mut enqueued_it = enqueued.iter();
-        let mut task;
-        let index_name = loop {
-            let Some(task_id) = enqueued_it.next() else {
-                return Ok((None, current_batch));
-            };
-            task = self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
-
-            if skip_if(&task) {
-                continue;
-            }
+        // 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
+        let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
+        let mut task =
+            self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;

         // If the task is not associated with any index, verify that it is an index swap and
         // create the batch directly. Otherwise, get the index name associated with the task
         // and use the autobatcher to batch the enqueued tasks associated with it
-            if let Some(&index_name) = task.indexes().first() {
-                break index_name;
+        let index_name = if let Some(&index_name) = task.indexes().first() {
+            index_name
         } else {
-            assert!(
-                matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty())
-            );
+            assert!(matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty()));
             current_batch.processing(Some(&mut task));
             current_batch.reason(BatchStopReason::TaskCannotBeBatched {
                 kind: Kind::IndexSwap,
                 id: task.uid,
             });
-            return Ok((Some(Batch::IndexSwap { task }), current_batch));
-            };
+            return Ok(Some((Batch::IndexSwap { task }, current_batch)));
         };

         let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
@@ -688,10 +621,6 @@ impl IndexScheduler {
                 .get_task(rtxn, task_id)
                 .and_then(|task| task.ok_or(Error::CorruptedTaskQueue))?;

-            if skip_if(&task) {
-                continue;
-            }
-
             if let Some(uuid) = task.content_uuid() {
                 let content_size = match self.queue.file_store.compute_size(uuid) {
                     Ok(content_size) => content_size,
@@ -722,104 +651,19 @@ impl IndexScheduler {
             autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
         {
             current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
-            let batch = self.create_next_batch_index(
+            return Ok(self
+                .create_next_batch_index(
                     rtxn,
                     index_name.to_string(),
                     batchkind,
                     &mut current_batch,
                     create_index,
-            )?;
-            return Ok((batch, current_batch));
+                )?
+                .map(|batch| (batch, current_batch)));
         }

         // If we found no tasks then we were notified for something that got autobatched
         // somehow and there is nothing to do.
-        Ok((None, current_batch))
-    }
-
-    fn start_processing_network(
-        &self,
-        rtxn: &RoTxn,
-        mut task: Task,
-        enqueued: &RoaringBitmap,
-        mut current_batch: ProcessingBatch,
-    ) -> Result<Option<(Batch, ProcessingBatch)>> {
-        current_batch.processing(Some(&mut task));
-
-        let change_version =
-            task.network.as_ref().map(|network| network.network_version()).unwrap_or_default();
-        let KindWithContent::NetworkTopologyChange(network_topology_change) = &task.kind else {
-            panic!("inconsistent kind with content")
-        };
-
-        match network_topology_change.state() {
-            NetworkTopologyState::WaitingForOlderTasks => {
-                let res =
-                    self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
-                        // in this limited mode of execution, we only want to run tasks:
-                        // 1. with a version
-                        // 2. that version strictly lower than the network task version
-
-                        // 1. skip tasks without version
-                        let Some(task_version) =
-                            task.network.as_ref().map(|network| network.network_version())
-                        else {
-                            return true;
-                        };
-
-                        // 2. skip tasks with a version equal or higher to the network task version
-                        task_version >= change_version
-                    });
-
-                let (batch, current_batch) = res?;
-
-                let batch = match batch {
-                    Some(batch) => {
-                        let inner_batch = Box::new(batch);
-                        Batch::NetworkIndexBatch { network_task: task, inner_batch }
-                    }
-                    None => Batch::NetworkReady { task },
-                };
-
-                Ok(Some((batch, current_batch)))
-            }
-            NetworkTopologyState::ImportingDocuments => {
-                // if the import is done we need to go to the next state
-                if network_topology_change.is_import_finished() {
-                    return Ok(Some((Batch::NetworkReady { task }, current_batch)));
-                }
-
-                let res =
-                    self.create_next_batch_unprioritized(rtxn, enqueued, current_batch, |task| {
-                        // in this limited mode of execution, we only want to run tasks:
-                        // 1. with a version
-                        // 2. that version equal to the network task version
-
-                        // 1. skip tasks without version
-                        let Some(task_version) =
-                            task.network.as_ref().map(|network| network.network_version())
-                        else {
-                            return true;
-                        };
-
-                        // 2. skip tasks with a version different from the network task version
-                        task_version != change_version
-                    });
-
-                let (batch, current_batch) = res?;
-
-                let batch = batch.map(|batch| {
-                    let inner_batch = Box::new(batch);
-                    (Batch::NetworkIndexBatch { network_task: task, inner_batch }, current_batch)
-                });
-
-                Ok(batch)
-            }
-            NetworkTopologyState::ExportingDocuments | NetworkTopologyState::Finished => {
-                Ok(Some((Batch::NetworkReady { task }, current_batch)))
-            }
-        }
+        Ok(None)
     }
 }
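The removed `start_processing_network` gated which enqueued tasks could run alongside an in-flight topology change by comparing each task's network version against the change's version. A condensed sketch of those two gates, with simplified stand-in types (the real versions are richer than a plain integer):

```rust
#[derive(Clone, Copy)]
enum State {
    WaitingForOlderTasks,
    ImportingDocuments,
}

// Returns true when the task must be skipped in this limited execution mode.
fn should_skip(state: State, change_version: u128, task_version: Option<u128>) -> bool {
    match (state, task_version) {
        // Tasks without a network version do not belong to the change:
        // they are skipped here and run under regular scheduling later.
        (_, None) => true,
        // Draining phase: only tasks strictly older than the change may run.
        (State::WaitingForOlderTasks, Some(v)) => v >= change_version,
        // Import phase: only tasks stamped with exactly the change's version may run.
        (State::ImportingDocuments, Some(v)) => v != change_version,
    }
}

fn main() {
    assert!(should_skip(State::WaitingForOlderTasks, 5, Some(5))); // same version waits
    assert!(!should_skip(State::WaitingForOlderTasks, 5, Some(4))); // older task drains
    assert!(!should_skip(State::ImportingDocuments, 5, Some(5))); // import runs its own version
}
```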
@@ -1,299 +0,0 @@
-// Copyright © 2025 Meilisearch Some Rights Reserved
-// This file is part of Meilisearch Enterprise Edition (EE).
-// Use of this source code is governed by the Business Source License 1.1,
-// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
-
-use std::collections::BTreeMap;
-use std::time::Duration;
-
-use bumpalo::Bump;
-use meilisearch_types::milli::documents::PrimaryKey;
-use meilisearch_types::milli::progress::{EmbedderStats, Progress};
-use meilisearch_types::milli::update::new::indexer;
-use meilisearch_types::milli::update::new::indexer::current_edition::sharding::Shards;
-use meilisearch_types::milli::{self};
-use meilisearch_types::network::Remote;
-use meilisearch_types::tasks::network::{NetworkTopologyState, Origin};
-use meilisearch_types::tasks::{KindWithContent, Status, Task};
-use roaring::RoaringBitmap;
-
-use super::create_batch::Batch;
-use crate::scheduler::process_batch::ProcessBatchInfo;
-use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
-use crate::utils::ProcessingBatch;
-use crate::{Error, IndexScheduler, Result};
-
-impl IndexScheduler {
-    pub(super) fn process_network_index_batch(
-        &self,
-        mut network_task: Task,
-        inner_batch: Box<Batch>,
-        current_batch: &mut ProcessingBatch,
-        progress: Progress,
-    ) -> Result<(Vec<Task>, ProcessBatchInfo)> {
-        let (mut tasks, info) = self.process_batch(*inner_batch, current_batch, progress)?;
-        let KindWithContent::NetworkTopologyChange(network_topology_change) =
-            &mut network_task.kind
-        else {
-            tracing::error!("unexpected network kind for network task while processing batch");
-            return Err(Error::CorruptedTaskQueue);
-        };
-        for task in &tasks {
-            let Some(network) = task.network.as_ref() else {
-                continue;
-            };
-            let Some(import) = network.import_data() else {
-                continue;
-            };
-            if let Some(index_name) = import.index_name.as_deref() {
-                network_topology_change.process_remote_tasks(
-                    &import.remote_name,
-                    index_name,
-                    import.document_count,
-                );
-            }
-        }
-        network_task.details = Some(network_topology_change.to_details());
-
-        tasks.push(network_task);
-        Ok((tasks, info))
-    }
-
-    pub(super) fn process_network_ready(
-        &self,
-        mut task: Task,
-        progress: Progress,
-    ) -> Result<(Vec<Task>, ProcessBatchInfo)> {
-        let KindWithContent::NetworkTopologyChange(network_topology_change) = &mut task.kind else {
-            tracing::error!("network topology change task has the wrong kind with content");
-            return Err(Error::CorruptedTaskQueue);
-        };
-
-        let Some(task_network) = &task.network else {
-            tracing::error!("network topology change task has no network");
-            return Err(Error::CorruptedTaskQueue);
-        };
-
-        let origin;
-        let origin = match task_network.origin() {
-            Some(origin) => origin,
-            None => {
-                let myself = network_topology_change.in_name().expect("origin is not the leader");
-                origin = Origin {
-                    remote_name: myself.to_string(),
-                    task_uid: task.uid,
-                    network_version: task_network.network_version(),
-                };
-                &origin
-            }
-        };
-
-        if let Some((remotes, out_name)) = network_topology_change.export_to_process() {
-            let moved_documents = self.balance_documents(
-                remotes,
-                out_name,
-                network_topology_change.in_name(),
-                origin,
-                &progress,
-                &self.scheduler.must_stop_processing,
-            )?;
-            network_topology_change.set_moved(moved_documents);
-        }
-        network_topology_change.update_state();
-        if network_topology_change.state() == NetworkTopologyState::Finished {
-            task.status = Status::Succeeded;
-        }
-
-        task.details = Some(network_topology_change.to_details());
-        Ok((vec![task], Default::default()))
-    }
-
-    fn balance_documents(
-        &self,
-        remotes: &BTreeMap<String, Remote>,
-        out_name: &str,
-        in_name: Option<&str>,
-        network_change_origin: &Origin,
-        progress: &Progress,
-        must_stop_processing: &crate::scheduler::MustStopProcessing,
-    ) -> crate::Result<u64> {
-        let new_shards =
-            Shards::from_remotes_local(remotes.keys().map(String::as_str).chain(in_name), in_name);
-
-        // TECHDEBT: this spawns a `ureq` agent additionally to `reqwest`. We probably want to harmonize all of this.
-        let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
-
-        let mut indexer_alloc = Bump::new();
-
-        let scheduler_rtxn = self.env.read_txn()?;
-
-        let index_count = self.index_mapper.index_count(&scheduler_rtxn)?;
-
-        // when the instance is empty, we still need to tell that to remotes, as they cannot know of that fact and will be waiting for
-        // data
-        if index_count == 0 {
-            for (remote_name, remote) in remotes {
-                let target = TargetInstance {
-                    base_url: &remote.url,
-                    api_key: remote.write_api_key.as_deref(),
-                };
-
-                let res = self.export_no_index(
-                    target,
-                    out_name,
-                    network_change_origin,
-                    &agent,
-                    must_stop_processing,
-                );
-
-                if let Err(err) = res {
-                    tracing::warn!("Could not signal not to wait documents to `{remote_name}` due to error: {err}");
-                }
-            }
-            return Ok(0);
-        }
-
-        let mut total_moved_documents = 0;
-
-        self.index_mapper.try_for_each_index::<(), ()>(
-            &scheduler_rtxn,
-            |index_uid, index| -> crate::Result<()> {
-                indexer_alloc.reset();
-                let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
-                let index_rtxn = index.read_txn()?;
-                let all_docids = index.external_documents_ids();
-                let mut documents_to_move_to =
-                    hashbrown::HashMap::<String, RoaringBitmap>::new();
-                let mut documents_to_delete = RoaringBitmap::new();
-
-                for res in all_docids.iter(&index_rtxn)? {
-                    let (external_docid, docid) = res?;
-                    match new_shards.processing_shard(external_docid) {
-                        Some(shard) if shard.is_own => continue,
-                        Some(shard) => {
-                            documents_to_move_to.entry_ref(&shard.name).or_default().insert(docid);
-                        }
-                        None => {
-                            documents_to_delete.insert(docid);
-                        }
-                    }
-                }
-
-                let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
-
-                for (remote_name, remote) in remotes {
-                    let documents_to_move =
-                        documents_to_move_to.remove(remote_name).unwrap_or_default();
-
-                    let target = TargetInstance {
-                        base_url: &remote.url,
-                        api_key: remote.write_api_key.as_deref(),
-                    };
-                    let options = ExportOptions {
-                        index_uid,
-                        payload_size: None,
-                        override_settings: false,
-                        export_mode: super::process_export::ExportMode::NetworkBalancing {
-                            index_count,
-                            export_old_remote_name: out_name,
-                            network_change_origin,
-                        },
-                    };
-                    let ctx = ExportContext {
-                        index,
-                        index_rtxn: &index_rtxn,
-                        universe: &documents_to_move,
-                        progress,
-                        agent: &agent,
-                        must_stop_processing,
-                    };
-
-                    let res = self.export_one_index(target, options, ctx);
-
-                    match res {
-                        Ok(_) =>{ documents_to_delete |= documents_to_move;}
-                        Err(err) => {
-                            tracing::warn!("Could not export documents to `{remote_name}` due to error: {err}\n - Note: Documents will be kept");
-                        }
-                    }
-
-
-                }
-
-                if documents_to_delete.is_empty() {
-                    return Ok(());
-                }
-
-                total_moved_documents += documents_to_delete.len();
-
-                self.delete_documents_from_index(progress, must_stop_processing, &indexer_alloc, index_uid, index, &err, index_rtxn, documents_to_delete, fields_ids_map)
-            },
-        )?;
-
-        Ok(total_moved_documents)
-    }
-
-    #[allow(clippy::too_many_arguments)]
-    fn delete_documents_from_index(
-        &self,
-        progress: &Progress,
-        must_stop_processing: &super::MustStopProcessing,
-        indexer_alloc: &Bump,
-        index_uid: &str,
-        index: &milli::Index,
-        err: &impl Fn(milli::Error) -> Error,
-        index_rtxn: milli::heed::RoTxn<'_, milli::heed::WithoutTls>,
-        documents_to_delete: RoaringBitmap,
-        fields_ids_map: milli::FieldsIdsMap,
-    ) -> std::result::Result<(), Error> {
-        let mut new_fields_ids_map = fields_ids_map.clone();
-
-        // candidates not empty => index not empty => a primary key is set
-        let primary_key = index.primary_key(&index_rtxn)?.unwrap();
-
-        let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
-            .map_err(milli::Error::from)
-            .map_err(err)?;
-
-        let mut index_wtxn = index.write_txn()?;
-
-        let mut indexer = indexer::DocumentDeletion::new();
-        indexer.delete_documents_by_docids(documents_to_delete);
-        let document_changes = indexer.into_changes(indexer_alloc, primary_key);
-        let embedders = index
-            .embedding_configs()
-            .embedding_configs(&index_wtxn)
-            .map_err(milli::Error::from)
-            .map_err(err)?;
-        let embedders = self.embedders(index_uid.to_string(), embedders)?;
-        let indexer_config = self.index_mapper.indexer_config();
-        let pool = &indexer_config.thread_pool;
-
-        indexer::index(
-            &mut index_wtxn,
-            index,
-            pool,
-            indexer_config.grenad_parameters(),
-            &fields_ids_map,
-            new_fields_ids_map,
-            None, // document deletion never changes primary key
-            &document_changes,
-            embedders,
-            &|| must_stop_processing.get(),
-            progress,
-            &EmbedderStats::default(),
-        )
-        .map_err(err)?;
-
-        // update stats
-        let mut mapper_wtxn = self.env.write_txn()?;
-        let stats = crate::index_mapper::IndexStats::new(index, &index_wtxn).map_err(err)?;
-        self.index_mapper.store_stats_of(&mut mapper_wtxn, index_uid, &stats)?;
-
-        index_wtxn.commit()?;
-        // update stats after committing changes to index
-        mapper_wtxn.commit()?;
-
-        Ok(())
-    }
-}
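The deleted `balance_documents` classified every document by its owning shard: documents whose shard `is_own` stay, the rest are grouped per remote for export and deleted locally afterwards. A self-contained sketch of that classification, using a hash-based stand-in for milli's `Shards::processing_shard` (the real routing differs):

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

// Stand-in shard router: hash the external document id onto the shard list.
fn owning_shard<'a>(shards: &'a [&'a str], external_docid: &str) -> &'a str {
    let mut hasher = DefaultHasher::new();
    external_docid.hash(&mut hasher);
    shards[(hasher.finish() as usize) % shards.len()]
}

fn main() {
    let shards = ["ms-0", "ms-1", "ms-2"];
    let local = "ms-0";

    let mut to_move: HashMap<&str, Vec<&str>> = HashMap::new();
    let mut kept = Vec::new();

    for docid in ["doc-a", "doc-b", "doc-c", "doc-d"] {
        let shard = owning_shard(&shards, docid);
        if shard == local {
            kept.push(docid); // `shard.is_own => continue` in the real code
        } else {
            // queued for export to the new owner, deleted locally once exported
            to_move.entry(shard).or_default().push(docid);
        }
    }

    println!("kept locally: {kept:?}");
    println!("to export per remote: {to_move:?}");
}
```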
@@ -1,12 +1,7 @@
 mod autobatcher;
 #[cfg(test)]
 mod autobatcher_test;
-#[cfg(not(feature = "enterprise"))]
-mod community_edition;
 mod create_batch;
-#[cfg(feature = "enterprise")]
-mod enterprise_edition;
-
 mod process_batch;
 mod process_dump_creation;
 mod process_export;
@@ -26,6 +21,7 @@ use std::path::PathBuf;
 use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
 use std::sync::Arc;

+use convert_case::{Case, Casing as _};
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::heed::{Env, WithoutTls};
 use meilisearch_types::milli;
@@ -137,7 +133,6 @@ impl Scheduler {
             max_number_of_tasks: _,
             max_number_of_batched_tasks,
             batched_tasks_size_limit,
-            export_default_payload_size_bytes: _,
             instance_features: _,
             auto_upgrade: _,
             embedding_cache_cap,
@@ -183,8 +178,6 @@ impl IndexScheduler {
             self.breakpoint(crate::test_utils::Breakpoint::Start);
         }

-        let previous_processing_batch = self.processing_tasks.write().unwrap().stop_processing();
-
         if self.cleanup_enabled {
             let mut wtxn = self.env.write_txn()?;
             self.queue.cleanup_task_queue(&mut wtxn)?;
@@ -192,15 +185,10 @@ impl IndexScheduler {
         }

         let rtxn = self.env.read_txn().map_err(Error::HeedTransaction)?;
-        let (batch, mut processing_batch) = match self
-            .create_next_batch(&rtxn, &previous_processing_batch.processing)
-            .map_err(|e| Error::CreateBatch(Box::new(e)))?
-        {
+        let (batch, mut processing_batch) =
+            match self.create_next_batch(&rtxn).map_err(|e| Error::CreateBatch(Box::new(e)))? {
                 Some(batch) => batch,
-            None => {
-                *self.processing_tasks.write().unwrap() = previous_processing_batch;
-                return Ok(TickOutcome::WaitForSignal);
-            }
+                None => return Ok(TickOutcome::WaitForSignal),
             };
         let index_uid = batch.index_uid().map(ToOwned::to_owned);
         drop(rtxn);
@@ -272,14 +260,7 @@ impl IndexScheduler {
         self.maybe_fail(crate::test_utils::FailureLocation::AcquiringWtxn)?;

        progress.update_progress(BatchProgress::WritingTasksToDisk);

         processing_batch.finished();
-        // whether the batch made progress.
-        // a batch make progress if it failed or if it contains at least one fully processed (or cancelled) task.
-        //
-        // if a batch did not make progress, it means that all of its tasks are waiting on the scheduler to make progress,
-        // and so we must wait for new tasks. Such a batch is not persisted to DB, and is resumed on the next tick.
-        let mut batch_made_progress = false;
         let mut stop_scheduler_forever = false;
         let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
         let mut canceled = RoaringBitmap::new();
@@ -300,11 +281,7 @@ impl IndexScheduler {
                 #[allow(unused_variables)]
                 for (i, mut task) in tasks.into_iter().enumerate() {
                     task_progress.fetch_add(1, Ordering::Relaxed);
-                    processing_batch.update_from_task(&task);
-                    if !matches!(task.status, Status::Processing | Status::Enqueued) {
-                        batch_made_progress = true;
-                        processing_batch.finish_task(&mut task);
-                    }
+                    processing_batch.update(&mut task);
                     if task.status == Status::Canceled {
                         canceled.insert(task.uid);
                         canceled_by = task.canceled_by;
@@ -371,9 +348,6 @@ impl IndexScheduler {
                 }
                 // In case of a failure we must get back and patch all the tasks with the error.
                 Err(err) => {
-                    // always persist failed batches
-                    batch_made_progress = true;
-
                     #[cfg(test)]
                     self.breakpoint(crate::test_utils::Breakpoint::ProcessBatchFailed);
                     let (task_progress, task_progress_obj) = AtomicTaskStep::new(ids.len() as u32);
@@ -397,10 +371,7 @@ impl IndexScheduler {
                         task.status = Status::Failed;
                         task.error = Some(error.clone());
                         task.details = task.details.map(|d| d.to_failed());
-                        processing_batch.update_from_task(&task);
-                        if !matches!(task.status, Status::Processing | Status::Enqueued) {
-                            processing_batch.finish_task(&mut task);
-                        }
+                        processing_batch.update(&mut task);

                         #[cfg(test)]
                         self.maybe_fail(
@@ -423,12 +394,44 @@ impl IndexScheduler {
         let ProcessBatchInfo { congestion, pre_commit_dabases_sizes, post_commit_dabases_sizes } =
             process_batch_info;

-        processing_batch.write_stats(
-            &progress,
-            congestion,
-            pre_commit_dabases_sizes,
-            post_commit_dabases_sizes,
-        );
+        processing_batch.stats.progress_trace =
+            progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
+        processing_batch.stats.write_channel_congestion = congestion.map(|congestion| {
+            let mut congestion_info = serde_json::Map::new();
+            congestion_info.insert("attempts".into(), congestion.attempts.into());
+            congestion_info.insert("blocking_attempts".into(), congestion.blocking_attempts.into());
+            congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
+            congestion_info
+        });
+        processing_batch.stats.internal_database_sizes = pre_commit_dabases_sizes
+            .iter()
+            .flat_map(|(dbname, pre_size)| {
+                post_commit_dabases_sizes
+                    .get(dbname)
+                    .map(|post_size| {
+                        use std::cmp::Ordering::{Equal, Greater, Less};

+                        use byte_unit::Byte;
+                        use byte_unit::UnitType::Binary;

+                        let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
+                        let diff_size = post_size.abs_diff(*pre_size) as u64;
+                        let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
+                        let sign = match post_size.cmp(pre_size) {
+                            Equal => return None,
+                            Greater => "+",
+                            Less => "-",
+                        };

+                        Some((
+                            dbname.to_case(Case::Camel),
+                            format!("{post:#.2} ({sign}{diff:#.2})").into(),
+                        ))
+                    })
+                    .into_iter()
+                    .flatten()
+            })
+            .collect();

         if let Some(congestion) = congestion {
             tracing::debug!(
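The inlined stats code above reports each database's post-commit size together with a signed delta, skipping unchanged databases. The same formatting, extracted into a runnable sketch; it only assumes the `byte_unit` crate, which the diff itself uses:

```rust
use byte_unit::{Byte, UnitType};

// Format "post-size (±delta)" for one database, or None when nothing changed.
fn format_db_delta(pre_size: usize, post_size: usize) -> Option<String> {
    use std::cmp::Ordering::{Equal, Greater, Less};

    let post = Byte::from_u64(post_size as u64).get_appropriate_unit(UnitType::Binary);
    let diff = Byte::from_u64(post_size.abs_diff(pre_size) as u64)
        .get_appropriate_unit(UnitType::Binary);
    let sign = match post_size.cmp(&pre_size) {
        Equal => return None, // unchanged databases are omitted from the stats
        Greater => "+",
        Less => "-",
    };
    Some(format!("{post:#.2} ({sign}{diff:#.2})"))
}

fn main() {
    // Grew by one mebibyte, e.g. "2.00 MiB (+1.00 MiB)".
    println!("{:?}", format_db_delta(1 << 20, 2 << 20));
    assert_eq!(format_db_delta(42, 42), None);
}
```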
@@ -441,16 +444,13 @@ impl IndexScheduler {

         tracing::debug!("call trace: {:?}", progress.accumulated_durations());

-        if batch_made_progress {
             self.queue.write_batch(&mut wtxn, processing_batch, &ids)?;
-        }

         #[cfg(test)]
         self.maybe_fail(crate::test_utils::FailureLocation::CommittingWtxn)?;

         wtxn.commit().map_err(Error::HeedTransaction)?;

-        if batch_made_progress {
             // We should stop processing AFTER everything is processed and written to disk otherwise, a batch (which only lives in RAM) may appear in the processing task
             // and then become « not found » for some time until the commit everything is written and the final commit is made.
             self.processing_tasks.write().unwrap().stop_processing();
@@ -480,10 +480,10 @@ impl IndexScheduler {
             })?;

             self.notify_webhooks(ids);
-        }

         #[cfg(test)]
         self.breakpoint(crate::test_utils::Breakpoint::AfterProcessing);

         if stop_scheduler_forever {
             Ok(TickOutcome::StopProcessingForever)
         } else {
@@ -539,10 +539,6 @@ impl IndexScheduler {

                 Ok((tasks, ProcessBatchInfo::default()))
             }
-            Batch::NetworkIndexBatch { network_task, inner_batch } => {
-                self.process_network_index_batch(network_task, inner_batch, current_batch, progress)
-            }
-            Batch::NetworkReady { task } => self.process_network_ready(task, progress),
         }
     }

@@ -1,6 +1,5 @@
 use std::collections::BTreeMap;
 use std::io::{self, Write as _};
-use std::ops::ControlFlow;
 use std::sync::atomic;
 use std::time::Duration;

@@ -8,7 +7,6 @@ use backoff::ExponentialBackoff;
 use byte_unit::Byte;
 use flate2::write::GzEncoder;
 use flate2::Compression;
-use meilisearch_types::error::Code;
 use meilisearch_types::index_uid_pattern::IndexUidPattern;
 use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
 use meilisearch_types::milli::index::EmbeddingsWithMetadata;
@@ -17,9 +15,7 @@ use meilisearch_types::milli::update::{request_threads, Setting};
 use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
 use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
 use meilisearch_types::settings::{self, SecretPolicy};
-use meilisearch_types::tasks::network::{headers, ImportData, ImportMetadata, Origin};
 use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
-use roaring::RoaringBitmap;
 use serde::Deserialize;
 use ureq::{json, Response};

@@ -54,7 +50,6 @@ impl IndexScheduler {
         let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
         let must_stop_processing = self.scheduler.must_stop_processing.clone();
         for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
-            let err = |err| Error::from_milli(err, Some(uid.to_string()));
             if must_stop_processing.get() {
                 return Err(Error::AbortedTask);
             }
@@ -66,62 +61,14 @@ impl IndexScheduler {
             ));

             let ExportIndexSettings { filter, override_settings } = export_settings;

             let index = self.index(uid)?;
             let index_rtxn = index.read_txn()?;
-            let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
-            let filter_universe =
-                filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
-            let whole_universe =
-                index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
-            let universe = filter_universe.unwrap_or(whole_universe);
-            let target = TargetInstance { base_url, api_key };
-            let ctx = ExportContext {
-                index: &index,
-                index_rtxn: &index_rtxn,
-                universe: &universe,
-                progress: &progress,
-                agent: &agent,
-                must_stop_processing: &must_stop_processing,
-            };
-            let options = ExportOptions {
-                index_uid: uid,
-                payload_size,
-                override_settings: *override_settings,
-                export_mode: ExportMode::ExportRoute,
-            };
-            let total_documents = self.export_one_index(target, options, ctx)?;
-
-            output.insert(
-                IndexUidPattern::new_unchecked(uid.clone()),
-                DetailsExportIndexSettings {
-                    settings: (*export_settings).clone(),
-                    matched_documents: Some(total_documents),
-                },
-            );
-        }
-
-        Ok(output)
-    }
-
-    pub(super) fn export_one_index(
-        &self,
-        target: TargetInstance<'_>,
-        options: ExportOptions<'_>,
-        ctx: ExportContext<'_>,
-    ) -> Result<u64, Error> {
-        let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
-        let total_index_documents = ctx.universe.len();
-        let task_network = options.task_network(total_index_documents);
-
-        let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
-        let url = format!(
-            "{base_url}/indexes/{index_uid}",
-            base_url = target.base_url,
-            index_uid = options.index_uid
-        );
-        let response = retry(ctx.must_stop_processing, || {
-            let mut request = ctx.agent.get(&url);
+            let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
+            // First, check if the index already exists
+            let url = format!("{base_url}/indexes/{uid}");
+            let response = retry(&must_stop_processing, || {
+                let mut request = agent.get(&url);
                 if let Some(bearer) = &bearer {
                     request = request.set("Authorization", bearer);
                 }
@@ -130,135 +77,126 @@ impl IndexScheduler {
             });
             let index_exists = match response {
                 Ok(response) => response.status() == 200,
-                Err(Error::FromRemoteWhenExporting { code, .. })
-                    if code == Code::IndexNotFound.name() =>
-                {
+                Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
                     false
                 }
                 Err(e) => return Err(e),
             };
-            let primary_key =
-                ctx.index.primary_key(ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
+            let primary_key = index
+                .primary_key(&index_rtxn)
+                .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
 
+            // Create the index
             if !index_exists {
-                let url = format!("{base_url}/indexes", base_url = target.base_url);
-                let _ = handle_response(retry(ctx.must_stop_processing, || {
-                    let mut request = ctx.agent.post(&url);
-                    if let Some((import_data, origin, metadata)) = &task_network {
-                        request = set_network_ureq_headers(request, import_data, origin, metadata);
-                    }
-
-                    if let Some(bearer) = bearer.as_ref() {
+                let url = format!("{base_url}/indexes");
+                retry(&must_stop_processing, || {
+                    let mut request = agent.post(&url);
+                    if let Some(bearer) = &bearer {
                         request = request.set("Authorization", bearer);
                     }
-                    let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
+                    let index_param = json!({ "uid": uid, "primaryKey": primary_key });
 
                     request.send_json(&index_param).map_err(into_backoff_error)
-                }))?;
+                })?;
             }
-            if index_exists && options.override_settings {
-                let _ = handle_response(retry(ctx.must_stop_processing, || {
-                    let mut request = ctx.agent.patch(&url);
-                    if let Some((import_data, origin, metadata)) = &task_network {
-                        request = set_network_ureq_headers(request, import_data, origin, metadata);
-                    }
+
+            // Patch the index primary key
+            if index_exists && *override_settings {
+                let url = format!("{base_url}/indexes/{uid}");
+                retry(&must_stop_processing, || {
+                    let mut request = agent.patch(&url);
                     if let Some(bearer) = &bearer {
                         request = request.set("Authorization", bearer);
                     }
                     let index_param = json!({ "primaryKey": primary_key });
                     request.send_json(&index_param).map_err(into_backoff_error)
-                }))?;
+                })?;
             }
-            if !index_exists || options.override_settings {
+
+            // Send the index settings
+            if !index_exists || *override_settings {
                 let mut settings =
-                    settings::settings(ctx.index, ctx.index_rtxn, SecretPolicy::RevealSecrets)
-                        .map_err(err)?;
+                    settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
+                        .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
                 // Remove the experimental chat setting if not enabled
                 if self.features().check_chat_completions("exporting chat settings").is_err() {
                     settings.chat = Setting::NotSet;
                 }
                 // Retry logic for sending settings
-                let url = format!(
-                    "{base_url}/indexes/{index_uid}/settings",
-                    base_url = target.base_url,
-                    index_uid = options.index_uid
-                );
-
-                let _ = handle_response(retry(ctx.must_stop_processing, || {
-                    let mut request = ctx.agent.patch(&url);
-
-                    if let Some((import_data, origin, metadata)) = &task_network {
-                        request = set_network_ureq_headers(request, import_data, origin, metadata);
-                    }
-
+                let url = format!("{base_url}/indexes/{uid}/settings");
+                retry(&must_stop_processing, || {
+                    let mut request = agent.patch(&url);
                     if let Some(bearer) = bearer.as_ref() {
                         request = request.set("Authorization", bearer);
                     }
                     request.send_json(settings.clone()).map_err(into_backoff_error)
-                }))?;
+                })?;
             }
 
-            let fields_ids_map = ctx.index.fields_ids_map(ctx.index_rtxn)?;
-            let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
-            let total_documents = ctx.universe.len() as u32;
-            let (step, progress_step) = AtomicDocumentStep::new(total_documents);
-            ctx.progress.update_progress(progress_step);
+            let filter = filter
+                .as_ref()
+                .map(Filter::from_json)
+                .transpose()
+                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
+                .flatten();
 
-            let limit = options
-                .payload_size
-                .map(|ps| ps.as_u64() as usize)
-                .unwrap_or(self.export_default_payload_size_bytes.as_u64() as usize);
-            let documents_url = format!(
-                "{base_url}/indexes/{index_uid}/documents",
-                base_url = target.base_url,
-                index_uid = options.index_uid
+            let filter_universe = filter
+                .map(|f| f.evaluate(&index_rtxn, &index))
+                .transpose()
+                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
+            let whole_universe = index
+                .documents_ids(&index_rtxn)
+                .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
+            let universe = filter_universe.unwrap_or(whole_universe);
+
+            let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
+            let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
+
+            // We don't need to keep this one alive as we will
+            // spawn many threads to process the documents
+            drop(index_rtxn);
+
+            let total_documents = universe.len() as u32;
+            let (step, progress_step) = AtomicDocumentStep::new(total_documents);
+            progress.update_progress(progress_step);
+
+            output.insert(
+                IndexUidPattern::new_unchecked(uid.clone()),
+                DetailsExportIndexSettings {
+                    settings: (*export_settings).clone(),
+                    matched_documents: Some(total_documents as u64),
+                },
             );
 
-            // no document to send, but we must still send a task when performing network balancing
-            if ctx.universe.is_empty() {
-                if let Some((import_data, network_change_origin, metadata)) = task_network {
-                    let mut compressed_buffer = Vec::new();
-                    // ignore control flow, we're returning anyway
-                    let _ = send_buffer(
-                        b" ", // needs something otherwise meili complains about missing payload
-                        &mut compressed_buffer,
-                        ctx.must_stop_processing,
-                        ctx.agent,
-                        &documents_url,
-                        bearer.as_deref(),
-                        Some(&(import_data, network_change_origin.clone(), metadata)),
-                        &err,
-                    )?;
-                }
-                return Ok(0);
-            }
+            let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
+            let documents_url = format!("{base_url}/indexes/{uid}/documents");
 
             let results = request_threads()
-                .broadcast(|broadcast| {
-                    let mut task_network = options.task_network(total_index_documents);
-                    let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;
+                .broadcast(|ctx| {
+                    let index_rtxn = index
+                        .read_txn()
+                        .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
 
                     let mut buffer = Vec::new();
                     let mut tmp_buffer = Vec::new();
                     let mut compressed_buffer = Vec::new();
-                    for (i, docid) in ctx.universe.iter().enumerate() {
-                        if i % broadcast.num_threads() != broadcast.index() {
+                    for (i, docid) in universe.iter().enumerate() {
+                        if i % ctx.num_threads() != ctx.index() {
                             continue;
                         }
-                        if let Some((import_data, _, metadata)) = &mut task_network {
-                            import_data.document_count += 1;
-                            metadata.task_key = Some(docid);
-                        }
 
-                        let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;
+                        let document = index
+                            .document(&index_rtxn, docid)
+                            .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
 
-                        let mut document =
-                            obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;
+                        let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
+                            .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
 
                         // TODO definitely factorize this code
                         'inject_vectors: {
-                            let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;
+                            let embeddings = index
+                                .embeddings(&index_rtxn, docid)
+                                .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
 
                             if embeddings.is_empty() {
                                 break 'inject_vectors;
@@ -269,12 +207,15 @@ impl IndexScheduler {
                                 .or_insert(serde_json::Value::Object(Default::default()));
 
                             let serde_json::Value::Object(vectors) = vectors else {
-                                return Err(err(milli::Error::UserError(
+                                return Err(Error::from_milli(
+                                    milli::Error::UserError(
                                     milli::UserError::InvalidVectorsMapType {
                                         document_id: {
-                                            if let Ok(Some(Ok(index))) = ctx
-                                                .index
-                                                .external_id_of(&index_rtxn, std::iter::once(docid))
+                                            if let Ok(Some(Ok(index))) = index
+                                                .external_id_of(
+                                                    &index_rtxn,
+                                                    std::iter::once(docid),
+                                                )
                                                 .map(|it| it.into_iter().next())
                                             {
                                                 index
@@ -284,7 +225,9 @@ impl IndexScheduler {
                                         },
                                         value: vectors.clone(),
                                     },
-                                )));
+                                    ),
+                                    Some(uid.to_string()),
+                                ));
                             };
 
                             for (
@@ -293,9 +236,9 @@ impl IndexScheduler {
                             ) in embeddings
                             {
                                 let embeddings = ExplicitVectors {
-                                    embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
-                                        embeddings,
-                                    )),
+                                    embeddings: Some(
+                                        VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
+                                    ),
                                     regenerate: regenerate &&
                                         // Meilisearch does not handle well dumps with fragments, because as the fragments
                                         // are marked as user-provided,
@@ -303,39 +246,42 @@ impl IndexScheduler {
                                         // To prevent this, we mark embeddings has non regenerate in this case.
                                         !has_fragments,
                                 };
-                                vectors
-                                    .insert(embedder_name, serde_json::to_value(embeddings).unwrap());
+                                vectors.insert(
+                                    embedder_name,
+                                    serde_json::to_value(embeddings).unwrap(),
+                                );
                             }
                         }
 
                         tmp_buffer.clear();
                         serde_json::to_writer(&mut tmp_buffer, &document)
                             .map_err(milli::InternalError::from)
-                            .map_err(milli::Error::from)
-                            .map_err(err)?;
+                            .map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
 
                         // Make sure we put at least one document in the buffer even
                         // though we might go above the buffer limit before sending
                         if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
-                            let control_flow = send_buffer(
-                                &buffer,
-                                &mut compressed_buffer,
-                                ctx.must_stop_processing,
-                                ctx.agent,
-                                &documents_url,
-                                bearer.as_deref(),
-                                task_network.as_ref(),
-                                &err,
-                            )?;
+                            // We compress the documents before sending them
+                            let mut encoder =
+                                GzEncoder::new(&mut compressed_buffer, Compression::default());
+                            encoder
+                                .write_all(&buffer)
+                                .map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
+                            encoder
+                                .finish()
+                                .map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
+
+                            retry(&must_stop_processing, || {
+                                let mut request = agent.post(&documents_url);
+                                request = request.set("Content-Type", "application/x-ndjson");
+                                request = request.set("Content-Encoding", "gzip");
+                                if let Some(bearer) = &bearer {
+                                    request = request.set("Authorization", bearer);
+                                }
+                                request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
+                            })?;
                             buffer.clear();
                             compressed_buffer.clear();
-                            if let Some((import_data, _, metadata)) = &mut task_network {
-                                import_data.document_count = 0;
-                                metadata.task_key = None;
-                            }
-                            if control_flow.is_break() {
-                                return Ok(());
-                            }
                         }
                         buffer.extend_from_slice(&tmp_buffer);
 
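Both sides of the hunk above flush the NDJSON buffer the same way: gzip it with flate2's GzEncoder, then POST it with ureq under the scheduler's retry helper. A minimal standalone sketch of that compress-and-send pattern follows; the endpoint URL and API key are placeholders, and the bare `ureq::post` call stands in for the retry/backoff wrapper and the shared agent used in the real code.

use std::io::Write;

use flate2::write::GzEncoder;
use flate2::Compression;

fn send_ndjson_gzipped(buffer: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
    // Compress the NDJSON payload before sending it over the wire.
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(buffer)?;
    let compressed = encoder.finish()?;

    // Hypothetical target instance; the diff reuses one agent and retries on failure.
    let response = ureq::post("http://localhost:7700/indexes/movies/documents")
        .set("Content-Type", "application/x-ndjson")
        .set("Content-Encoding", "gzip")
        .set("Authorization", "Bearer <api-key>")
        .send_bytes(&compressed)?;
    // Meilisearch answers document additions with 202 Accepted and a task uid.
    println!("status: {}", response.status());
    Ok(())
}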
@@ -344,160 +290,31 @@ impl IndexScheduler {
                     }
                 }
 
-                // send the last buffered documents if any
-                if !buffer.is_empty() {
-                    // ignore control flow here
-                    let _ = send_buffer(
-                        &buffer,
-                        &mut compressed_buffer,
-                        ctx.must_stop_processing,
-                        ctx.agent,
-                        &documents_url,
-                        bearer.as_deref(),
-                        task_network.as_ref(),
-                        &err,
-                    )?;
-                }
-
-                Ok(())
-            })
-            .map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
-        for result in results {
-            result?;
-        }
-        step.store(total_documents, atomic::Ordering::Relaxed);
-        Ok(total_documents as u64)
-    }
-
-    #[cfg(feature = "enterprise")] // only used in enterprise edition for now
-    pub(super) fn export_no_index(
-        &self,
-        target: TargetInstance<'_>,
-        export_old_remote_name: &str,
-        network_change_origin: &Origin,
-        agent: &ureq::Agent,
-        must_stop_processing: &MustStopProcessing,
-    ) -> Result<(), Error> {
-        let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
-        let url = format!("{base_url}/network", base_url = target.base_url,);
-
-        {
-            let _ = handle_response(retry(must_stop_processing, || {
-                let request = agent.patch(&url);
-                let mut request = set_network_ureq_headers(
-                    request,
-                    &ImportData {
-                        remote_name: export_old_remote_name.to_string(),
-                        index_name: None,
-                        document_count: 0,
-                    },
-                    network_change_origin,
-                    &ImportMetadata { index_count: 0, task_key: None, total_index_documents: 0 },
-                );
-                request = request.set("Content-Type", "application/json");
+                    retry(&must_stop_processing, || {
+                        let mut request = agent.post(&documents_url);
+                        request = request.set("Content-Type", "application/x-ndjson");
                         if let Some(bearer) = &bearer {
                             request = request.set("Authorization", bearer);
                         }
-                request
-                    .send_json(
-                        // empty payload that will be disregarded
-                        serde_json::Value::Object(Default::default()),
-                    )
-                    .map_err(into_backoff_error)
-            }))?;
-        }
-
+                        request.send_bytes(&buffer).map_err(into_backoff_error)
+                    })?;
+
                     Ok(())
-    }
+                })
+                .map_err(|e| {
+                    Error::from_milli(
+                        milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
+                        Some(uid.to_string()),
+                    )
+                })?;
+            for result in results {
+                result?;
             }
 
-fn set_network_ureq_headers(
-    request: ureq::Request,
-    import_data: &ImportData,
-    origin: &Origin,
-    metadata: &ImportMetadata,
-) -> ureq::Request {
-    let request = request
-        .set(headers::PROXY_ORIGIN_REMOTE_HEADER, &urlencoding::encode(&origin.remote_name))
-        .set(headers::PROXY_ORIGIN_TASK_UID_HEADER, &origin.task_uid.to_string())
-        .set(headers::PROXY_ORIGIN_NETWORK_VERSION_HEADER, &origin.network_version.to_string())
-        .set(headers::PROXY_IMPORT_REMOTE_HEADER, &urlencoding::encode(&import_data.remote_name))
-        .set(headers::PROXY_IMPORT_DOCS_HEADER, &import_data.document_count.to_string())
-        .set(headers::PROXY_IMPORT_INDEX_COUNT_HEADER, &metadata.index_count.to_string())
-        .set(
-            headers::PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
-            &metadata.total_index_documents.to_string(),
-        );
-    let request = if let Some(index_name) = import_data.index_name.as_deref() {
-        request.set(headers::PROXY_IMPORT_INDEX_HEADER, &urlencoding::encode(index_name))
-    } else {
-        request
-    };
-    if let Some(task_key) = metadata.task_key {
-        request.set(headers::PROXY_IMPORT_TASK_KEY_HEADER, &task_key.to_string())
-    } else {
-        request
-    }
+            step.store(total_documents, atomic::Ordering::Relaxed);
 }
 
-#[allow(clippy::too_many_arguments)]
-fn send_buffer<'a>(
-    buffer: &'a [u8],
-    mut compressed_buffer: &'a mut Vec<u8>,
-    must_stop_processing: &MustStopProcessing,
-    agent: &ureq::Agent,
-    documents_url: &'a str,
-    bearer: Option<&'a str>,
-    task_network: Option<&(ImportData, Origin, ImportMetadata)>,
-    err: &'a impl Fn(milli::Error) -> crate::Error,
-) -> Result<ControlFlow<(), ()>> {
-    // We compress the documents before sending them
-    let mut encoder: GzEncoder<&mut &mut Vec<u8>> =
-        GzEncoder::new(&mut compressed_buffer, Compression::default());
-    encoder.write_all(buffer).map_err(milli::Error::from).map_err(err)?;
-    encoder.finish().map_err(milli::Error::from).map_err(err)?;
-
-    let res = retry(must_stop_processing, || {
-        let mut request = agent.post(documents_url);
-        request = request.set("Content-Type", "application/x-ndjson");
-        request = request.set("Content-Encoding", "gzip");
-        if let Some(bearer) = bearer {
-            request = request.set("Authorization", bearer);
-        }
-        if let Some((import_data, origin, metadata)) = task_network {
-            request = set_network_ureq_headers(request, import_data, origin, metadata);
-        }
-        request.send_bytes(compressed_buffer).map_err(into_backoff_error)
-    });
-
-    handle_response(res)
-}
-
-fn handle_response(res: Result<Response>) -> Result<ControlFlow<()>> {
-    match res {
-        Ok(_response) => Ok(ControlFlow::Continue(())),
-        Err(Error::FromRemoteWhenExporting { code, .. })
-            if code == Code::ImportTaskAlreadyReceived.name() =>
-        {
-            Ok(ControlFlow::Continue(()))
-        }
-        Err(Error::FromRemoteWhenExporting { code, message, .. })
-            if code == Code::ImportTaskUnknownRemote.name() =>
-        {
-            tracing::warn!("remote answered with: {message}");
-            Ok(ControlFlow::Break(()))
-        }
-        // note: there has already been many attempts to get this due to exponential backoff
-        Err(Error::FromRemoteWhenExporting { code, message, .. })
-            if code == Code::ImportTaskWithoutNetworkTask.name() =>
-        {
-            tracing::warn!("remote answered with: {message}");
-            Ok(ControlFlow::Break(()))
-        }
-        Err(e) => {
-            tracing::warn!("error while exporting: {e}");
-            Err(e)
-        }
+        Ok(output)
     }
 }
 
@@ -557,64 +374,4 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
         }
     }
 }
 
-// export_one_index arguments
-pub(super) struct TargetInstance<'a> {
-    pub(super) base_url: &'a str,
-    pub(super) api_key: Option<&'a str>,
-}
-
-pub(super) struct ExportOptions<'a> {
-    pub(super) index_uid: &'a str,
-    pub(super) payload_size: Option<&'a Byte>,
-    pub(super) override_settings: bool,
-    pub(super) export_mode: ExportMode<'a>,
-}
-
-impl ExportOptions<'_> {
-    fn task_network(
-        &self,
-        total_index_documents: u64,
-    ) -> Option<(ImportData, Origin, ImportMetadata)> {
-        if let ExportMode::NetworkBalancing {
-            index_count,
-            export_old_remote_name,
-            network_change_origin,
-        } = self.export_mode
-        {
-            Some((
-                ImportData {
-                    remote_name: export_old_remote_name.to_string(),
-                    index_name: Some(self.index_uid.to_string()),
-                    document_count: 0,
-                },
-                network_change_origin.clone(),
-                ImportMetadata { index_count, task_key: None, total_index_documents },
-            ))
-        } else {
-            None
-        }
-    }
-}
-
-pub(super) struct ExportContext<'a> {
-    pub(super) index: &'a meilisearch_types::milli::Index,
-    pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
-    pub(super) universe: &'a RoaringBitmap,
-    pub(super) progress: &'a Progress,
-    pub(super) agent: &'a ureq::Agent,
-    pub(super) must_stop_processing: &'a MustStopProcessing,
-}
-
-pub(super) enum ExportMode<'a> {
-    ExportRoute,
-    #[cfg_attr(not(feature = "enterprise"), allow(dead_code))]
-    NetworkBalancing {
-        index_count: u64,
-
-        export_old_remote_name: &'a str,
-        network_change_origin: &'a Origin,
-    },
-}
-
-// progress related
 enum ExportIndex {}
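The removed side groups `export_one_index`'s many parameters into three structs: where to export (`TargetInstance`), what and how (`ExportOptions`), and the shared machinery (`ExportContext`). A hypothetical call site, only to show how the pieces fit together; the surrounding variables (`index`, `index_rtxn`, `universe`, `progress`, `agent`, `must_stop_processing`, `scheduler`) are assumed to be in scope and are not taken from the diff.

// Sketch only: assumes the removed TargetInstance/ExportOptions/ExportContext
// definitions from the hunk above are in scope.
let target = TargetInstance { base_url: "http://remote:7700", api_key: Some("secret") };
let options = ExportOptions {
    index_uid: "movies",
    payload_size: None, // falls back to export_default_payload_size_bytes
    override_settings: false,
    export_mode: ExportMode::ExportRoute,
};
let ctx = ExportContext {
    index: &index,
    index_rtxn: &index_rtxn,
    universe: &universe,
    progress: &progress,
    agent: &agent,
    must_stop_processing: &must_stop_processing,
};
let exported_documents = scheduler.export_one_index(target, options, ctx)?;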
@@ -747,7 +747,6 @@ fn basic_get_stats() {
     "indexDeletion": 0,
     "indexSwap": 0,
     "indexUpdate": 0,
-    "networkTopologyChange": 0,
     "settingsUpdate": 0,
     "snapshotCreation": 0,
     "taskCancelation": 0,
@@ -783,7 +782,6 @@ fn basic_get_stats() {
     "indexDeletion": 0,
     "indexSwap": 0,
     "indexUpdate": 0,
-    "networkTopologyChange": 0,
     "settingsUpdate": 0,
     "snapshotCreation": 0,
     "taskCancelation": 0,
@@ -826,7 +824,6 @@ fn basic_get_stats() {
     "indexDeletion": 0,
     "indexSwap": 0,
     "indexUpdate": 0,
-    "networkTopologyChange": 0,
     "settingsUpdate": 0,
     "snapshotCreation": 0,
     "taskCancelation": 0,
@@ -870,7 +867,6 @@ fn basic_get_stats() {
     "indexDeletion": 0,
     "indexSwap": 0,
     "indexUpdate": 0,
-    "networkTopologyChange": 0,
     "settingsUpdate": 0,
     "snapshotCreation": 0,
     "taskCancelation": 0,
@@ -112,7 +112,6 @@ impl IndexScheduler {
             max_number_of_batched_tasks: usize::MAX,
             batched_tasks_size_limit: u64::MAX,
             instance_features: Default::default(),
-            export_default_payload_size_bytes: byte_unit::Byte::parse_str("20MiB", false).unwrap(),
             auto_upgrade: true, // Don't cost much and will ensure the happy path works
             embedding_cache_cap: 10,
             experimental_no_snapshot_compaction: false,
@@ -4,11 +4,9 @@ use std::collections::{BTreeSet, HashSet};
 use std::ops::Bound;
 use std::sync::Arc;
 
-use convert_case::{Case, Casing as _};
 use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchId, BatchStats};
 use meilisearch_types::heed::{Database, RoTxn, RwTxn};
-use meilisearch_types::milli::progress::Progress;
-use meilisearch_types::milli::{CboRoaringBitmapCodec, ChannelCongestion};
+use meilisearch_types::milli::CboRoaringBitmapCodec;
 use meilisearch_types::task_view::DetailsView;
 use meilisearch_types::tasks::{
     BatchStopReason, Details, IndexSwap, Kind, KindWithContent, Status,
@@ -121,8 +119,17 @@ impl ProcessingBatch {
         self.stats.total_nb_tasks = 0;
     }
 
-    /// Update batch task from a processed task
-    pub fn update_from_task(&mut self, task: &Task) {
+    /// Update the timestamp of the tasks and the inner structure of this structure.
+    pub fn update(&mut self, task: &mut Task) {
+        // We must re-set this value in case we're dealing with a task that has been added between
+        // the `processing` and `finished` state
+        // We must re-set this value in case we're dealing with a task that has been added between
+        // the `processing` and `finished` state or that failed.
+        task.batch_uid = Some(self.uid);
+        // Same
+        task.started_at = Some(self.started_at);
+        task.finished_at = self.finished_at;
+
         self.statuses.insert(task.status);
 
         // Craft an aggregation of the details of all the tasks encountered in this batch.
@@ -137,63 +144,6 @@ impl ProcessingBatch {
         }
     }
 
-    /// Update the timestamp of the tasks after they're done
-    pub fn finish_task(&self, task: &mut Task) {
-        // We must re-set this value in case we're dealing with a task that has been added between
-        // the `processing` and `finished` state or that failed.
-        task.batch_uid = Some(self.uid);
-        // Same
-        task.started_at = Some(self.started_at);
-        task.finished_at = self.finished_at;
-    }
-
-    pub fn write_stats(
-        &mut self,
-        progress: &Progress,
-        congestion: Option<ChannelCongestion>,
-        pre_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
-        post_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
-    ) {
-        self.stats.progress_trace =
-            progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
-        self.stats.write_channel_congestion = congestion.map(|congestion| {
-            let mut congestion_info = serde_json::Map::new();
-            congestion_info.insert("attempts".into(), congestion.attempts.into());
-            congestion_info.insert("blocking_attempts".into(), congestion.blocking_attempts.into());
-            congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
-            congestion_info
-        });
-        self.stats.internal_database_sizes = pre_commit_dabases_sizes
-            .iter()
-            .flat_map(|(dbname, pre_size)| {
-                post_commit_dabases_sizes
-                    .get(dbname)
-                    .map(|post_size| {
-                        use std::cmp::Ordering::{Equal, Greater, Less};
-
-                        use byte_unit::Byte;
-                        use byte_unit::UnitType::Binary;
-
-                        let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
-                        let diff_size = post_size.abs_diff(*pre_size) as u64;
-                        let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
-                        let sign = match post_size.cmp(pre_size) {
-                            Equal => return None,
-                            Greater => "+",
-                            Less => "-",
-                        };
-
-                        Some((
-                            dbname.to_case(Case::Camel),
-                            format!("{post:#.2} ({sign}{diff:#.2})").into(),
-                        ))
-                    })
-                    .into_iter()
-                    .flatten()
-            })
-            .collect();
-    }
-
     pub fn to_batch(&self) -> Batch {
         Batch {
             uid: self.uid,
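The removed `write_stats` formats each internal database's post-commit size together with a signed delta using the `byte_unit` crate. A small self-contained sketch of just that formatting, under the same byte_unit 5.x API the removed code uses:

use byte_unit::{Byte, UnitType};

fn size_with_diff(pre_size: u64, post_size: u64) -> Option<String> {
    let post = Byte::from_u64(post_size).get_appropriate_unit(UnitType::Binary);
    let diff = Byte::from_u64(post_size.abs_diff(pre_size)).get_appropriate_unit(UnitType::Binary);
    let sign = match post_size.cmp(&pre_size) {
        std::cmp::Ordering::Equal => return None, // unchanged databases are skipped
        std::cmp::Ordering::Greater => "+",
        std::cmp::Ordering::Less => "-",
    };
    // Produces strings like "1.21 GiB (+24.00 MiB)"
    Some(format!("{post:#.2} ({sign}{diff:#.2})"))
}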
@@ -336,7 +286,6 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
         | K::DumpCreation { .. }
         | K::Export { .. }
         | K::UpgradeDatabase { .. }
-        | K::NetworkTopologyChange(_)
         | K::SnapshotCreation => (),
     };
     if let Some(Details::IndexSwap { swaps }) = &mut task.details {
@@ -678,9 +627,6 @@ impl crate::IndexScheduler {
             } => {
                 assert_eq!(kind.as_kind(), Kind::IndexCompaction);
             }
-            Details::NetworkTopologyChange { moved_documents: _, message: _ } => {
-                assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
-            }
         }
     }
 
@@ -13,7 +13,6 @@ license.workspace = true
 [dependencies]
 actix-web = { version = "4.12.0", default-features = false }
 anyhow = "1.0.100"
-base64 = "0.22.1"
 bumpalo = "3.19.0"
 bumparaw-collections = "0.1.4"
 byte-unit = { version = "5.1.6", features = ["serde"] }
@@ -25,7 +24,6 @@ enum-iterator = "2.3.0"
 file-store = { path = "../file-store" }
 flate2 = "1.1.5"
 fst = "0.4.7"
-itertools = "0.14.0"
 memmap2 = "0.9.9"
 milli = { path = "../milli" }
 roaring = { version = "0.10.12", features = ["serde"] }
@@ -9,17 +9,21 @@ use crate::network::Network;
 
 impl Network {
     pub fn shards(&self) -> Option<Shards> {
-        if self.sharding() {
-            Some(Shards::from_remotes_local(
-                self.remotes.keys().map(String::as_str),
-                self.local.as_deref(),
-            ))
+        if self.sharding {
+            let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
+            let others = self
+                .remotes
+                .keys()
+                .filter(|name| name.as_str() != this)
+                .map(|name| name.to_owned())
+                .collect();
+            Some(Shards { own: vec![this.to_owned()], others })
         } else {
             None
         }
    }
 
     pub fn sharding(&self) -> bool {
-        self.leader.is_some()
+        self.sharding
     }
 }
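The new `shards()` builds the `Shards` value by hand: the local remote becomes `own` and every other remote name goes into `others`, where the old side delegated to `Shards::from_remotes_local`. A rough illustration of the new computation on plain data; the `Shards { own, others }` shape is taken from the right-hand side above, while the stand-in struct and the remote names are made up.

use std::collections::BTreeMap;

// Hypothetical stand-in for meilisearch's Shards type.
struct Shards {
    own: Vec<String>,
    others: Vec<String>,
}

fn shards(local: Option<&str>, remotes: &BTreeMap<String, ()>, sharding: bool) -> Option<Shards> {
    if !sharding {
        return None;
    }
    let this = local.expect("Inconsistent `sharding` and `self`");
    let others = remotes
        .keys()
        .filter(|name| name.as_str() != this) // never shard to ourselves
        .cloned()
        .collect();
    Some(Shards { own: vec![this.to_owned()], others })
}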
@@ -156,7 +156,7 @@ macro_rules! make_error_codes {
     }
 
     /// return error name, used as error code
-    pub fn name(&self) -> String {
+    fn name(&self) -> String {
         match self {
             $(
                 Code::$code_ident => stringify!($code_ident).to_case(convert_case::Case::Snake)
@@ -214,9 +214,6 @@ ImmutableApiKeyUid , InvalidRequest , BAD_REQUEST;
 ImmutableApiKeyUpdatedAt , InvalidRequest , BAD_REQUEST;
 ImmutableIndexCreatedAt , InvalidRequest , BAD_REQUEST;
 ImmutableIndexUpdatedAt , InvalidRequest , BAD_REQUEST;
-ImportTaskAlreadyReceived , InvalidRequest , PRECONDITION_FAILED;
-ImportTaskUnknownRemote , InvalidRequest , PRECONDITION_FAILED;
-ImportTaskWithoutNetworkTask , InvalidRequest , SERVICE_UNAVAILABLE;
 IndexAlreadyExists , InvalidRequest , CONFLICT ;
 IndexCreationFailed , Internal , INTERNAL_SERVER_ERROR;
 IndexNotFound , InvalidRequest , NOT_FOUND;
@@ -273,9 +270,9 @@ InvalidMultiSearchQueryRankingRules , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchQueryPosition , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchRemote , InvalidRequest , BAD_REQUEST ;
 InvalidMultiSearchWeight , InvalidRequest , BAD_REQUEST ;
-InvalidNetworkLeader , InvalidRequest , BAD_REQUEST ;
 InvalidNetworkRemotes , InvalidRequest , BAD_REQUEST ;
 InvalidNetworkSelf , InvalidRequest , BAD_REQUEST ;
+InvalidNetworkSharding , InvalidRequest , BAD_REQUEST ;
 InvalidNetworkSearchApiKey , InvalidRequest , BAD_REQUEST ;
 InvalidNetworkWriteApiKey , InvalidRequest , BAD_REQUEST ;
 InvalidNetworkUrl , InvalidRequest , BAD_REQUEST ;
@@ -380,9 +377,7 @@ MissingPayload , InvalidRequest , BAD_REQUEST ;
 MissingSearchHybrid , InvalidRequest , BAD_REQUEST ;
 MissingSwapIndexes , InvalidRequest , BAD_REQUEST ;
 MissingTaskFilters , InvalidRequest , BAD_REQUEST ;
-NetworkVersionMismatch , InvalidRequest , PRECONDITION_FAILED ;
 NoSpaceLeftOnDevice , System , UNPROCESSABLE_ENTITY;
-NotLeader , InvalidRequest , BAD_REQUEST ;
 PayloadTooLarge , InvalidRequest , PAYLOAD_TOO_LARGE ;
 RemoteBadResponse , System , BAD_GATEWAY ;
 RemoteBadRequest , InvalidRequest , BAD_REQUEST ;
@@ -396,9 +391,6 @@ TaskFileNotFound , InvalidRequest , NOT_FOUND ;
 BatchNotFound , InvalidRequest , NOT_FOUND ;
 TooManyOpenFiles , System , UNPROCESSABLE_ENTITY ;
 TooManyVectors , InvalidRequest , BAD_REQUEST ;
-UnexpectedNetworkPreviousRemotes , InvalidRequest , BAD_REQUEST ;
-NetworkVersionTooOld , InvalidRequest , BAD_REQUEST ;
-UnprocessedNetworkTask , InvalidRequest , BAD_REQUEST ;
 UnretrievableDocument , Internal , BAD_REQUEST ;
 UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
 UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
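`Code::name()` turns the variant identifier into the snake_case error code returned over HTTP via `convert_case`; making it `pub` (left side) is what lets the export code above compare remote error codes with `code == Code::IndexNotFound.name()` instead of the `"index_not_found"` string literal. For example:

use convert_case::{Case, Casing};

fn main() {
    // What the make_error_codes! macro expands to for one variant:
    // stringify!(IndexNotFound) == "IndexNotFound"
    assert_eq!("IndexNotFound".to_case(Case::Snake), "index_not_found");
    assert_eq!("ImportTaskAlreadyReceived".to_case(Case::Snake), "import_task_already_received");
}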
@@ -1,7 +1,6 @@
 use std::collections::BTreeMap;
 
 use serde::{Deserialize, Serialize};
-use uuid::Uuid;
 
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
 #[serde(rename_all = "camelCase")]
@@ -11,9 +10,7 @@ pub struct Network {
     #[serde(default)]
     pub remotes: BTreeMap<String, Remote>,
     #[serde(default)]
-    pub leader: Option<String>,
-    #[serde(default)]
-    pub version: Uuid,
+    pub sharding: bool,
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
@@ -9,12 +9,12 @@ use utoipa::ToSchema;
 use crate::batches::BatchId;
 use crate::error::ResponseError;
 use crate::settings::{Settings, Unchecked};
-use crate::tasks::network::DbTaskNetwork;
 use crate::tasks::{
     serialize_duration, Details, DetailsExportIndexSettings, IndexSwap, Kind, Status, Task, TaskId,
+    TaskNetwork,
 };
 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)]
+#[derive(Debug, Clone, PartialEq, Serialize, ToSchema)]
 #[serde(rename_all = "camelCase")]
 #[schema(rename_all = "camelCase")]
 pub struct TaskView {
@@ -54,7 +54,7 @@ pub struct TaskView {
     pub finished_at: Option<OffsetDateTime>,
 
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub network: Option<DbTaskNetwork>,
+    pub network: Option<TaskNetwork>,
 
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub custom_metadata: Option<String>,
@@ -151,11 +151,6 @@ pub struct DetailsView {
     pub pre_compaction_size: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub post_compaction_size: Option<String>,
-    // network topology change
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub moved_documents: Option<u64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub message: Option<String>,
 }
 
 impl DetailsView {
@@ -166,17 +161,6 @@ impl DetailsView {
                 (None, Some(doc)) | (Some(doc), None) => Some(doc),
                 (Some(left), Some(right)) => Some(left + right),
             },
-            moved_documents: match (self.moved_documents, other.moved_documents) {
-                (None, None) => None,
-                (None, Some(doc)) | (Some(doc), None) => Some(doc),
-                (Some(left), Some(right)) => Some(left + right),
-            },
-            message: match (&mut self.message, &other.message) {
-                (None, None) => None,
-                (None, Some(message)) => Some(message.clone()),
-                (Some(message), None) => Some(std::mem::take(message)),
-                (Some(message), Some(_)) => Some(std::mem::take(message)),
-            },
             indexed_documents: match (self.indexed_documents, other.indexed_documents) {
                 (None, None) => None,
                 (None, Some(None)) | (Some(None), None) | (Some(None), Some(None)) => Some(None),
@@ -467,11 +451,6 @@ impl From<Details> for DetailsView {
                 ..Default::default()
             }
         }
-            Details::NetworkTopologyChange { moved_documents, message } => DetailsView {
-                moved_documents: Some(moved_documents),
-                message: Some(message),
-                ..Default::default()
-            },
     }
 }
 
@@ -23,8 +23,6 @@ use crate::{versioning, InstanceUid};
 
 pub type TaskId = u32;
 
-pub mod network;
-
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Task {
@@ -46,7 +44,7 @@ pub struct Task {
     pub kind: KindWithContent,
 
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub network: Option<network::DbTaskNetwork>,
+    pub network: Option<TaskNetwork>,
 
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub custom_metadata: Option<String>,
@@ -63,7 +61,6 @@ impl Task {
             | TaskDeletion { .. }
             | Export { .. }
             | UpgradeDatabase { .. }
-            | NetworkTopologyChange { .. }
             | IndexSwap { .. } => None,
             DocumentAdditionOrUpdate { index_uid, .. }
             | DocumentEdition { index_uid, .. }
@@ -102,7 +99,6 @@ impl Task {
             | KindWithContent::SnapshotCreation
             | KindWithContent::Export { .. }
             | KindWithContent::UpgradeDatabase { .. }
-            | KindWithContent::NetworkTopologyChange { .. }
             | KindWithContent::IndexCompaction { .. } => None,
         }
     }
@@ -182,7 +178,6 @@ pub enum KindWithContent {
     IndexCompaction {
         index_uid: String,
     },
-    NetworkTopologyChange(network::NetworkTopologyChange),
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
@@ -220,7 +215,6 @@ impl KindWithContent {
             KindWithContent::Export { .. } => Kind::Export,
             KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
             KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
-            KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
         }
     }
 
@@ -233,7 +227,6 @@ impl KindWithContent {
             | TaskCancelation { .. }
             | TaskDeletion { .. }
             | Export { .. }
-            | NetworkTopologyChange { .. }
             | UpgradeDatabase { .. } => vec![],
             DocumentAdditionOrUpdate { index_uid, .. }
             | DocumentEdition { index_uid, .. }
@@ -347,10 +340,6 @@ impl KindWithContent {
                 pre_compaction_size: None,
                 post_compaction_size: None,
             }),
-            KindWithContent::NetworkTopologyChange { .. } => Some(Details::NetworkTopologyChange {
-                moved_documents: 0,
-                message: "processing tasks for previous network versions".into(),
-            }),
         }
     }
 
@@ -403,7 +392,7 @@ impl KindWithContent {
             })
             }
             KindWithContent::IndexSwap { .. } => {
-                unimplemented!("do not call `default_finished_details` for `IndexSwap` tasks")
+                todo!()
             }
             KindWithContent::TaskCancelation { query, tasks } => Some(Details::TaskCancelation {
                 matched_tasks: tasks.len(),
@@ -438,9 +427,6 @@ impl KindWithContent {
                 pre_compaction_size: None,
                 post_compaction_size: None,
             }),
-            KindWithContent::NetworkTopologyChange(network_topology_change) => {
-                Some(network_topology_change.to_details())
-            }
         }
     }
 }
@@ -508,9 +494,6 @@ impl From<&KindWithContent> for Option<Details> {
                 pre_compaction_size: None,
                 post_compaction_size: None,
             }),
-            KindWithContent::NetworkTopologyChange(network_topology_change) => {
-                Some(network_topology_change.to_details())
-            }
         }
     }
 }
@@ -622,7 +605,6 @@ pub enum Kind {
     Export,
     UpgradeDatabase,
     IndexCompaction,
-    NetworkTopologyChange,
 }
 
 impl Kind {
@@ -642,7 +624,6 @@ impl Kind {
             | Kind::DumpCreation
             | Kind::Export
             | Kind::UpgradeDatabase
-            | Kind::NetworkTopologyChange
             | Kind::SnapshotCreation => false,
         }
     }
@@ -665,7 +646,6 @@ impl Display for Kind {
             Kind::Export => write!(f, "export"),
             Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
             Kind::IndexCompaction => write!(f, "indexCompaction"),
-            Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
         }
     }
 }
@@ -703,8 +683,6 @@ impl FromStr for Kind {
             Ok(Kind::UpgradeDatabase)
         } else if kind.eq_ignore_ascii_case("indexCompaction") {
             Ok(Kind::IndexCompaction)
-        } else if kind.eq_ignore_ascii_case("networkTopologyChange") {
-            Ok(Kind::NetworkTopologyChange)
         } else {
             Err(ParseTaskKindError(kind.to_owned()))
         }
@@ -795,10 +773,36 @@ pub enum Details {
         pre_compaction_size: Option<Byte>,
         post_compaction_size: Option<Byte>,
     },
-    NetworkTopologyChange {
-        moved_documents: u64,
-        message: String,
-    },
+}
+
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(untagged, rename_all = "camelCase")]
+pub enum TaskNetwork {
+    Origin { origin: Origin },
+    Remotes { remote_tasks: BTreeMap<String, RemoteTask> },
+}
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct Origin {
+    pub remote_name: String,
+    pub task_uid: usize,
+}
+
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct RemoteTask {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    task_uid: Option<TaskId>,
+    error: Option<ResponseError>,
+}
+
+impl From<Result<TaskId, ResponseError>> for RemoteTask {
+    fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
+        match res {
+            Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
+            Err(err) => RemoteTask { task_uid: None, error: Some(err) },
+        }
+    }
 }
 
 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
@@ -841,9 +845,6 @@ impl Details {
             | Self::Export { .. }
             | Self::UpgradeDatabase { .. }
             | Self::IndexSwap { .. } => (),
-            Self::NetworkTopologyChange { moved_documents: _, message } => {
-                *message = format!("Failed. Previous status: {}", message);
-            }
         }
 
         details
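Because the new `TaskNetwork` is `#[serde(untagged)]`, its two variants serialize without a discriminant; which variant a payload represents is decided purely by its shape, with fields renamed to camelCase. A hedged sketch of the resulting JSON, with illustrative remote names and task uids:

use serde_json::json;

fn main() {
    // TaskNetwork::Origin { origin } — a task duplicated from another remote:
    let origin = json!({ "origin": { "remoteName": "ms-0", "taskUid": 42 } });
    // TaskNetwork::Remotes { remote_tasks } — a task duplicated to other remotes,
    // one of which answered with an error:
    let remotes = json!({
        "remoteTasks": {
            "ms-1": { "taskUid": 7 },
            "ms-2": { "error": { "code": "remote_bad_response" } }
        }
    });
    println!("{origin}\n{remotes}");
}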
@@ -1,522 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
use base64::Engine as _;
|
|
||||||
use itertools::{EitherOrBoth, Itertools as _};
|
|
||||||
use milli::{CboRoaringBitmapCodec, DocumentId};
|
|
||||||
use roaring::RoaringBitmap;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use utoipa::ToSchema;
|
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
use crate::error::ResponseError;
|
|
||||||
use crate::network::{Network, Remote};
|
|
||||||
use crate::tasks::{Details, TaskId};
|
|
||||||
|
|
||||||
#[cfg(not(feature = "enterprise"))]
|
|
||||||
mod community_edition;
|
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
mod enterprise_edition;
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
|
||||||
#[serde(untagged, rename_all = "camelCase")]
|
|
||||||
// This type is used in the database, care should be taken when modifying it.
|
|
||||||
pub enum DbTaskNetwork {
|
|
||||||
/// Tasks that were duplicated from `origin`
|
|
||||||
Origin { origin: Origin },
|
|
||||||
/// Tasks that were duplicated as `remote_tasks`
|
|
||||||
Remotes {
|
|
||||||
remote_tasks: BTreeMap<String, RemoteTask>,
|
|
||||||
#[serde(default)]
|
|
||||||
network_version: Uuid,
|
|
||||||
},
|
|
||||||
/// Document import tasks sent in the context of `network_change`
|
|
||||||
Import { import_from: ImportData, network_change: Origin },
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DbTaskNetwork {
|
|
||||||
pub fn network_version(&self) -> Uuid {
|
|
||||||
match self {
|
|
||||||
DbTaskNetwork::Origin { origin } => origin.network_version,
|
|
||||||
DbTaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
|
|
||||||
DbTaskNetwork::Import { import_from: _, network_change } => {
|
|
||||||
network_change.network_version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn import_data(&self) -> Option<&ImportData> {
|
|
||||||
match self {
|
|
||||||
DbTaskNetwork::Origin { .. } | DbTaskNetwork::Remotes { .. } => None,
|
|
||||||
DbTaskNetwork::Import { import_from, .. } => Some(import_from),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn origin(&self) -> Option<&Origin> {
|
|
||||||
match self {
|
|
||||||
DbTaskNetwork::Origin { origin } => Some(origin),
|
|
||||||
DbTaskNetwork::Remotes { .. } => None,
|
|
||||||
DbTaskNetwork::Import { network_change, .. } => Some(network_change),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum TaskNetwork {
|
|
||||||
/// Tasks that were duplicated from `origin`
|
|
||||||
Origin { origin: Origin },
|
|
||||||
/// Tasks that were duplicated as `remote_tasks`
|
|
||||||
Remotes { remote_tasks: BTreeMap<String, RemoteTask>, network_version: Uuid },
|
|
||||||
/// Document import tasks sent in the context of `network_change`
|
|
||||||
Import { import_from: ImportData, network_change: Origin, metadata: ImportMetadata },
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TaskNetwork {
|
|
||||||
pub fn network_version(&self) -> Uuid {
|
|
||||||
match self {
|
|
||||||
TaskNetwork::Origin { origin } => origin.network_version,
|
|
||||||
TaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
|
|
||||||
TaskNetwork::Import { import_from: _, network_change, metadata: _ } => {
|
|
||||||
network_change.network_version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<TaskNetwork> for DbTaskNetwork {
|
|
||||||
fn from(value: TaskNetwork) -> Self {
|
|
||||||
match value {
|
|
||||||
TaskNetwork::Origin { origin } => DbTaskNetwork::Origin { origin },
|
|
||||||
TaskNetwork::Remotes { remote_tasks, network_version } => {
|
|
||||||
DbTaskNetwork::Remotes { remote_tasks, network_version }
|
|
||||||
}
|
|
||||||
TaskNetwork::Import { import_from, network_change, metadata: _ } => {
|
|
||||||
DbTaskNetwork::Import { import_from, network_change }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct Origin {
|
|
||||||
pub remote_name: String,
|
|
||||||
pub task_uid: u32,
|
|
||||||
#[serde(default)]
|
|
||||||
pub network_version: Uuid,
|
|
||||||
}

/// Import data stored in a task
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct ImportData {
    /// Remote that this task is imported from
    pub remote_name: String,
    /// Index relevant to this task
    pub index_name: Option<String>,
    /// Number of documents in this task
    pub document_count: u64,
}

/// Import metadata associated with a task but not stored in the task
#[derive(Debug, PartialEq, Clone)]
pub struct ImportMetadata {
    /// Total number of indexes to import from this host
    pub index_count: u64,
    /// Key unique to this (network_change, index, host) combination.
    ///
    /// In practice, an internal document id of one of the documents to import.
    pub task_key: Option<DocumentId>,
    /// Total number of documents to import for this index from this host.
    pub total_index_documents: u64,
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct RemoteTask {
    #[serde(skip_serializing_if = "Option::is_none")]
    task_uid: Option<TaskId>,
    error: Option<ResponseError>,
}

impl From<Result<TaskId, ResponseError>> for RemoteTask {
    fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
        match res {
            Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
            Err(err) => RemoteTask { task_uid: None, error: Some(err) },
        }
    }
}
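
// Illustrative note (not in the original): because only `task_uid` carries
// `skip_serializing_if`, a successful answer serializes as `{"taskUid":42,"error":null}`
// while a failed one drops the uid entirely and serializes as `{"error":{...}}`.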

/// Contains the full state of a network topology change.
///
/// A network topology change task is unique in that it can be processed in multiple different batches, as its resolution
/// depends on various document addition tasks being processed.
///
/// A network topology change task has 4 states:
///
/// 1. Processing any task that was meant for an earlier version of the network. This is necessary to know that we have
///    the right version of the documents.
/// 2. Sending all documents that must be moved to other remotes.
/// 3. Processing any task coming from the remotes.
/// 4. Finished.
///
/// Furthermore, it maintains statistics about the change (currently, the number of moved documents).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NetworkTopologyChange {
    state: NetworkTopologyState,
    // in name, `None` if the node is no longer part of the network
    #[serde(skip_serializing_if = "Option::is_none")]
    in_name: Option<String>,
    // out name, `None` if the node is new to the network
    #[serde(skip_serializing_if = "Option::is_none")]
    out_name: Option<String>,
    out_remotes: BTreeMap<String, Remote>,
    in_remotes: BTreeMap<String, InRemote>,
    stats: NetworkTopologyStats,
}

impl NetworkTopologyChange {
    pub fn new(old_network: Network, new_network: Network) -> Self {
        // we use our old name as export name
        let out_name = old_network.local;
        // we use our new name as import name
        let in_name = new_network.local;
        // we export to the new network
        let mut out_remotes = new_network.remotes;
        // don't export to ourselves
        if let Some(in_name) = &in_name {
            out_remotes.remove(in_name);
        }
        let in_remotes = old_network
            .remotes
            .into_keys()
            // don't await imports from ourselves
            .filter(|name| Some(name.as_str()) != out_name.as_deref())
            .map(|name| (name, InRemote::new()))
            .collect();
        Self {
            state: NetworkTopologyState::WaitingForOlderTasks,
            in_name,
            out_name,
            out_remotes,
            in_remotes,
            stats: NetworkTopologyStats { moved_documents: 0 },
        }
    }

    pub fn state(&self) -> NetworkTopologyState {
        self.state
    }

    pub fn out_name(&self) -> Option<&str> {
        self.out_name.as_deref()
    }

    pub fn in_name(&self) -> Option<&str> {
        self.in_name.as_deref()
    }

    pub fn to_details(&self) -> Details {
        let message = match self.state {
            NetworkTopologyState::WaitingForOlderTasks => {
                "Waiting for tasks enqueued before the network change to finish processing".into()
            }
            NetworkTopologyState::ExportingDocuments => "Exporting documents".into(),
            NetworkTopologyState::ImportingDocuments => {
                let mut finished_count = 0;
                let mut first_ongoing = None;
                let mut ongoing_total_indexes = 0;
                let mut ongoing_processed_documents = 0;
                let mut ongoing_missing_documents = 0;
                let mut ongoing_total_documents = 0;
                let mut other_ongoing_count = 0;
                let mut first_waiting = None;
                let mut other_waiting_count = 0;
                for (remote_name, in_remote) in &self.in_remotes {
                    match &in_remote.import_state {
                        ImportState::WaitingForInitialTask => {
                            first_waiting = match first_waiting {
                                None => Some(remote_name),
                                first_waiting => {
                                    other_waiting_count += 1;
                                    first_waiting
                                }
                            };
                        }
                        ImportState::Ongoing { import_index_state, total_indexes } => {
                            first_ongoing = match first_ongoing {
                                None => {
                                    ongoing_total_indexes = *total_indexes;
                                    Some(remote_name)
                                }
                                first_ongoing => {
                                    other_ongoing_count += 1;
                                    first_ongoing
                                }
                            };
                            for import_state in import_index_state.values() {
                                match import_state {
                                    ImportIndexState::Ongoing {
                                        total_documents,
                                        processed_documents,
                                        received_documents,
                                        task_keys: _,
                                    } => {
                                        ongoing_total_documents += total_documents;
                                        ongoing_processed_documents += processed_documents;
                                        ongoing_missing_documents +=
                                            total_documents.saturating_sub(*received_documents);
                                    }
                                    ImportIndexState::Finished { total_documents } => {
                                        ongoing_total_documents += total_documents;
                                        ongoing_processed_documents += total_documents;
                                    }
                                }
                            }
                        }
                        ImportState::Finished { total_indexes, total_documents } => {
                            finished_count += 1;
                            ongoing_total_indexes = *total_indexes;
                            ongoing_total_documents += *total_documents;
                            ongoing_processed_documents += *total_documents;
                        }
                    }
                }
                format!(
                    "Importing documents from {total} remotes{waiting}{ongoing}{finished}",
                    total = self.in_remotes.len(),
                    waiting = if let Some(first_waiting) = first_waiting {
                        format!(
                            ", waiting on first task from `{}`{others}",
                            first_waiting,
                            others = if other_waiting_count > 0 {
                                format!(" and {other_waiting_count} other remotes")
                            } else {
                                "".into()
                            }
                        )
                    } else {
                        "".into()
                    },
                    ongoing = if let Some(first_ongoing) = first_ongoing {
                        format!(
                            ", awaiting {ongoing_missing_documents} documents and processed {ongoing_processed_documents} out of {ongoing_total_documents} documents in {ongoing_total_indexes} indexes from `{first_ongoing}`{others}",
                            others = if other_ongoing_count > 0 {
                                format!(" and {other_ongoing_count} other remotes")
                            } else {
                                "".into()
                            }
                        )
                    } else {
                        "".into()
                    },
                    // `> 0`, not `>= 0`: with `>= 0` the condition is always true and the
                    // empty-string branch below is dead code.
                    finished = if finished_count > 0 {
                        format!(", {finished_count} remotes finished processing")
                    } else {
                        "".into()
                    }
                )
            }
            NetworkTopologyState::Finished => "Finished".into(),
        };
        Details::NetworkTopologyChange { moved_documents: self.stats.moved_documents, message }
    }

    pub fn merge(&mut self, other: NetworkTopologyChange) {
        // The topology change has a guarantee of forward progress, so for each field we're going to keep the "most advanced" values.
        let Self { state, in_name: _, out_name: _, out_remotes: _, in_remotes, stats } = self;

        *state = Ord::max(*state, other.state);
        *stats = Ord::max(*stats, other.stats);

        // both maps were built from the same network change, so they are assumed to share keys
        for (old_value, new_value) in other.in_remotes.into_values().zip(in_remotes.values_mut()) {
            new_value.import_state = match (old_value.import_state, std::mem::take(&mut new_value.import_state)) {
                // waiting for initial task is always older
                (ImportState::WaitingForInitialTask, newer)
                | (newer, ImportState::WaitingForInitialTask)
                // finished is always newer
                | (_, newer @ ImportState::Finished { .. })
                | (newer @ ImportState::Finished { .. }, _) => newer,
                (
                    ImportState::Ongoing { import_index_state: left_import, total_indexes: left_total_indexes },
                    ImportState::Ongoing { import_index_state: right_import, total_indexes: right_total_indexes },
                ) => {
                    let import_index_state = left_import
                        .into_iter()
                        .merge_join_by(right_import.into_iter(), |(k, _), (x, _)| k.cmp(x))
                        .map(|eob| match eob {
                            EitherOrBoth::Both((name, left), (_, right)) => {
                                let newer = merge_import_index_state(left, right);
                                (name, newer)
                            }
                            EitherOrBoth::Left(import) | EitherOrBoth::Right(import) => import,
                        })
                        .collect();

                    ImportState::Ongoing {
                        import_index_state,
                        total_indexes: u64::max(left_total_indexes, right_total_indexes),
                    }
                }
            }
        }
    }
}

fn merge_import_index_state(left: ImportIndexState, right: ImportIndexState) -> ImportIndexState {
    match (left, right) {
        (_, newer @ ImportIndexState::Finished { .. }) => newer,
        (newer @ ImportIndexState::Finished { .. }, _) => newer,
        (
            ImportIndexState::Ongoing {
                total_documents: left_total_documents,
                received_documents: left_received_documents,
                processed_documents: left_processed_documents,
                task_keys: mut left_task_keys,
            },
            ImportIndexState::Ongoing {
                total_documents: right_total_documents,
                received_documents: right_received_documents,
                processed_documents: right_processed_documents,
                task_keys: right_task_keys,
            },
        ) => {
            let total_documents = u64::max(left_total_documents, right_total_documents);
            let received_documents = u64::max(left_received_documents, right_received_documents);
            let processed_documents = u64::max(left_processed_documents, right_processed_documents);
            left_task_keys.0 |= &right_task_keys.0;
            let task_keys = left_task_keys;

            ImportIndexState::Ongoing {
                total_documents,
                received_documents,
                processed_documents,
                task_keys,
            }
        }
    }
}
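
// Sketch (not part of the original file): merging two ongoing snapshots of the same
// index keeps the most advanced counters and unions the task keys, so replaying an
// older snapshot never loses progress.
#[cfg(test)]
mod merge_import_index_state_tests {
    use roaring::RoaringBitmap;

    use super::{merge_import_index_state, ImportIndexState, TaskKeys};

    #[test]
    fn merge_keeps_max_counters_and_unions_task_keys() {
        let left = ImportIndexState::Ongoing {
            total_documents: 100,
            received_documents: 40,
            processed_documents: 10,
            task_keys: TaskKeys(RoaringBitmap::from_iter([1u32, 2])),
        };
        let right = ImportIndexState::Ongoing {
            total_documents: 100,
            received_documents: 30,
            processed_documents: 25,
            task_keys: TaskKeys(RoaringBitmap::from_iter([2u32, 3])),
        };
        match merge_import_index_state(left, right) {
            ImportIndexState::Ongoing { received_documents, processed_documents, task_keys, .. } => {
                assert_eq!(received_documents, 40);
                assert_eq!(processed_documents, 25);
                assert_eq!(task_keys.0.iter().collect::<Vec<_>>(), vec![1, 2, 3]);
            }
            ImportIndexState::Finished { .. } => unreachable!("both sides were ongoing"),
        }
    }
}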

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
pub enum NetworkTopologyState {
    WaitingForOlderTasks,
    ExportingDocuments,
    ImportingDocuments,
    Finished,
}
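
// Sketch (not part of the original file): the variant order above is load-bearing.
// `derive(PartialOrd, Ord)` orders variants by declaration order, which is what lets
// `NetworkTopologyChange::merge` keep the most advanced state via `Ord::max`.
#[cfg(test)]
mod network_topology_state_order_tests {
    use super::NetworkTopologyState::*;

    #[test]
    fn states_advance_monotonically() {
        assert!(WaitingForOlderTasks < ExportingDocuments);
        assert!(ExportingDocuments < ImportingDocuments);
        assert!(ImportingDocuments < Finished);
        // merging never moves a task backwards:
        assert_eq!(Ord::max(ExportingDocuments, Finished), Finished);
    }
}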

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
pub struct NetworkTopologyStats {
    #[serde(default)]
    pub moved_documents: u64,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InRemote {
    import_state: ImportState,
}

impl InRemote {
    pub fn new() -> Self {
        Self { import_state: ImportState::WaitingForInitialTask }
    }
}

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum ImportState {
    /// Initially, Meilisearch doesn't know how many documents it should expect from a remote.
    /// Every task coming from a remote carries the total number of indexes that will be imported
    /// and the number of documents to import for that task's index.
    #[default]
    WaitingForInitialTask,
    Ongoing {
        import_index_state: BTreeMap<String, ImportIndexState>,
        total_indexes: u64,
    },
    Finished {
        total_indexes: u64,
        total_documents: u64,
    },
}
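
// Illustrative note (not in the original): `WaitingForInitialTask` being the `#[default]`
// variant is what allows the state transitions further down to be written as
// `match std::mem::take(&mut remote.import_state) { ... }`: the value is moved out for
// the transition while a harmless placeholder is left behind.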

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum ImportIndexState {
    Ongoing {
        total_documents: u64,
        received_documents: u64,
        processed_documents: u64,
        task_keys: TaskKeys,
    },
    Finished {
        total_documents: u64,
    },
}

#[derive(Debug, Clone, PartialEq)]
pub struct TaskKeys(pub RoaringBitmap);

impl Serialize for TaskKeys {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let TaskKeys(task_keys) = self;
        let mut bytes = Vec::new();
        CboRoaringBitmapCodec::serialize_into_vec(task_keys, &mut bytes);
        let encoded = base64::prelude::BASE64_STANDARD.encode(&bytes);
        serializer.serialize_str(&encoded)
    }
}

impl<'de> Deserialize<'de> for TaskKeys {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserializer.deserialize_str(TaskKeysVisitor)
    }
}

struct TaskKeysVisitor;

impl<'de> serde::de::Visitor<'de> for TaskKeysVisitor {
    type Value = TaskKeys;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("a base64-encoded CBO RoaringBitmap")
    }

    fn visit_str<E>(self, encoded: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        let decoded = base64::prelude::BASE64_STANDARD.decode(encoded).map_err(|_err| {
            E::invalid_value(serde::de::Unexpected::Str(encoded), &"a base64 string")
        })?;
        self.visit_bytes(&decoded)
    }

    fn visit_bytes<E>(self, decoded: &[u8]) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        let task_keys = CboRoaringBitmapCodec::deserialize_from(decoded).map_err(|_err| {
            E::invalid_value(serde::de::Unexpected::Bytes(decoded), &"a CBO RoaringBitmap")
        })?;
        Ok(TaskKeys(task_keys))
    }
}
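
// Sketch (not part of the original file): the two impls above are inverses, so a bitmap
// survives the CBO-encode -> base64 -> JSON string -> decode round trip. Assumes
// `serde_json` is available as a (dev-)dependency.
#[cfg(test)]
mod task_keys_serde_tests {
    use roaring::RoaringBitmap;

    use super::TaskKeys;

    #[test]
    fn task_keys_round_trip_through_json() {
        let keys = TaskKeys(RoaringBitmap::from_iter([7u32, 11, 100_000]));
        let json = serde_json::to_string(&keys).unwrap();
        let back: TaskKeys = serde_json::from_str(&json).unwrap();
        assert_eq!(back, keys);
    }
}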

pub enum ReceiveTaskError {
    UnknownRemote(String),
    DuplicateTask(DocumentId),
}

pub mod headers {
    pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "X-Meili-Proxy-Origin-Remote";
    pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "X-Meili-Proxy-Origin-TaskUid";
    pub const PROXY_ORIGIN_NETWORK_VERSION_HEADER: &str = "X-Meili-Proxy-Origin-Network-Version";
    pub const PROXY_IMPORT_REMOTE_HEADER: &str = "X-Meili-Proxy-Import-Remote";
    pub const PROXY_IMPORT_INDEX_COUNT_HEADER: &str = "X-Meili-Proxy-Import-Index-Count";
    pub const PROXY_IMPORT_INDEX_HEADER: &str = "X-Meili-Proxy-Import-Index";
    pub const PROXY_IMPORT_TASK_KEY_HEADER: &str = "X-Meili-Proxy-Import-Task-Key";
    pub const PROXY_IMPORT_DOCS_HEADER: &str = "X-Meili-Proxy-Import-Docs";
    pub const PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER: &str = "X-Meili-Proxy-Import-Total-Index-Docs";
}
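
// Illustrative note (not in the original): consumers group these constants the same way
// they are validated in meilisearch's error type: the three `Origin` headers travel
// together, the `Import` trio is Remote/Index/Docs, and the `Import-Metadata` trio is
// Index-Count/Task-Key/Total-Index-Docs; each group must be provided in full or not at all.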

@@ -1,52 +0,0 @@
use std::collections::BTreeMap;

use milli::DocumentId;

use crate::network::Remote;
use crate::tasks::network::{ImportState, InRemote, NetworkTopologyChange, ReceiveTaskError};

impl NetworkTopologyChange {
    pub fn export_to_process(&self) -> Option<(&BTreeMap<String, Remote>, &str)> {
        None
    }

    pub fn set_moved(&mut self, _moved_documents: u64) {}

    pub fn update_state(&mut self) {}

    pub fn receive_remote_task(
        &mut self,
        _remote_name: &str,
        _index_name: Option<&str>,
        _task_key: Option<DocumentId>,
        _document_count: u64,
        _total_indexes: u64,
        _total_index_documents: u64,
    ) -> Result<(), ReceiveTaskError> {
        Ok(())
    }

    pub fn process_remote_tasks(
        &mut self,
        _remote_name: &str,
        _index_name: &str,
        _document_count: u64,
    ) {
    }

    pub fn is_import_finished(&self) -> bool {
        true
    }
}

impl InRemote {
    pub fn is_finished(&self) -> bool {
        matches!(self.import_state, ImportState::Finished { .. })
    }
}

impl Default for InRemote {
    fn default() -> Self {
        Self::new()
    }
}
@@ -1,239 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

use std::collections::BTreeMap;

use milli::DocumentId;
use roaring::RoaringBitmap;

use super::TaskKeys;
use crate::network::Remote;
use crate::tasks::network::{
    ImportIndexState, ImportState, InRemote, NetworkTopologyChange, NetworkTopologyState,
    ReceiveTaskError,
};

impl NetworkTopologyChange {
    pub fn export_to_process(&self) -> Option<(&BTreeMap<String, Remote>, &str)> {
        if self.state != NetworkTopologyState::ExportingDocuments {
            return None;
        }

        if self.out_remotes.is_empty() {
            return None;
        }

        let out_name = self.out_name()?;
        Some((&self.out_remotes, out_name))
    }

    pub fn set_moved(&mut self, moved_documents: u64) {
        self.stats.moved_documents = moved_documents;
    }

    /// Compute the next state from the current state of the task.
    pub fn update_state(&mut self) {
        self.state = match self.state {
            NetworkTopologyState::WaitingForOlderTasks => {
                // no more older tasks, so finished waiting
                NetworkTopologyState::ExportingDocuments
            }
            NetworkTopologyState::ExportingDocuments => {
                // processed all exported documents
                if self.is_import_finished() {
                    NetworkTopologyState::Finished
                } else {
                    NetworkTopologyState::ImportingDocuments
                }
            }
            NetworkTopologyState::ImportingDocuments => {
                if self.is_import_finished() {
                    NetworkTopologyState::Finished
                } else {
                    NetworkTopologyState::ImportingDocuments
                }
            }
            NetworkTopologyState::Finished => NetworkTopologyState::Finished,
        };
    }

    pub fn receive_remote_task(
        &mut self,
        remote_name: &str,
        index_name: Option<&str>,
        task_key: Option<DocumentId>,
        document_count: u64,
        total_indexes: u64,
        total_index_documents: u64,
    ) -> Result<(), ReceiveTaskError> {
        let remote = self
            .in_remotes
            .get_mut(remote_name)
            .ok_or_else(|| ReceiveTaskError::UnknownRemote(remote_name.to_string()))?;
        remote.import_state = match std::mem::take(&mut remote.import_state) {
            ImportState::WaitingForInitialTask => {
                if total_indexes == 0 {
                    ImportState::Finished { total_indexes, total_documents: 0 }
                } else {
                    let mut task_keys = RoaringBitmap::new();
                    if let Some(index_name) = index_name {
                        if let Some(task_key) = task_key {
                            task_keys.insert(task_key);
                        }
                        let mut import_index_state = BTreeMap::new();
                        import_index_state.insert(
                            index_name.to_owned(),
                            ImportIndexState::Ongoing {
                                total_documents: total_index_documents,
                                received_documents: document_count,
                                task_keys: TaskKeys(task_keys),
                                processed_documents: 0,
                            },
                        );
                        ImportState::Ongoing { import_index_state, total_indexes }
                    } else {
                        ImportState::WaitingForInitialTask
                    }
                }
            }
            ImportState::Ongoing { mut import_index_state, total_indexes } => {
                if let Some(index_name) = index_name {
                    if let Some((index_name, mut index_state)) =
                        import_index_state.remove_entry(index_name)
                    {
                        index_state = match index_state {
                            ImportIndexState::Ongoing {
                                total_documents,
                                received_documents: previously_received,
                                processed_documents,
                                mut task_keys,
                            } => {
                                if let Some(task_key) = task_key {
                                    if !task_keys.0.insert(task_key) {
                                        return Err(ReceiveTaskError::DuplicateTask(task_key));
                                    }
                                }

                                ImportIndexState::Ongoing {
                                    total_documents,
                                    received_documents: previously_received + document_count,
                                    processed_documents,
                                    task_keys,
                                }
                            }
                            ImportIndexState::Finished { total_documents } => {
                                ImportIndexState::Finished { total_documents }
                            }
                        };
                        import_index_state.insert(index_name, index_state);
                    } else {
                        let mut task_keys = RoaringBitmap::new();
                        if let Some(task_key) = task_key {
                            task_keys.insert(task_key);
                        }
                        let state = ImportIndexState::Ongoing {
                            total_documents: total_index_documents,
                            received_documents: document_count,
                            processed_documents: 0,
                            task_keys: TaskKeys(task_keys),
                        };
                        import_index_state.insert(index_name.to_string(), state);
                    }
                    ImportState::Ongoing { import_index_state, total_indexes }
                } else {
                    ImportState::Ongoing { import_index_state, total_indexes }
                }
            }
            ImportState::Finished { total_indexes, total_documents } => {
                ImportState::Finished { total_indexes, total_documents }
            }
        };
        Ok(())
    }

    pub fn process_remote_tasks(
        &mut self,
        remote_name: &str,
        index_name: &str,
        document_count: u64,
    ) {
        let remote = self
            .in_remotes
            .get_mut(remote_name)
            .expect("process_remote_tasks called on a remote that is not in `in_remotes`");
        remote.import_state = match std::mem::take(&mut remote.import_state) {
            ImportState::WaitingForInitialTask => {
                panic!("a task was processed before any was received")
            }
            ImportState::Ongoing { mut import_index_state, total_indexes } => {
                let (index_name, mut index_state) =
                    import_index_state.remove_entry(index_name).unwrap();
                index_state = match index_state {
                    ImportIndexState::Ongoing {
                        total_documents,
                        received_documents,
                        processed_documents: previously_processed,
                        task_keys,
                    } => {
                        let newly_processed_documents = previously_processed + document_count;
                        if newly_processed_documents >= total_documents {
                            ImportIndexState::Finished { total_documents }
                        } else {
                            ImportIndexState::Ongoing {
                                total_documents,
                                received_documents,
                                processed_documents: newly_processed_documents,
                                task_keys,
                            }
                        }
                    }
                    ImportIndexState::Finished { total_documents } => {
                        ImportIndexState::Finished { total_documents }
                    }
                };
                import_index_state.insert(index_name, index_state);
                if import_index_state.len() as u64 == total_indexes
                    && import_index_state.values().all(|index| index.is_finished())
                {
                    let total_documents =
                        import_index_state.values().map(|index| index.total_documents()).sum();
                    ImportState::Finished { total_indexes, total_documents }
                } else {
                    ImportState::Ongoing { import_index_state, total_indexes }
                }
            }
            ImportState::Finished { total_indexes, total_documents } => {
                ImportState::Finished { total_indexes, total_documents }
            }
        }
    }

    pub fn is_import_finished(&self) -> bool {
        self.in_remotes.values().all(|remote| remote.is_finished())
    }
}
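
// Illustrative walkthrough (not in the original): for a remote importing one index of two
// documents, `receive_remote_task` first moves it from `WaitingForInitialTask` to `Ongoing`
// (recording the received count and the task key), then `process_remote_tasks` bumps
// `processed_documents`; once every index reports processed >= total, the remote flips to
// `Finished` and `is_import_finished` lets `update_state` reach `Finished`.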

impl InRemote {
    pub fn is_finished(&self) -> bool {
        matches!(self.import_state, ImportState::Finished { .. })
    }
}

impl Default for InRemote {
    fn default() -> Self {
        Self::new()
    }
}

impl ImportIndexState {
    pub fn is_finished(&self) -> bool {
        matches!(self, ImportIndexState::Finished { .. })
    }

    fn total_documents(&self) -> u64 {
        match *self {
            ImportIndexState::Ongoing { total_documents, .. }
            | ImportIndexState::Finished { total_documents } => total_documents,
        }
    }
}
@@ -160,7 +160,7 @@ mini-dashboard = [
]
chinese = ["meilisearch-types/chinese"]
chinese-pinyin = ["meilisearch-types/chinese-pinyin"]
enterprise = ["meilisearch-types/enterprise", "index-scheduler/enterprise"]
enterprise = ["meilisearch-types/enterprise"]
hebrew = ["meilisearch-types/hebrew"]
japanese = ["meilisearch-types/japanese"]
korean = ["meilisearch-types/korean"]
@@ -1,7 +1,7 @@
use std::any::TypeId;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, Instant};
@@ -344,14 +344,14 @@ impl Infos {
            experimental_no_edition_2024_for_dumps,
            experimental_vector_store_setting: vector_store_setting,
            gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(),
            db_path: db_path != Path::new("./data.ms"),
            db_path: db_path != PathBuf::from("./data.ms"),
            import_dump: import_dump.is_some(),
            dump_dir: dump_dir != Path::new("dumps/"),
            dump_dir: dump_dir != PathBuf::from("dumps/"),
            ignore_missing_dump,
            ignore_dump_if_db_exists,
            import_snapshot: import_snapshot.is_some(),
            schedule_snapshot,
            snapshot_dir: snapshot_dir != Path::new("snapshots/"),
            snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
            uses_s3_snapshots: s3_snapshot_options.is_some(),
            ignore_missing_snapshot,
            ignore_snapshot_if_db_exists,
@@ -6,14 +6,10 @@ use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
use meilisearch_types::milli;
use meilisearch_types::milli::OrderBy;
use meilisearch_types::tasks::network::headers::{
    PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
    PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
    PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
};
use serde_json::Value;
use tokio::task::JoinError;
use uuid::Uuid;
use crate::routes::indexes::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};

#[derive(Debug, thiserror::Error)]
#[allow(clippy::large_enum_variant)]
@@ -97,58 +93,8 @@ pub enum MeilisearchHttpError {
        } else { PROXY_ORIGIN_TASK_UID_HEADER }
    )]
    InconsistentOriginHeaders { is_remote_missing: bool },
    #[error("Inconsistent `Import` headers: {remote}: {remote_status}, {index}: {index_status}, {docs}: {docs_status}.\n - Hint: either all three headers should be provided, or none of them",
        remote = PROXY_IMPORT_REMOTE_HEADER,
        remote_status = if *is_remote_missing { "missing" } else { "provided" },
        index = PROXY_IMPORT_INDEX_HEADER,
        index_status = if *is_index_missing { "missing" } else { "provided" },
        docs = PROXY_IMPORT_DOCS_HEADER,
        docs_status = if *is_docs_missing { "missing" } else { "provided" }
    )]
    InconsistentImportHeaders {
        is_remote_missing: bool,
        is_index_missing: bool,
        is_docs_missing: bool,
    },
    #[error("Inconsistent `Import-Metadata` headers: {index_count}: {index_count_status}, {task_key}: {task_key_status}, {total_index_documents}: {total_index_documents_status}.\n - Hint: either all three headers should be provided, or none of them",
        index_count = PROXY_IMPORT_INDEX_COUNT_HEADER,
        index_count_status = if *is_index_count_missing { "missing" } else { "provided" },
        task_key = PROXY_IMPORT_TASK_KEY_HEADER,
        task_key_status = if *is_task_key_missing { "missing" } else { "provided" },
        total_index_documents = PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
        total_index_documents_status = if *is_total_index_documents_missing { "missing" } else { "provided" },
    )]
    InconsistentImportMetadataHeaders {
        is_index_count_missing: bool,
        is_task_key_missing: bool,
        is_total_index_documents_missing: bool,
    },

    #[error(
        "Inconsistent task network headers: origin headers: {origin_status}, import headers: {import_status}, import metadata: {import_metadata_status}",
        origin_status = if *is_missing_origin { "missing" } else { "present" },
        import_status = if *is_missing_import { "missing" } else { "present" },
        import_metadata_status = if *is_missing_import_metadata { "missing" } else { "present" }
    )]
    InconsistentTaskNetworkHeaders {
        is_missing_origin: bool,
        is_missing_import: bool,
        is_missing_import_metadata: bool,
    },
    #[error("Invalid value for header {header_name}: {msg}")]
    InvalidHeaderValue { header_name: &'static str, msg: String },
    #[error("This remote is not the leader of the network.\n - Note: only the leader `{leader}` can receive new tasks.")]
    NotLeader { leader: String },
    #[error("Unexpected `previousRemotes` in network call.\n - Note: `previousRemote` is reserved for internal use.")]
    UnexpectedNetworkPreviousRemotes,
    #[error("The network version in request is too old.\n - Received: {received}\n - Expected at least: {expected_at_least}")]
    NetworkVersionTooOld { received: Uuid, expected_at_least: Uuid },
    #[error("Remote `{remote}` encountered an error: {error}")]
    RemoteIndexScheduler { remote: String, error: index_scheduler::Error },
    #[error("{if_remote}Already has a pending network task with uid {task_uid}.\n - Note: No network task can be registered while any previous network task is not done processing.\n - Hint: Wait for task {task_uid} to complete or cancel it.",
        if_remote = if let Some(remote) = remote {
            format!("Remote `{remote}` encountered an error: ")
        } else { "".into() }
    )]
    UnprocessedNetworkTask { remote: Option<String>, task_uid: meilisearch_types::tasks::TaskId },
}

impl MeilisearchHttpError {
@@ -176,7 +122,6 @@ impl ErrorCode for MeilisearchHttpError {
            MeilisearchHttpError::SerdeJson(_) => Code::Internal,
            MeilisearchHttpError::HeedError(_) => Code::Internal,
            MeilisearchHttpError::IndexScheduler(e) => e.error_code(),
            MeilisearchHttpError::RemoteIndexScheduler { error, .. } => error.error_code(),
            MeilisearchHttpError::Milli { error, .. } => error.error_code(),
            MeilisearchHttpError::Payload(e) => e.error_code(),
            MeilisearchHttpError::FileStore(_) => Code::Internal,
@@ -197,19 +142,10 @@ impl ErrorCode for MeilisearchHttpError {
            MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
                Code::InvalidMultiSearchQueryPersonalization
            }
            MeilisearchHttpError::InconsistentOriginHeaders { .. }
            MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
            | MeilisearchHttpError::InconsistentImportHeaders { .. }
            | MeilisearchHttpError::InconsistentImportMetadataHeaders { .. }
            | MeilisearchHttpError::InconsistentTaskNetworkHeaders { .. } => {
                Code::InconsistentDocumentChangeHeaders
            }
            MeilisearchHttpError::InvalidHeaderValue { .. } => Code::InvalidHeaderValue,
            MeilisearchHttpError::NotLeader { .. } => Code::NotLeader,
            MeilisearchHttpError::UnexpectedNetworkPreviousRemotes => {
                Code::UnexpectedNetworkPreviousRemotes
            }
            MeilisearchHttpError::NetworkVersionTooOld { .. } => Code::NetworkVersionTooOld,
            MeilisearchHttpError::UnprocessedNetworkTask { .. } => Code::UnprocessedNetworkTask,
        }
    }
}
@@ -12,7 +12,6 @@ pub mod option;
#[cfg(test)]
mod option_test;
pub mod personalization;
pub mod proxy;
pub mod routes;
pub mod search;
pub mod search_queue;
@@ -230,7 +229,6 @@ pub fn setup_meilisearch(
        autobatching_enabled: true,
        cleanup_enabled: !opt.experimental_replication_parameters,
        max_number_of_tasks: 1_000_000,
        export_default_payload_size_bytes: almost_as_big_as(opt.http_payload_size_limit),
        max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
        batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.map_or_else(
            || {
@@ -341,13 +339,6 @@ pub fn setup_meilisearch(
    Ok((index_scheduler, auth_controller))
}

/// Returns the input minus 1 MiB, floored at 20 MiB.
fn almost_as_big_as(input: byte_unit::Byte) -> byte_unit::Byte {
    let with_margin = input.subtract(byte_unit::Byte::MEBIBYTE);
    let at_least = byte_unit::Byte::MEBIBYTE.multiply(20).unwrap();
    with_margin.unwrap_or(at_least).max(at_least)
}
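
// Worked examples (not in the original): a 256 MiB payload limit yields a 255 MiB default
// export payload size, while anything at or below 21 MiB is floored to 20 MiB
// (e.g. 10 MiB - 1 MiB = 9 MiB, which `max` raises back to 20 MiB).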

/// Try to start the IndexScheduler and AuthController without checking the VERSION file or anything.
fn open_or_create_database_unchecked(
    opt: &Opt,
@@ -1,43 +0,0 @@
use std::fs::File;

use meilisearch_types::network::Remote;

pub enum Body<T, F>
where
    T: serde::Serialize,
    F: FnMut(&str, &Remote, &mut T),
{
    NdJsonPayload(File),
    Inline(T),
    Generated(T, F),
    None,
}

impl Body<(), fn(&str, &Remote, &mut ())> {
    pub fn with_ndjson_payload(file: File) -> Self {
        Self::NdJsonPayload(file)
    }

    pub fn none() -> Self {
        Self::None
    }
}

impl<T> Body<T, fn(&str, &Remote, &mut T)>
where
    T: serde::Serialize,
{
    pub fn inline(payload: T) -> Self {
        Self::Inline(payload)
    }
}

impl<T, F> Body<T, F>
where
    T: serde::Serialize,
    F: FnMut(&str, &Remote, &mut T),
{
    pub fn generated(initial: T, f: F) -> Self {
        Self::Generated(initial, f)
    }
}
@@ -1,31 +0,0 @@
use actix_web::HttpRequest;
use index_scheduler::IndexScheduler;
use meilisearch_types::network::{Network, Remote};
use meilisearch_types::tasks::network::{DbTaskNetwork, TaskNetwork};
use meilisearch_types::tasks::Task;

use crate::error::MeilisearchHttpError;
use crate::proxy::Body;

pub fn task_network_and_check_leader_and_version(
    _req: &HttpRequest,
    _network: &Network,
) -> Result<Option<TaskNetwork>, MeilisearchHttpError> {
    Ok(None)
}

pub async fn proxy<T, F>(
    _index_scheduler: &IndexScheduler,
    _index_uid: Option<&str>,
    _req: &HttpRequest,
    _task_network: DbTaskNetwork,
    _network: Network,
    _body: Body<T, F>,
    task: &Task,
) -> Result<Task, MeilisearchHttpError>
where
    T: serde::Serialize,
    F: FnMut(&str, &Remote, &mut T),
{
    Ok(task.clone())
}
@@ -1,803 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

use std::collections::BTreeMap;

use actix_web::http::header::CONTENT_TYPE;
use actix_web::HttpRequest;
use bytes::Bytes;
use index_scheduler::IndexScheduler;
use meilisearch_types::error::ResponseError;
use meilisearch_types::milli::DocumentId;
use meilisearch_types::network::Remote;
use meilisearch_types::tasks::network::headers::{
    PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
    PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
    PROXY_ORIGIN_NETWORK_VERSION_HEADER, PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
};
use meilisearch_types::tasks::network::{
    DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
};
use meilisearch_types::tasks::Task;
use reqwest::StatusCode;
use serde::de::DeserializeOwned;
use serde_json::Value;
use uuid::Uuid;

use crate::error::MeilisearchHttpError;
use crate::proxy::{Body, ProxyError, ReqwestErrorWithoutUrl};
use crate::routes::SummarizedTaskView;

mod timeouts {
    use std::sync::LazyLock;

    pub static CONNECT_SECONDS: LazyLock<u64> =
        LazyLock::new(|| fetch_or_default("MEILI_EXPERIMENTAL_PROXY_CONNECT_TIMEOUT_SECONDS", 3));

    pub static BACKOFF_SECONDS: LazyLock<u64> =
        LazyLock::new(|| fetch_or_default("MEILI_EXPERIMENTAL_PROXY_BACKOFF_TIMEOUT_SECONDS", 25));

    pub static REQUEST_SECONDS: LazyLock<u64> =
        LazyLock::new(|| fetch_or_default("MEILI_EXPERIMENTAL_PROXY_REQUEST_TIMEOUT_SECONDS", 30));

    fn fetch_or_default(key: &str, default: u64) -> u64 {
        match std::env::var(key) {
            Ok(timeout) => timeout.parse().unwrap_or_else(|_| {
                panic!("`{key}` environment variable is not parseable as an integer: {timeout}")
            }),
            Err(std::env::VarError::NotPresent) => default,
            Err(std::env::VarError::NotUnicode(_)) => {
                panic!("`{key}` environment variable is not valid unicode")
            }
        }
    }
}
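
// Illustrative note (not in the original): each timeout can be overridden at startup, e.g.
// `MEILI_EXPERIMENTAL_PROXY_REQUEST_TIMEOUT_SECONDS=60` raises the per-request timeout from
// its 30s default; a non-integer value makes `fetch_or_default` panic at first use.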

impl<T, F> Body<T, F>
where
    T: serde::Serialize,
    F: FnMut(&str, &Remote, &mut T),
{
    pub fn into_bytes_iter(
        self,
        remotes: impl IntoIterator<Item = (String, Remote)>,
    ) -> Result<
        impl Iterator<Item = (Option<Bytes>, (String, Remote))>,
        meilisearch_types::milli::Error,
    > {
        let bytes = match self {
            Body::NdJsonPayload(file) => {
                Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
            }

            Body::Inline(payload) => {
                Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
            }

            Body::None => None,

            Body::Generated(mut initial, mut f) => {
                return Ok(either::Right(remotes.into_iter().map(move |(name, remote)| {
                    f(&name, &remote, &mut initial);
                    let bytes =
                        Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()));
                    (bytes, (name, remote))
                })));
            }
        };
        Ok(either::Left(std::iter::repeat(bytes).zip(remotes)))
    }

    pub fn into_bytes(
        self,
        remote_name: &str,
        remote: &Remote,
    ) -> Result<Option<Bytes>, meilisearch_types::milli::Error> {
        Ok(match self {
            Body::NdJsonPayload(file) => {
                Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
            }

            Body::Inline(payload) => {
                Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
            }

            Body::None => None,

            Body::Generated(mut initial, mut f) => {
                f(remote_name, remote, &mut initial);
                Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()))
            }
        })
    }
}

/// Parses the headers to determine if this task is a duplicate and originates with a remote.
///
/// If not, checks whether this remote is the leader and returns `MeilisearchHttpError::NotLeader` if not.
///
/// If there is no leader, returns `Ok(None)`.
///
/// # Errors
///
/// - `MeilisearchHttpError::NotLeader`: if the following are true simultaneously:
///   1. The task originates with the current node
///   2. There's a declared `leader`
///   3. The declared leader is **not** the current node
/// - `MeilisearchHttpError::InvalidHeaderValue`: if headers cannot be parsed as a task network.
/// - `MeilisearchHttpError::InconsistentTaskNetworkHeaders`: if only some of the headers are present.
pub fn task_network_and_check_leader_and_version(
    req: &HttpRequest,
    network: &meilisearch_types::network::Network,
) -> Result<Option<TaskNetwork>, MeilisearchHttpError> {
    let task_network =
        match (origin_from_req(req)?, import_data_from_req(req)?, import_metadata_from_req(req)?) {
            (Some(network_change), Some(import_from), Some(metadata)) => {
                TaskNetwork::Import { import_from, network_change, metadata }
            }
            (Some(origin), None, None) => TaskNetwork::Origin { origin },
            (None, None, None) => {
                match (network.leader.as_deref(), network.local.as_deref()) {
                    // 1. Always allowed if there is no leader
                    (None, _) => return Ok(None),
                    // 2. Allowed if the leader is self
                    (Some(leader), Some(this)) if leader == this => (),
                    // 3. Any other change is disallowed
                    (Some(leader), _) => {
                        return Err(MeilisearchHttpError::NotLeader { leader: leader.to_string() })
                    }
                }

                TaskNetwork::Remotes {
                    remote_tasks: Default::default(),
                    network_version: network.version,
                }
            }
            // all good cases were matched, so this is always an error
            (origin, import_from, metadata) => {
                return Err(MeilisearchHttpError::InconsistentTaskNetworkHeaders {
                    is_missing_origin: origin.is_none(),
                    is_missing_import: import_from.is_none(),
                    is_missing_import_metadata: metadata.is_none(),
                })
            }
        };

    if task_network.network_version() < network.version {
        return Err(MeilisearchHttpError::NetworkVersionTooOld {
            received: task_network.network_version(),
            expected_at_least: network.version,
        });
    }

    Ok(Some(task_network))
}

/// Updates the task's network description and, if necessary, proxies the passed request to the rest of the network.
///
/// This function reads the custom headers from the request to determine if it must proxy the request or if the request
/// has already been proxied.
///
/// - when it must proxy the request, the endpoint, method and query params are retrieved from the passed `req`, then the `body` is
///   sent to all remotes of the `network` (except `self`). The responses from the remotes are collected to update the passed `task`
///   with the task ids from the task queues of the remotes.
/// - when the request has already been proxied, the custom headers contain information about the remote that created the initial task.
///   This information is copied to the passed task.
///
/// # Returns
///
/// The updated task. The task is read back from the database to avoid erasing concurrent changes.
pub async fn proxy<T, F>(
    index_scheduler: &IndexScheduler,
    index_uid: Option<&str>,
    req: &HttpRequest,
    mut task_network: DbTaskNetwork,
    network: meilisearch_types::network::Network,
    body: Body<T, F>,
    task: &Task,
) -> Result<Task, MeilisearchHttpError>
where
    T: serde::Serialize,
    F: FnMut(&str, &Remote, &mut T),
{
    if let DbTaskNetwork::Remotes { remote_tasks, network_version } = &mut task_network {
        let network_version = *network_version;
        let this = network
            .local
            .as_deref()
            .expect("inconsistent `network.leader` and `network.self`")
            .to_owned();

        let content_type = match &body {
            // for file bodies, force x-ndjson
            Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
            // otherwise get content type from request
            _ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
        };

        let mut in_flight_remote_queries = BTreeMap::new();
        let client = reqwest::ClientBuilder::new()
            .connect_timeout(std::time::Duration::from_secs(*timeouts::CONNECT_SECONDS))
            .build()
            .unwrap();

        let method = from_old_http_method(req.method());

        // send payload to all remotes
        for (body, (node_name, node)) in body
            .into_bytes_iter(network.remotes.into_iter().filter(|(name, _)| name.as_str() != this))
            .map_err(|err| {
                MeilisearchHttpError::from_milli(err, index_uid.map(ToOwned::to_owned))
            })?
        {
            tracing::trace!(node_name, "proxying task to remote");

            let client = client.clone();
            let api_key = node.write_api_key;
            let this = this.clone();
            let method = method.clone();
            let path_and_query = req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");

            in_flight_remote_queries.insert(
                node_name,
                tokio::spawn({
                    let url = format!("{}{}", node.url, path_and_query);

                    let url_encoded_this = urlencoding::encode(&this).into_owned();
                    // a `u32` rendered in decimal is already URL-safe
                    let url_encoded_task_uid = task.uid.to_string();

                    let content_type = content_type.map(|b| b.to_owned());

                    let backoff = backoff::ExponentialBackoffBuilder::new()
                        .with_max_elapsed_time(Some(std::time::Duration::from_secs(
                            *timeouts::BACKOFF_SECONDS,
                        )))
                        .build();

                    backoff::future::retry(backoff, move || {
                        let url = url.clone();
                        let client = client.clone();
                        let url_encoded_this = url_encoded_this.clone();
                        let url_encoded_task_uid = url_encoded_task_uid.clone();
                        let content_type = content_type.clone();

                        let body = body.clone();
                        let api_key = api_key.clone();
                        let method = method.clone();

                        async move {
                            try_proxy(
                                method,
                                &url,
                                content_type.as_deref(),
                                network_version,
                                api_key.as_deref(),
                                &client,
                                &url_encoded_this,
                                &url_encoded_task_uid,
                                body,
                            )
                            .await
                        }
                    })
                }),
            );
        }

        // wait for all in-flight queries to finish and collect their results
        for (node_name, handle) in in_flight_remote_queries {
            match handle.await {
                Ok(Ok(res)) => {
                    let task_uid = res.task_uid;

                    remote_tasks.insert(node_name, Ok(task_uid).into());
                }
                Ok(Err(error)) => {
                    remote_tasks.insert(node_name, Err(error.as_response_error()).into());
                }
                Err(panic) => match panic.try_into_panic() {
                    Ok(panic) => {
                        let msg = match panic.downcast_ref::<&'static str>() {
                            Some(s) => *s,
                            None => match panic.downcast_ref::<String>() {
                                Some(s) => &s[..],
                                None => "Box<dyn Any>",
                            },
                        };
                        remote_tasks.insert(
                            node_name,
                            Err(ResponseError::from_msg(
                                msg.to_string(),
                                meilisearch_types::error::Code::Internal,
                            ))
                            .into(),
                        );
                    }
                    Err(_) => {
                        tracing::error!("proxy task was unexpectedly cancelled")
                    }
                },
            }
        }
    }

    Ok(index_scheduler.set_task_network(task.uid, task_network)?)
}

pub async fn send_request<T, F, U>(
    path_and_query: &str,
    method: reqwest::Method,
    content_type: Option<String>,
    body: Body<T, F>,
    remote_name: &str,
    remote: &Remote,
) -> Result<U, ProxyError>
where
    T: serde::Serialize,
    F: FnMut(&str, &Remote, &mut T),
    U: DeserializeOwned,
{
    let content_type = match &body {
        // for file bodies, force x-ndjson
        Body::NdJsonPayload(_) => Some("application/x-ndjson".into()),
        // otherwise get content type from request
        _ => content_type,
    };

    let body = body.into_bytes(remote_name, remote).map_err(Box::new)?;

    let client = reqwest::ClientBuilder::new()
        .connect_timeout(std::time::Duration::from_secs(*timeouts::CONNECT_SECONDS))
        .build()
        .unwrap();

    let url = format!("{}{}", remote.url, path_and_query);

    // send payload to remote
    tracing::trace!(remote_name, "sending request to remote");
    let api_key = remote.write_api_key.clone();

    let backoff = backoff::ExponentialBackoffBuilder::new()
        .with_max_elapsed_time(Some(std::time::Duration::from_secs(*timeouts::BACKOFF_SECONDS)))
        .build();

    backoff::future::retry(backoff, move || {
        let url = url.clone();
        let client = client.clone();
        let content_type = content_type.clone();

        let body = body.clone();
        let api_key = api_key.clone();
        let method = method.clone();

        async move {
            let request = client
                .request(method, url)
                .timeout(std::time::Duration::from_secs(*timeouts::REQUEST_SECONDS));
            let request = if let Some(body) = body { request.body(body) } else { request };
            let request =
                if let Some(api_key) = api_key { request.bearer_auth(api_key) } else { request };
            let request = if let Some(content_type) = content_type {
                request.header(CONTENT_TYPE.as_str(), content_type)
            } else {
                request
            };

            let response = request.send().await;
            let response = match response {
                Ok(response) => response,
                Err(error) if error.is_timeout() => {
                    return Err(backoff::Error::transient(ProxyError::Timeout))
                }
                Err(error) => {
                    return Err(backoff::Error::transient(ProxyError::CouldNotSendRequest(
                        ReqwestErrorWithoutUrl::new(error),
                    )))
                }
            };

            handle_response(response).await
        }
    })
    .await
}

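For reference, the retry policy used by `send_request` can be reproduced in isolation. The sketch below is illustrative only, assuming the `backoff` crate with its async (tokio) support plus `reqwest`; the URL is a placeholder. Transient failures (failed sends, timeouts) are retried with exponential backoff until the elapsed-time cap, while 4xx responses are treated as permanent and abort immediately.

use std::time::Duration;

// Hypothetical helper, not part of this diff: retry a GET with the same
// transient/permanent split as `send_request` above.
async fn fetch_with_retry() -> Result<String, reqwest::Error> {
    let backoff = backoff::ExponentialBackoffBuilder::new()
        .with_max_elapsed_time(Some(Duration::from_secs(25)))
        .build();

    backoff::future::retry(backoff, || async {
        let response = reqwest::get("http://localhost:7700/health") // placeholder URL
            .await
            .map_err(backoff::Error::transient)?; // could not send: retry
        if response.status().is_client_error() {
            // a 4xx will not improve on retry: fail permanently
            return Err(backoff::Error::permanent(response.error_for_status().unwrap_err()));
        }
        response.text().await.map_err(backoff::Error::transient)
    })
    .await
}
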
async fn handle_response<U>(response: reqwest::Response) -> Result<U, backoff::Error<ProxyError>>
where
    U: DeserializeOwned,
{
    match response.status() {
        status_code if status_code.is_success() => (),
        StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
            return Err(backoff::Error::Permanent(ProxyError::AuthenticationError))
        }
        status_code if status_code.is_client_error() => {
            let response = parse_error(response).await;
            return Err(backoff::Error::Permanent(ProxyError::BadRequest {
                status_code,
                response,
            }));
        }
        status_code if status_code.is_server_error() => {
            let response = parse_error(response).await;
            return Err(backoff::Error::transient(ProxyError::RemoteError {
                status_code,
                response,
            }));
        }
        status_code => {
            tracing::warn!(
                status_code = status_code.as_u16(),
                "remote replied with unexpected status code"
            );
        }
    }
    let response: U = match parse_response(response).await {
        Ok(response) => response,
        Err(response) => {
            return Err(backoff::Error::permanent(ProxyError::CouldNotParseResponse { response }))
        }
    };
    Ok(response)
}

fn from_old_http_method(method: &actix_http::Method) -> reqwest::Method {
    match method {
        &actix_http::Method::CONNECT => reqwest::Method::CONNECT,
        &actix_http::Method::DELETE => reqwest::Method::DELETE,
        &actix_http::Method::GET => reqwest::Method::GET,
        &actix_http::Method::HEAD => reqwest::Method::HEAD,
        &actix_http::Method::OPTIONS => reqwest::Method::OPTIONS,
        &actix_http::Method::PATCH => reqwest::Method::PATCH,
        &actix_http::Method::POST => reqwest::Method::POST,
        &actix_http::Method::PUT => reqwest::Method::PUT,
        &actix_http::Method::TRACE => reqwest::Method::TRACE,
        method => reqwest::Method::from_bytes(method.as_str().as_bytes()).unwrap(),
    }
}

#[allow(clippy::too_many_arguments)]
async fn try_proxy(
    method: reqwest::Method,
    url: &str,
    content_type: Option<&[u8]>,
    network_version: Uuid,
    api_key: Option<&str>,
    client: &reqwest::Client,
    url_encoded_this: &str,
    url_encoded_task_uid: &str,
    body: Option<Bytes>,
) -> Result<SummarizedTaskView, backoff::Error<ProxyError>> {
    let request = client
        .request(method, url)
        .timeout(std::time::Duration::from_secs(*timeouts::REQUEST_SECONDS));
    let request = if let Some(body) = body { request.body(body) } else { request };
    let request = if let Some(api_key) = api_key { request.bearer_auth(api_key) } else { request };
    let request = request.header(PROXY_ORIGIN_TASK_UID_HEADER, url_encoded_task_uid);
    let request = request.header(PROXY_ORIGIN_NETWORK_VERSION_HEADER, &network_version.to_string());
    let request = request.header(PROXY_ORIGIN_REMOTE_HEADER, url_encoded_this);
    let request = if let Some(content_type) = content_type {
        request.header(CONTENT_TYPE.as_str(), content_type)
    } else {
        request
    };

    let response = request.send().await;
    let response = match response {
        Ok(response) => response,
        Err(error) if error.is_timeout() => {
            return Err(backoff::Error::transient(ProxyError::Timeout))
        }
        Err(error) => {
            return Err(backoff::Error::transient(ProxyError::CouldNotSendRequest(
                ReqwestErrorWithoutUrl::new(error),
            )))
        }
    };

    handle_response(response).await
}

async fn parse_error(response: reqwest::Response) -> Result<String, ReqwestErrorWithoutUrl> {
    let bytes = match response.bytes().await {
        Ok(bytes) => bytes,
        Err(error) => return Err(ReqwestErrorWithoutUrl::new(error)),
    };

    Ok(parse_bytes_as_error(&bytes))
}

fn parse_bytes_as_error(bytes: &[u8]) -> String {
    match serde_json::from_slice::<Value>(bytes) {
        Ok(value) => value.to_string(),
        Err(_) => String::from_utf8_lossy(bytes).into_owned(),
    }
}

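`parse_bytes_as_error` (and `parse_error` above it) never discards a remote error body: JSON is normalized through `serde_json::Value`, anything else degrades to a lossy UTF-8 string. A self-contained illustration of that behavior, with made-up byte inputs:

fn normalize_error_body(bytes: &[u8]) -> String {
    // same fallback shape as parse_bytes_as_error
    match serde_json::from_slice::<serde_json::Value>(bytes) {
        Ok(value) => value.to_string(),
        Err(_) => String::from_utf8_lossy(bytes).into_owned(),
    }
}

fn main() {
    // valid JSON is kept as JSON
    assert_eq!(normalize_error_body(br#"{"code":"index_not_found"}"#), r#"{"code":"index_not_found"}"#);
    // invalid UTF-8 degrades to a replacement character instead of failing
    assert_eq!(normalize_error_body(b"boom \xFF"), "boom \u{FFFD}");
}
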
async fn parse_response<T: DeserializeOwned>(
    response: reqwest::Response,
) -> Result<T, Result<String, ReqwestErrorWithoutUrl>> {
    let bytes = match response.bytes().await {
        Ok(bytes) => bytes,
        Err(error) => return Err(Err(ReqwestErrorWithoutUrl::new(error))),
    };

    match serde_json::from_slice::<T>(&bytes) {
        Ok(value) => Ok(value),
        Err(_) => Err(Ok(parse_bytes_as_error(&bytes))),
    }
}

pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchHttpError> {
    let (remote_name, task_uid, network_version) = match (
        req.headers().get(PROXY_ORIGIN_REMOTE_HEADER),
        req.headers().get(PROXY_ORIGIN_TASK_UID_HEADER),
        req.headers().get(PROXY_ORIGIN_NETWORK_VERSION_HEADER),
    ) {
        (None, None, _) => return Ok(None),
        (None, Some(_), _) => {
            return Err(MeilisearchHttpError::InconsistentOriginHeaders { is_remote_missing: true })
        }
        (Some(_), None, _) => {
            return Err(MeilisearchHttpError::InconsistentOriginHeaders {
                is_remote_missing: false,
            })
        }
        (Some(remote_name), Some(task_uid), network_version) => {
            let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_ORIGIN_REMOTE_HEADER,
                    msg: format!("while parsing remote name as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_ORIGIN_REMOTE_HEADER,
                msg: format!("while URL-decoding remote name: {err}"),
            })?;
            let task_uid = urlencoding::decode(task_uid.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_ORIGIN_TASK_UID_HEADER,
                    msg: format!("while parsing task UID as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_ORIGIN_TASK_UID_HEADER,
                msg: format!("while URL-decoding task UID: {err}"),
            })?;
            let network_version = match network_version {
                Some(network_version) => Some({
                    urlencoding::decode(network_version.to_str().map_err(|err| {
                        MeilisearchHttpError::InvalidHeaderValue {
                            header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
                            msg: format!("while parsing network version as UTF-8: {err}"),
                        }
                    })?)
                    .map_err(|err| {
                        MeilisearchHttpError::InvalidHeaderValue {
                            header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
                            msg: format!("while URL-decoding network version: {err}"),
                        }
                    })?
                }),
                None => None,
            };
            (remote_name, task_uid, network_version)
        }
    };

    let task_uid: u32 =
        task_uid.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
            header_name: PROXY_ORIGIN_TASK_UID_HEADER,
            msg: format!("while parsing the task UID as an integer: {err}"),
        })?;

    let network_version: Uuid = if let Some(network_version) = network_version {
        Uuid::parse_str(&network_version).map_err(|err| {
            MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
                msg: format!("while parsing the network version as a UUID: {err}"),
            }
        })?
    } else {
        Uuid::nil()
    };

    Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid, network_version }))
}

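`origin_from_req` undoes, header by header, the percent-encoding applied on the sending side (see `url_encoded_this` in the proxy). A quick round-trip sketch with the `urlencoding` crate and an arbitrary remote name:

fn main() {
    let remote_name = "ms éu-west/1"; // arbitrary example
    let encoded = urlencoding::encode(remote_name); // header-safe form
    assert_eq!(encoded, "ms%20%C3%A9u-west%2F1");
    let decoded = urlencoding::decode(&encoded).expect("valid UTF-8 after decoding");
    assert_eq!(decoded, remote_name);
}
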
pub fn import_data_from_req(req: &HttpRequest) -> Result<Option<ImportData>, MeilisearchHttpError> {
    let (remote_name, index_name, documents) = match (
        req.headers().get(PROXY_IMPORT_REMOTE_HEADER),
        req.headers().get(PROXY_IMPORT_INDEX_HEADER),
        req.headers().get(PROXY_IMPORT_DOCS_HEADER),
    ) {
        (None, None, None) => return Ok(None),
        (Some(remote_name), Some(index_name), Some(documents)) => {
            let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_REMOTE_HEADER,
                    msg: format!("while parsing import remote name as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_REMOTE_HEADER,
                msg: format!("while URL-decoding import remote name: {err}"),
            })?;

            let index_name = urlencoding::decode(index_name.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_INDEX_HEADER,
                    msg: format!("while parsing import index name as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_INDEX_HEADER,
                msg: format!("while URL-decoding import index name: {err}"),
            })?;

            let documents = urlencoding::decode(documents.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_DOCS_HEADER,
                    msg: format!("while parsing documents as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_DOCS_HEADER,
                msg: format!("while URL-decoding documents: {err}"),
            })?;
            (remote_name, Some(index_name), documents)
        }
        (Some(remote_name), None, Some(documents)) => {
            let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_REMOTE_HEADER,
                    msg: format!("while parsing import remote name as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_REMOTE_HEADER,
                msg: format!("while URL-decoding import remote name: {err}"),
            })?;

            let documents = urlencoding::decode(documents.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_DOCS_HEADER,
                    msg: format!("while parsing documents as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_DOCS_HEADER,
                msg: format!("while URL-decoding documents: {err}"),
            })?;
            (remote_name, None, documents)
        }
        // catch-all pattern that has to contain an inconsistency since we already matched
        // (None, None, None), (Some, Some, Some) and (Some, None, Some)
        (remote_name, index_name, documents) => {
            return Err(MeilisearchHttpError::InconsistentImportHeaders {
                is_remote_missing: remote_name.is_none(),
                is_index_missing: index_name.is_none(),
                is_docs_missing: documents.is_none(),
            })
        }
    };

    let document_count: u64 =
        documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
            header_name: PROXY_IMPORT_DOCS_HEADER,
            msg: format!("while parsing the documents as an integer: {err}"),
        })?;

    Ok(Some(ImportData {
        remote_name: remote_name.to_string(),
        index_name: index_name.map(|index_name| index_name.to_string()),
        document_count,
    }))
}

pub fn import_metadata_from_req(
    req: &HttpRequest,
) -> Result<Option<ImportMetadata>, MeilisearchHttpError> {
    let (index_count, task_key, total_index_documents) = match (
        req.headers().get(PROXY_IMPORT_INDEX_COUNT_HEADER),
        req.headers().get(PROXY_IMPORT_TASK_KEY_HEADER),
        req.headers().get(PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER),
    ) {
        (None, None, None) => return Ok(None),
        (Some(index_count), Some(task_key), Some(total_index_documents)) => {
            let index_count = urlencoding::decode(index_count.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
                    msg: format!("while parsing import index count as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
                msg: format!("while URL-decoding import index count: {err}"),
            })?;

            let task_key = urlencoding::decode(task_key.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_TASK_KEY_HEADER,
                    msg: format!("while parsing import task key as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_TASK_KEY_HEADER,
                msg: format!("while URL-decoding import task key: {err}"),
            })?;

            let total_index_documents =
                urlencoding::decode(total_index_documents.to_str().map_err(|err| {
                    MeilisearchHttpError::InvalidHeaderValue {
                        header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
                        msg: format!("while parsing total index documents as UTF-8: {err}"),
                    }
                })?)
                .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
                    msg: format!("while URL-decoding total index documents: {err}"),
                })?;
            (index_count, Some(task_key), total_index_documents)
        }
        (Some(index_count), None, Some(total_index_documents)) => {
            let index_count = urlencoding::decode(index_count.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
                    msg: format!("while parsing import index count as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
                msg: format!("while URL-decoding import index count: {err}"),
            })?;

            let total_index_documents =
                urlencoding::decode(total_index_documents.to_str().map_err(|err| {
                    MeilisearchHttpError::InvalidHeaderValue {
                        header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
                        msg: format!("while parsing total index documents as UTF-8: {err}"),
                    }
                })?)
                .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
                    msg: format!("while URL-decoding total index documents: {err}"),
                })?;
            (index_count, None, total_index_documents)
        }
        // catch-all pattern that has to contain an inconsistency since we already matched
        // (None, None, None), (Some, Some, Some) and (Some, None, Some)
        (index_count, task_key, total_index_documents) => {
            return Err(MeilisearchHttpError::InconsistentImportMetadataHeaders {
                is_index_count_missing: index_count.is_none(),
                is_task_key_missing: task_key.is_none(),
                is_total_index_documents_missing: total_index_documents.is_none(),
            })
        }
    };

    let index_count: u64 =
        index_count.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
            header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
            msg: format!("while parsing the index count as an integer: {err}"),
        })?;

    let task_key = task_key
        .map(|task_key| {
            let task_key: Result<DocumentId, _> =
                task_key.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_IMPORT_TASK_KEY_HEADER,
                    msg: format!("while parsing import task key as an integer: {err}"),
                });
            task_key
        })
        .transpose()?;

    let total_index_documents: u64 =
        total_index_documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
            header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
            msg: format!("while parsing the total index documents as an integer: {err}"),
        })?;

    Ok(Some(ImportMetadata { index_count, task_key, total_index_documents }))
}

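The `task_key` handling above is the `Option::map` + `transpose` idiom: parse only when the header is present, and let the first parse error bubble up. Reduced to a standalone sketch with plain integers:

fn parse_optional(raw: Option<&str>) -> Result<Option<u32>, std::num::ParseIntError> {
    // Option<Result<_, _>> flipped into Result<Option<_>, _>
    raw.map(|value| value.parse::<u32>()).transpose()
}

fn main() {
    assert_eq!(parse_optional(None), Ok(None));
    assert_eq!(parse_optional(Some("42")), Ok(Some(42)));
    assert!(parse_optional(Some("not-a-number")).is_err());
}
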
@@ -1,63 +0,0 @@
use meilisearch_types::error::{ErrorCode as _, ResponseError};
use reqwest::StatusCode;

#[derive(Debug, thiserror::Error)]
pub enum ProxyError {
    #[error("{0}")]
    CouldNotSendRequest(ReqwestErrorWithoutUrl),
    #[error("could not authenticate against the remote host\n - hint: check that the remote instance was registered with a valid API key having the `documents.add` action")]
    AuthenticationError,
    #[error(
        "could not parse response from the remote host as a document addition response{}\n - hint: check that the remote instance is a Meilisearch instance running the same version",
        response_from_remote(response)
    )]
    CouldNotParseResponse { response: Result<String, ReqwestErrorWithoutUrl> },
    #[error("remote host responded with code {}{}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance", status_code.as_u16(), response_from_remote(response))]
    BadRequest { status_code: StatusCode, response: Result<String, ReqwestErrorWithoutUrl> },
    #[error("remote host did not answer before the deadline")]
    Timeout,
    #[error("remote host responded with code {}{}", status_code.as_u16(), response_from_remote(response))]
    RemoteError { status_code: StatusCode, response: Result<String, ReqwestErrorWithoutUrl> },
    #[error("error while preparing the request: {error}")]
    Milli {
        #[from]
        error: Box<meilisearch_types::milli::Error>,
    },
}

impl ProxyError {
    pub fn as_response_error(&self) -> ResponseError {
        use meilisearch_types::error::Code;
        let message = self.to_string();
        let code = match self {
            ProxyError::CouldNotSendRequest(_) => Code::RemoteCouldNotSendRequest,
            ProxyError::AuthenticationError => Code::RemoteInvalidApiKey,
            ProxyError::BadRequest { .. } => Code::RemoteBadRequest,
            ProxyError::Timeout => Code::RemoteTimeout,
            ProxyError::RemoteError { .. } => Code::RemoteRemoteError,
            ProxyError::CouldNotParseResponse { .. } => Code::RemoteBadResponse,
            ProxyError::Milli { error } => error.error_code(),
        };
        ResponseError::from_msg(message, code)
    }
}

#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ReqwestErrorWithoutUrl(reqwest::Error);
impl ReqwestErrorWithoutUrl {
    pub fn new(inner: reqwest::Error) -> Self {
        Self(inner.without_url())
    }
}

fn response_from_remote(response: &Result<String, ReqwestErrorWithoutUrl>) -> String {
    match response {
        Ok(response) => {
            format!(":\n - response from remote: {}", response)
        }
        Err(error) => {
            format!(":\n - additionally, could not retrieve response from remote: {error}")
        }
    }
}

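Two `thiserror` patterns from this removed module are worth keeping in mind when reading its replacement: `#[error]` format strings may call free functions on the fields, and `#[error(transparent)]` forwards `Display` to the wrapped error. A reduced sketch, with stand-in names, assuming the `thiserror` and `reqwest` crates:

use thiserror::Error;

#[derive(Debug, Error)]
enum RemoteCallError {
    // the format arguments call a helper on the `body` field
    #[error("remote host responded with code {}{}", status.as_u16(), describe(body))]
    Failed { status: reqwest::StatusCode, body: Option<String> },
    // Display and source() are forwarded to the inner error untouched
    #[error(transparent)]
    Transport(#[from] reqwest::Error),
}

fn describe(body: &Option<String>) -> String {
    match body {
        Some(body) => format!(":\n - response from remote: {body}"),
        None => String::new(),
    }
}
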
@@ -1,17 +0,0 @@
#[cfg(not(feature = "enterprise"))]
pub mod community_edition;
#[cfg(feature = "enterprise")]
pub mod enterprise_edition;
#[cfg(not(feature = "enterprise"))]
pub use community_edition::{proxy, task_network_and_check_leader_and_version};
#[cfg(feature = "enterprise")]
pub use enterprise_edition::{
    import_data_from_req, import_metadata_from_req, origin_from_req, proxy, send_request,
    task_network_and_check_leader_and_version,
};

mod body;
mod error;

pub use body::Body;
pub use error::{ProxyError, ReqwestErrorWithoutUrl};

39
crates/meilisearch/src/routes/indexes/community_edition.rs
Normal file
@@ -0,0 +1,39 @@
pub mod proxy {
    use std::fs::File;

    use actix_web::HttpRequest;
    use index_scheduler::IndexScheduler;

    use crate::error::MeilisearchHttpError;

    pub enum Body<T: serde::Serialize> {
        NdJsonPayload,
        Inline(T),
        None,
    }

    impl Body<()> {
        pub fn with_ndjson_payload(_file: File) -> Self {
            Self::NdJsonPayload
        }

        pub fn none() -> Self {
            Self::None
        }
    }

    pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
    pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";

    pub async fn proxy<T: serde::Serialize>(
        _index_scheduler: &IndexScheduler,
        _index_uid: &str,
        _req: &HttpRequest,
        _network: meilisearch_types::network::Network,
        _body: Body<T>,
        _task: &meilisearch_types::tasks::Task,
    ) -> Result<(), MeilisearchHttpError> {
        Ok(())
    }
}

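This stub keeps the community build compiling against the exact same call sites as the enterprise proxy; which module backs them is decided at compile time by the cfg-gated `current_edition` alias introduced later in this diff. The pattern in isolation, assuming an `enterprise` cargo feature is declared:

// Reduced sketch of the edition-switching pattern used by this PR: both
// modules expose the same items, and a cfg-gated alias picks one at compile
// time, so call sites only ever import from `current_edition`.
#[cfg(not(feature = "enterprise"))]
mod community_edition {
    pub fn proxy_enabled() -> bool {
        false // community build: proxying is a no-op
    }
}

#[cfg(feature = "enterprise")]
mod enterprise_edition {
    pub fn proxy_enabled() -> bool {
        true // enterprise build: requests are fanned out to remotes
    }
}

#[cfg(not(feature = "enterprise"))]
use community_edition as current_edition;
#[cfg(feature = "enterprise")]
use enterprise_edition as current_edition;

fn main() {
    println!("proxying enabled: {}", current_edition::proxy_enabled());
}
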
@@ -34,7 +34,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
/// Compact an index
#[utoipa::path(
    post,
    path = "{indexUid}/compact",
    path = "/{indexUid}/compact",
    tag = "Compact an index",
    security(("Bearer" = ["search", "*"])),
    params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),

@@ -45,7 +45,7 @@ use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::payload::Payload;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::proxy::{proxy, task_network_and_check_leader_and_version, Body};
use crate::routes::indexes::current_edition::proxy::{proxy, Body};
use crate::routes::indexes::search::fix_sort_query_parameters;
use crate::routes::{
    get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,

@@ -342,7 +342,6 @@ pub async fn delete_document(
    let DocumentParam { index_uid, document_id } = path.into_inner();
    let index_uid = IndexUid::try_from(index_uid)?;
    let network = index_scheduler.network();
    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

    analytics.publish(
        DocumentsDeletionAggregator {

@@ -360,23 +359,16 @@ pub async fn delete_document(
    };
    let uid = get_task_id(&req, &opt)?;
    let dry_run = is_dry_run(&req, &opt)?;
    let mut task = {
    let task = {
        let index_scheduler = index_scheduler.clone();
        tokio::task::spawn_blocking(move || {
            index_scheduler.register_with_custom_metadata(
            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
                task,
                uid,
                custom_metadata,
                dry_run,
                task_network,
            )
        })
        .await??
    };

    if let Some(task_network) = task.network.take() {
    if network.sharding() && !dry_run {
        proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
        proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
            .await?;
    }

    let task: SummarizedTaskView = task.into();

@@ -975,7 +967,6 @@ async fn document_addition(
) -> Result<SummarizedTaskView, MeilisearchHttpError> {
    let mime_type = extract_mime_type(req)?;
    let network = index_scheduler.network();
    let task_network = task_network_and_check_leader_and_version(req, &network)?;

    let format = match (
        mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),

@@ -1094,16 +1085,9 @@ async fn document_addition(
        index_uid: index_uid.to_string(),
    };

    // FIXME: not new to #6000, but _any_ error here will cause the payload to unduly persist
    let scheduler = index_scheduler.clone();
    let mut task = match tokio::task::spawn_blocking(move || {
    let task = match tokio::task::spawn_blocking(move || {
        scheduler.register_with_custom_metadata(
        scheduler.register_with_custom_metadata(task, task_id, custom_metadata, dry_run)
            task,
            task_id,
            custom_metadata,
            dry_run,
            task_network,
        )
    })
    .await?
    {

@@ -1114,13 +1098,12 @@ async fn document_addition(
        }
    };

    if let Some(task_network) = task.network.take() {
    if network.sharding() {
        if let Some(file) = file {
            proxy(
                &index_scheduler,
                Some(&index_uid),
                &index_uid,
                req,
                task_network,
                network,
                Body::with_ndjson_payload(file),
                &task,

@@ -1211,7 +1194,6 @@ pub async fn delete_documents_batch(

    let index_uid = IndexUid::try_from(index_uid.into_inner())?;
    let network = index_scheduler.network();
    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

    analytics.publish(
        DocumentsDeletionAggregator {

@@ -1232,31 +1214,16 @@ pub async fn delete_documents_batch(
        KindWithContent::DocumentDeletion { index_uid: index_uid.to_string(), documents_ids: ids };
    let uid = get_task_id(&req, &opt)?;
    let dry_run = is_dry_run(&req, &opt)?;
    let mut task = {
    let task = {
        let index_scheduler = index_scheduler.clone();
        tokio::task::spawn_blocking(move || {
            index_scheduler.register_with_custom_metadata(
            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
                task,
                uid,
                custom_metadata,
                dry_run,
                task_network,
            )
        })
        .await??
    };

    if let Some(task_network) = task.network.take() {
    if network.sharding() && !dry_run {
        proxy(
        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
            &index_scheduler,
            Some(&index_uid),
            &req,
            task_network,
            network,
            Body::inline(body),
            &task,
        )
        .await?;
    }

    let task: SummarizedTaskView = task.into();

@@ -1319,7 +1286,6 @@ pub async fn delete_documents_by_filter(
    let index_uid = index_uid.into_inner();
    let filter = body.into_inner();
    let network = index_scheduler.network();
    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

    analytics.publish(
        DocumentsDeletionAggregator {

@@ -1346,31 +1312,16 @@ pub async fn delete_documents_by_filter(

    let uid = get_task_id(&req, &opt)?;
    let dry_run = is_dry_run(&req, &opt)?;
    let mut task = {
    let task = {
        let index_scheduler = index_scheduler.clone();
        tokio::task::spawn_blocking(move || {
            index_scheduler.register_with_custom_metadata(
            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
                task,
                uid,
                custom_metadata,
                dry_run,
                task_network,
            )
        })
        .await??
    };

    if let Some(task_network) = task.network.take() {
    if network.sharding() && !dry_run {
        proxy(
        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
            &index_scheduler,
            Some(&index_uid),
            &req,
            task_network,
            network,
            Body::inline(filter),
            &task,
        )
        .await?;
    }

    let task: SummarizedTaskView = task.into();

@@ -1470,7 +1421,6 @@ pub async fn edit_documents_by_function(
        .check_edit_documents_by_function("Using the documents edit route")?;

    let network = index_scheduler.network();
    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

    let index_uid = IndexUid::try_from(index_uid.into_inner())?;
    let index_uid = index_uid.into_inner();

@@ -1517,31 +1467,16 @@ pub async fn edit_documents_by_function(

    let uid = get_task_id(&req, &opt)?;
    let dry_run = is_dry_run(&req, &opt)?;
    let mut task = {
    let task = {
        let index_scheduler = index_scheduler.clone();
        tokio::task::spawn_blocking(move || {
            index_scheduler.register_with_custom_metadata(
            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
                task,
                uid,
                custom_metadata,
                dry_run,
                task_network,
            )
        })
        .await??
    };

    if let Some(task_network) = task.network.take() {
    if network.sharding() && !dry_run {
        proxy(
        proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
            &index_scheduler,
            Some(&index_uid),
            &req,
            task_network,
            network,
            Body::inline(body),
            &task,
        )
        .await?;
    }

    let task: SummarizedTaskView = task.into();

@@ -1590,7 +1525,6 @@ pub async fn clear_all_documents(
    let index_uid = IndexUid::try_from(index_uid.into_inner())?;
    let network = index_scheduler.network();
    let CustomMetadataQuery { custom_metadata } = params.into_inner();
    let task_network = task_network_and_check_leader_and_version(&req, &network)?;

    analytics.publish(
        DocumentsDeletionAggregator {

@@ -1606,24 +1540,17 @@ pub async fn clear_all_documents(
    let uid = get_task_id(&req, &opt)?;
    let dry_run = is_dry_run(&req, &opt)?;

    let mut task = {
    let task = {
        let index_scheduler = index_scheduler.clone();

        tokio::task::spawn_blocking(move || {
            index_scheduler.register_with_custom_metadata(
            index_scheduler.register_with_custom_metadata(task, uid, custom_metadata, dry_run)
                task,
                uid,
                custom_metadata,
                dry_run,
                task_network,
            )
        })
        .await??
    };

    if let Some(task_network) = task.network.take() {
    if network.sharding() && !dry_run {
        proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
        proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
            .await?;
    }

    let task: SummarizedTaskView = task.into();

@@ -0,0 +1,426 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>

use std::collections::BTreeMap;
use std::fs::File;

use actix_web::http::header::CONTENT_TYPE;
use actix_web::HttpRequest;
use bytes::Bytes;
use index_scheduler::IndexScheduler;
use meilisearch_types::error::ResponseError;
use meilisearch_types::tasks::{Origin, RemoteTask, TaskNetwork};
use reqwest::StatusCode;
use serde::de::DeserializeOwned;
use serde_json::Value;

use crate::error::MeilisearchHttpError;
use crate::routes::indexes::enterprise_edition::proxy::error::{
    ProxyDocumentChangeError, ReqwestErrorWithoutUrl,
};
use crate::routes::SummarizedTaskView;

pub enum Body<T: serde::Serialize> {
    NdJsonPayload(File),
    Inline(T),
    None,
}

impl Body<()> {
    pub fn with_ndjson_payload(file: File) -> Self {
        Self::NdJsonPayload(file)
    }

    pub fn none() -> Self {
        Self::None
    }
}

/// If necessary, proxies the passed request to the network and updates the task description.
///
/// This function reads the custom headers from the request to determine whether it must proxy the request or whether the request
/// has already been proxied.
///
/// - when it must proxy the request, the endpoint, method and query params are retrieved from the passed `req`, then the `body` is
///   sent to all remotes of the `network` (except `self`). The responses from the remotes are collected to update the passed `task`
///   with the task ids from the task queues of the remotes.
/// - when the request has already been proxied, the custom headers contain information about the remote that created the initial task.
///   This information is copied to the passed task.
pub async fn proxy<T: serde::Serialize>(
    index_scheduler: &IndexScheduler,
    index_uid: &str,
    req: &HttpRequest,
    network: meilisearch_types::network::Network,
    body: Body<T>,
    task: &meilisearch_types::tasks::Task,
) -> Result<(), MeilisearchHttpError> {
    match origin_from_req(req)? {
        Some(origin) => {
            index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
        }
        None => {
            let this = network
                .local
                .as_deref()
                .expect("inconsistent `network.sharding` and `network.self`")
                .to_owned();

            let content_type = match &body {
                // for file bodies, force x-ndjson
                Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
                // otherwise get content type from request
                _ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
            };

            let body = match body {
                Body::NdJsonPayload(file) => Some(Bytes::from_owner(unsafe {
                    memmap2::Mmap::map(&file).map_err(|err| {
                        MeilisearchHttpError::from_milli(err.into(), Some(index_uid.to_owned()))
                    })?
                })),

                Body::Inline(payload) => {
                    Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
                }

                Body::None => None,
            };

            let mut in_flight_remote_queries = BTreeMap::new();
            let client = reqwest::ClientBuilder::new()
                .connect_timeout(std::time::Duration::from_secs(3))
                .build()
                .unwrap();

            let method = from_old_http_method(req.method());

            // send payload to all remotes
            for (node_name, node) in
                network.remotes.into_iter().filter(|(name, _)| name.as_str() != this)
            {
                let body = body.clone();
                let client = client.clone();
                let api_key = node.write_api_key;
                let this = this.clone();
                let method = method.clone();
                let path_and_query =
                    req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");

                in_flight_remote_queries.insert(
                    node_name,
                    tokio::spawn({
                        let url = format!("{}{}", node.url, path_and_query);

                        let url_encoded_this = urlencoding::encode(&this).into_owned();
                        let url_encoded_task_uid = task.uid.to_string(); // it's URL-encoded, I promise

                        let content_type = content_type.map(|b| b.to_owned());

                        let backoff = backoff::ExponentialBackoffBuilder::new()
                            .with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
                            .build();

                        backoff::future::retry(backoff, move || {
                            let url = url.clone();
                            let client = client.clone();
                            let url_encoded_this = url_encoded_this.clone();
                            let url_encoded_task_uid = url_encoded_task_uid.clone();
                            let content_type = content_type.clone();

                            let body = body.clone();
                            let api_key = api_key.clone();
                            let method = method.clone();

                            async move {
                                try_proxy(
                                    method,
                                    &url,
                                    content_type.as_deref(),
                                    api_key.as_deref(),
                                    &client,
                                    &url_encoded_this,
                                    &url_encoded_task_uid,
                                    body,
                                )
                                .await
                            }
                        })
                    }),
                );
            }

            // wait for all in-flight queries to finish and collect their results
            let mut remote_tasks: BTreeMap<String, RemoteTask> = BTreeMap::new();
            for (node_name, handle) in in_flight_remote_queries {
                match handle.await {
                    Ok(Ok(res)) => {
                        let task_uid = res.task_uid;

                        remote_tasks.insert(node_name, Ok(task_uid).into());
                    }
                    Ok(Err(error)) => {
                        remote_tasks.insert(node_name, Err(error.as_response_error()).into());
                    }
                    Err(panic) => match panic.try_into_panic() {
                        Ok(panic) => {
                            let msg = match panic.downcast_ref::<&'static str>() {
                                Some(s) => *s,
                                None => match panic.downcast_ref::<String>() {
                                    Some(s) => &s[..],
                                    None => "Box<dyn Any>",
                                },
                            };
                            remote_tasks.insert(
                                node_name,
                                Err(ResponseError::from_msg(
                                    msg.to_string(),
                                    meilisearch_types::error::Code::Internal,
                                ))
                                .into(),
                            );
                        }
                        Err(_) => {
                            tracing::error!("proxy task was unexpectedly cancelled")
                        }
                    },
                }
            }

            // edit details to contain the return values from the remotes
            index_scheduler.set_task_network(task.uid, TaskNetwork::Remotes { remote_tasks })?;
        }
    }

    Ok(())
}

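The collection loop above separates a remote call that returned an error from a worker task that panicked: `JoinError::try_into_panic` recovers the panic payload, and the two `downcast_ref` attempts cover the payload types `panic!` actually produces. A standalone sketch of that branch (the panic message is made up):

#[tokio::main]
async fn main() {
    let handle = tokio::spawn(async {
        panic!("remote exploded: {}", 42);
    });

    if let Err(join_error) = handle.await {
        match join_error.try_into_panic() {
            Ok(payload) => {
                let msg = match payload.downcast_ref::<&'static str>() {
                    Some(s) => *s,
                    None => match payload.downcast_ref::<String>() {
                        Some(s) => s.as_str(),
                        None => "Box<dyn Any>",
                    },
                };
                eprintln!("task panicked: {msg}"); // prints "remote exploded: 42"
            }
            Err(_) => eprintln!("task was cancelled"),
        }
    }
}
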
fn from_old_http_method(method: &actix_http::Method) -> reqwest::Method {
    match method {
        &actix_http::Method::CONNECT => reqwest::Method::CONNECT,
        &actix_http::Method::DELETE => reqwest::Method::DELETE,
        &actix_http::Method::GET => reqwest::Method::GET,
        &actix_http::Method::HEAD => reqwest::Method::HEAD,
        &actix_http::Method::OPTIONS => reqwest::Method::OPTIONS,
        &actix_http::Method::PATCH => reqwest::Method::PATCH,
        &actix_http::Method::POST => reqwest::Method::POST,
        &actix_http::Method::PUT => reqwest::Method::PUT,
        &actix_http::Method::TRACE => reqwest::Method::TRACE,
        method => reqwest::Method::from_bytes(method.as_str().as_bytes()).unwrap(),
    }
}

#[allow(clippy::too_many_arguments)]
async fn try_proxy(
    method: reqwest::Method,
    url: &str,
    content_type: Option<&[u8]>,
    api_key: Option<&str>,
    client: &reqwest::Client,
    url_encoded_this: &str,
    url_encoded_task_uid: &str,
    body: Option<Bytes>,
) -> Result<SummarizedTaskView, backoff::Error<ProxyDocumentChangeError>> {
    let request = client.request(method, url).timeout(std::time::Duration::from_secs(30));
    let request = if let Some(body) = body { request.body(body) } else { request };
    let request = if let Some(api_key) = api_key { request.bearer_auth(api_key) } else { request };
    let request = request.header(PROXY_ORIGIN_TASK_UID_HEADER, url_encoded_task_uid);
    let request = request.header(PROXY_ORIGIN_REMOTE_HEADER, url_encoded_this);
    let request = if let Some(content_type) = content_type {
        request.header(CONTENT_TYPE.as_str(), content_type)
    } else {
        request
    };

    let response = request.send().await;
    let response = match response {
        Ok(response) => response,
        Err(error) if error.is_timeout() => {
            return Err(backoff::Error::transient(ProxyDocumentChangeError::Timeout))
        }
        Err(error) => {
            return Err(backoff::Error::transient(ProxyDocumentChangeError::CouldNotSendRequest(
                ReqwestErrorWithoutUrl::new(error),
            )))
        }
    };

    match response.status() {
        status_code if status_code.is_success() => (),
        StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
            return Err(backoff::Error::Permanent(ProxyDocumentChangeError::AuthenticationError))
        }
        status_code if status_code.is_client_error() => {
            let response = parse_error(response).await;
            return Err(backoff::Error::Permanent(ProxyDocumentChangeError::BadRequest {
                status_code,
                response,
            }));
        }
        status_code if status_code.is_server_error() => {
            let response = parse_error(response).await;
            return Err(backoff::Error::transient(ProxyDocumentChangeError::RemoteError {
                status_code,
                response,
            }));
        }
        status_code => {
            tracing::warn!(
                status_code = status_code.as_u16(),
                "remote replied with unexpected status code"
            );
        }
    }

    let response = match parse_response(response).await {
        Ok(response) => response,
        Err(response) => {
            return Err(backoff::Error::transient(
                ProxyDocumentChangeError::CouldNotParseResponse { response },
            ))
        }
    };

    Ok(response)
}

async fn parse_error(response: reqwest::Response) -> Result<String, ReqwestErrorWithoutUrl> {
    let bytes = match response.bytes().await {
        Ok(bytes) => bytes,
        Err(error) => return Err(ReqwestErrorWithoutUrl::new(error)),
    };

    Ok(parse_bytes_as_error(&bytes))
}

fn parse_bytes_as_error(bytes: &[u8]) -> String {
    match serde_json::from_slice::<Value>(bytes) {
        Ok(value) => value.to_string(),
        Err(_) => String::from_utf8_lossy(bytes).into_owned(),
    }
}

async fn parse_response<T: DeserializeOwned>(
    response: reqwest::Response,
) -> Result<T, Result<String, ReqwestErrorWithoutUrl>> {
    let bytes = match response.bytes().await {
        Ok(bytes) => bytes,
        Err(error) => return Err(Err(ReqwestErrorWithoutUrl::new(error))),
    };

    match serde_json::from_slice::<T>(&bytes) {
        Ok(value) => Ok(value),
        Err(_) => Err(Ok(parse_bytes_as_error(&bytes))),
    }
}

mod error {
    use meilisearch_types::error::ResponseError;
    use reqwest::StatusCode;

    #[derive(Debug, thiserror::Error)]
    pub enum ProxyDocumentChangeError {
        #[error("{0}")]
        CouldNotSendRequest(ReqwestErrorWithoutUrl),
        #[error("could not authenticate against the remote host\n - hint: check that the remote instance was registered with a valid API key having the `documents.add` action")]
        AuthenticationError,
        #[error(
            "could not parse response from the remote host as a document addition response{}\n - hint: check that the remote instance is a Meilisearch instance running the same version",
            response_from_remote(response)
        )]
        CouldNotParseResponse { response: Result<String, ReqwestErrorWithoutUrl> },
        #[error("remote host responded with code {}{}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance", status_code.as_u16(), response_from_remote(response))]
        BadRequest { status_code: StatusCode, response: Result<String, ReqwestErrorWithoutUrl> },
        #[error("remote host did not answer before the deadline")]
        Timeout,
        #[error("remote host responded with code {}{}", status_code.as_u16(), response_from_remote(response))]
        RemoteError { status_code: StatusCode, response: Result<String, ReqwestErrorWithoutUrl> },
    }

    impl ProxyDocumentChangeError {
        pub fn as_response_error(&self) -> ResponseError {
            use meilisearch_types::error::Code;
            let message = self.to_string();
            let code = match self {
                ProxyDocumentChangeError::CouldNotSendRequest(_) => Code::RemoteCouldNotSendRequest,
                ProxyDocumentChangeError::AuthenticationError => Code::RemoteInvalidApiKey,
                ProxyDocumentChangeError::BadRequest { .. } => Code::RemoteBadRequest,
                ProxyDocumentChangeError::Timeout => Code::RemoteTimeout,
                ProxyDocumentChangeError::RemoteError { .. } => Code::RemoteRemoteError,
                ProxyDocumentChangeError::CouldNotParseResponse { .. } => Code::RemoteBadResponse,
            };
            ResponseError::from_msg(message, code)
        }
    }

    #[derive(Debug, thiserror::Error)]
    #[error(transparent)]
    pub struct ReqwestErrorWithoutUrl(reqwest::Error);
    impl ReqwestErrorWithoutUrl {
        pub fn new(inner: reqwest::Error) -> Self {
            Self(inner.without_url())
        }
    }

    fn response_from_remote(response: &Result<String, ReqwestErrorWithoutUrl>) -> String {
        match response {
            Ok(response) => {
                format!(":\n - response from remote: {}", response)
            }
            Err(error) => {
                format!(":\n - additionally, could not retrieve response from remote: {error}")
            }
        }
    }
}

pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";

pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchHttpError> {
    let (remote_name, task_uid) = match (
        req.headers().get(PROXY_ORIGIN_REMOTE_HEADER),
        req.headers().get(PROXY_ORIGIN_TASK_UID_HEADER),
    ) {
        (None, None) => return Ok(None),
        (None, Some(_)) => {
            return Err(MeilisearchHttpError::InconsistentOriginHeaders { is_remote_missing: true })
        }
        (Some(_), None) => {
            return Err(MeilisearchHttpError::InconsistentOriginHeaders {
                is_remote_missing: false,
            })
        }
        (Some(remote_name), Some(task_uid)) => (
            urlencoding::decode(remote_name.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_ORIGIN_REMOTE_HEADER,
                    msg: format!("while parsing remote name as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_ORIGIN_REMOTE_HEADER,
                msg: format!("while URL-decoding remote name: {err}"),
            })?,
            urlencoding::decode(task_uid.to_str().map_err(|err| {
                MeilisearchHttpError::InvalidHeaderValue {
                    header_name: PROXY_ORIGIN_TASK_UID_HEADER,
                    msg: format!("while parsing task UID as UTF-8: {err}"),
                }
            })?)
            .map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
                header_name: PROXY_ORIGIN_TASK_UID_HEADER,
                msg: format!("while URL-decoding task UID: {err}"),
            })?,
        ),
    };

    let task_uid: usize =
        task_uid.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
            header_name: PROXY_ORIGIN_TASK_UID_HEADER,
            msg: format!("while parsing the task UID as an integer: {err}"),
        })?;

    Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid }))
}

@@ -25,13 +25,21 @@ use crate::analytics::{Aggregate, Analytics};
 use crate::extractors::authentication::policies::*;
 use crate::extractors::authentication::{AuthenticationError, GuardedData};
 use crate::extractors::sequential_extractor::SeqHandler;
-use crate::proxy::{proxy, task_network_and_check_leader_and_version, Body};
 use crate::routes::is_dry_run;
 use crate::Opt;
 
 pub mod compact;
 pub mod documents;
 
+#[cfg(not(feature = "enterprise"))]
+mod community_edition;
+#[cfg(feature = "enterprise")]
+mod enterprise_edition;
+#[cfg(not(feature = "enterprise"))]
+use community_edition as current_edition;
+#[cfg(feature = "enterprise")]
+use enterprise_edition as current_edition;
+
 pub mod facet_search;
 pub mod search;
 mod search_analytics;
@@ -42,6 +50,8 @@ mod settings_analytics;
 pub mod similar;
 mod similar_analytics;
 
+pub use current_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
+
 #[derive(OpenApi)]
 #[openapi(
     nest(
@@ -193,7 +203,7 @@ pub async fn list_indexes(
     Ok(HttpResponse::Ok().json(ret))
 }
 
-#[derive(Deserr, Serialize, Debug, ToSchema)]
+#[derive(Deserr, Debug, ToSchema)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
 #[schema(rename_all = "camelCase")]
 pub struct IndexCreateRequest {
@@ -263,10 +273,6 @@ pub async fn create_index(
     analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!(parameters = ?body, "Create index");
-
-    let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;
-
     let IndexCreateRequest { primary_key, uid } = body.into_inner();
 
     let allow_index_creation = index_scheduler.filters().allow_index_creation(&uid);
@@ -276,32 +282,13 @@ pub async fn create_index(
         &req,
     );
 
-    let task = KindWithContent::IndexCreation {
-        index_uid: uid.to_string(),
-        primary_key: primary_key.clone(),
-    };
-    let tuid = get_task_id(&req, &opt)?;
+    let task = KindWithContent::IndexCreation { index_uid: uid.to_string(), primary_key };
+    let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
-    let scheduler = index_scheduler.clone();
-    let mut task = tokio::task::spawn_blocking(move || {
-        scheduler.register_with_custom_metadata(task, tuid, None, dry_run, task_network)
-    })
-    .await??;
-
-    if let Some(task_network) = task.network.take() {
-        proxy(
-            &index_scheduler,
-            None,
-            &req,
-            task_network,
-            network,
-            Body::inline(IndexCreateRequest { primary_key, uid }),
-            &task,
-        )
-        .await?;
-    }
-
-    let task = SummarizedTaskView::from(task);
+    let task: SummarizedTaskView =
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
+            .await??
+            .into();
     debug!(returns = ?task, "Create index");
 
     Ok(HttpResponse::Accepted().json(task))
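Note: the rewritten handler funnels the synchronous IndexScheduler::register call through tokio's blocking thread pool, and the double `??` first propagates the JoinError, then the scheduler's own error. A minimal runnable sketch of that pattern; `register` here is a hypothetical stand-in, and tokio is assumed with its rt and macros features:

// the outer `?` handles the tokio JoinError, the inner `?` the domain error
fn register(task: &str) -> Result<u64, String> {
    if task.is_empty() {
        return Err("empty task".to_string());
    }
    Ok(42) // pretend this is the newly assigned task uid
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let task = "indexCreation".to_string();
    // Offload the blocking call so it does not stall the async executor.
    let task_uid = tokio::task::spawn_blocking(move || register(&task)).await??;
    println!("registered task {task_uid}");
    Ok(())
}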
@@ -395,7 +382,7 @@ impl Aggregate for IndexUpdatedAggregate {
     }
 }
 
-#[derive(Deserr, Serialize, Debug, ToSchema)]
+#[derive(Deserr, Debug, ToSchema)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields = deny_immutable_fields_index)]
 #[schema(rename_all = "camelCase")]
 pub struct UpdateIndexRequest {
@@ -447,10 +434,6 @@ pub async fn update_index(
     analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     debug!(parameters = ?body, "Update index");
-
-    let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;
-
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
     let body = body.into_inner();
 
@@ -465,33 +448,17 @@ pub async fn update_index(
     );
 
     let task = KindWithContent::IndexUpdate {
-        index_uid: index_uid.clone().into_inner(),
-        primary_key: body.primary_key.clone(),
-        new_index_uid: body.uid.clone(),
+        index_uid: index_uid.into_inner(),
+        primary_key: body.primary_key,
+        new_index_uid: body.uid,
     };
 
     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
-    let scheduler = index_scheduler.clone();
-    let mut task = tokio::task::spawn_blocking(move || {
-        scheduler.register_with_custom_metadata(task, uid, None, dry_run, task_network)
-    })
-    .await??;
-
-    if let Some(task_network) = task.network.take() {
-        proxy(
-            &index_scheduler,
-            Some(&index_uid),
-            &req,
-            task_network,
-            network,
-            Body::inline(body),
-            &task,
-        )
-        .await?;
-    }
-
-    let task = SummarizedTaskView::from(task);
+    let task: SummarizedTaskView =
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
+            .await??
+            .into();
 
     debug!(returns = ?task, "Update index");
     Ok(HttpResponse::Accepted().json(task))
@@ -532,27 +499,14 @@ pub async fn delete_index(
     req: HttpRequest,
     opt: web::Data<Opt>,
 ) -> Result<HttpResponse, ResponseError> {
-    let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(&req, &network)?;
-
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
-    let task = KindWithContent::IndexDeletion { index_uid: index_uid.clone().into_inner() };
+    let task = KindWithContent::IndexDeletion { index_uid: index_uid.into_inner() };
     let uid = get_task_id(&req, &opt)?;
     let dry_run = is_dry_run(&req, &opt)?;
-    let scheduler = index_scheduler.clone();
-    let mut task = tokio::task::spawn_blocking(move || {
-        scheduler.register_with_custom_metadata(task, uid, None, dry_run, task_network)
-    })
-    .await??;
-
-    if let Some(task_network) = task.network.take() {
-        proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
-            .await?;
-    }
-
-    let task = SummarizedTaskView::from(task);
-
+    let task: SummarizedTaskView =
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
+            .await??
+            .into();
     debug!(returns = ?task, "Delete index");
 
     Ok(HttpResponse::Accepted().json(task))

@@ -17,7 +17,6 @@ use super::settings_analytics::*;
 use crate::analytics::Analytics;
 use crate::extractors::authentication::policies::*;
 use crate::extractors::authentication::GuardedData;
-use crate::proxy::{proxy, task_network_and_check_leader_and_version, Body};
 use crate::routes::{get_task_id, is_dry_run, SummarizedTaskView};
 use crate::Opt;
 
@@ -77,13 +76,14 @@ macro_rules! make_setting_route {
         use meilisearch_types::index_uid::IndexUid;
         use meilisearch_types::milli::update::Setting;
         use meilisearch_types::settings::{settings, Settings};
+        use meilisearch_types::tasks::KindWithContent;
        use tracing::debug;
         use $crate::analytics::Analytics;
         use $crate::extractors::authentication::policies::*;
         use $crate::extractors::authentication::GuardedData;
         use $crate::extractors::sequential_extractor::SeqHandler;
         use $crate::Opt;
-        use $crate::routes::SummarizedTaskView;
+        use $crate::routes::{is_dry_run, get_task_id, SummarizedTaskView};
         #[allow(unused_imports)]
         use super::*;
 
@@ -130,7 +130,21 @@ macro_rules! make_setting_route {
 
             let new_settings = Settings { $attr: Setting::Reset.into(), ..Default::default() };
 
-            let task = register_new_settings(new_settings, true, index_scheduler, &req, index_uid, opt).await?;
+            let allow_index_creation =
+                index_scheduler.filters().allow_index_creation(&index_uid);
+
+            let task = KindWithContent::SettingsUpdate {
+                index_uid: index_uid.to_string(),
+                new_settings: Box::new(new_settings),
+                is_deletion: true,
+                allow_index_creation,
+            };
+            let uid = get_task_id(&req, &opt)?;
+            let dry_run = is_dry_run(&req, &opt)?;
+            let task: SummarizedTaskView =
+                tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
+                    .await??
+                    .into();
 
             debug!(returns = ?task, "Delete settings");
             Ok(HttpResponse::Accepted().json(task))
@@ -197,7 +211,26 @@ macro_rules! make_setting_route {
                 ..Default::default()
             };
 
-            let task = register_new_settings(new_settings, false, index_scheduler, &req, index_uid, opt).await?;
+            let new_settings = $crate::routes::indexes::settings::validate_settings(
+                new_settings,
+                &index_scheduler,
+            )?;
+
+            let allow_index_creation =
+                index_scheduler.filters().allow_index_creation(&index_uid);
+
+            let task = KindWithContent::SettingsUpdate {
+                index_uid: index_uid.to_string(),
+                new_settings: Box::new(new_settings),
+                is_deletion: false,
+                allow_index_creation,
+            };
+            let uid = get_task_id(&req, &opt)?;
+            let dry_run = is_dry_run(&req, &opt)?;
+            let task: SummarizedTaskView =
+                tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
+                    .await??
+                    .into();
 
             debug!(returns = ?task, "Update settings");
             Ok(HttpResponse::Accepted().json(task))
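Note: the two hunks above live inside make_setting_route!, which stamps out one route module per settings field. As a self-contained illustration of that code-generation pattern (not the real macro), here is a declarative macro that generates a getter/resetter pair per field:

macro_rules! make_setting_accessors {
    ($get:ident, $reset:ident, $field:ident, $ty:ty, $default:expr) => {
        fn $get(settings: &Settings) -> &$ty {
            &settings.$field
        }
        fn $reset(settings: &mut Settings) {
            settings.$field = $default; // the "Reset" path: restore the default
        }
    };
}

#[derive(Debug)]
struct Settings {
    pagination_limit: usize,
    search_cutoff_ms: u64,
}

make_setting_accessors!(get_pagination, reset_pagination, pagination_limit, usize, 1000);
make_setting_accessors!(get_cutoff, reset_cutoff, search_cutoff_ms, u64, 1500);

fn main() {
    let mut s = Settings { pagination_limit: 250, search_cutoff_ms: 300 };
    assert_eq!(*get_pagination(&s), 250);
    reset_pagination(&mut s);
    assert_eq!(*get_pagination(&s), 1000);
    reset_cutoff(&mut s);
    assert_eq!(*get_cutoff(&s), 1500);
}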
@@ -538,13 +571,14 @@ pub async fn update_all(
     index_uid: web::Path<String>,
     body: AwebJson<Settings<Unchecked>, DeserrJsonError>,
     req: HttpRequest,
-    opt: Data<Opt>,
-    analytics: Data<Analytics>,
+    opt: web::Data<Opt>,
+    analytics: web::Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     let index_uid = IndexUid::try_from(index_uid.into_inner())?;
 
-    let new_settings: Settings<Unchecked> = body.into_inner();
+    let new_settings = body.into_inner();
     debug!(parameters = ?new_settings, "Update all settings");
+    let new_settings = validate_settings(new_settings, &index_scheduler)?;
 
     analytics.publish(
         SettingsAnalytics {
@@ -592,62 +626,23 @@ pub async fn update_all(
         &req,
     );
 
-    let task =
-        register_new_settings(new_settings, false, index_scheduler, &req, index_uid, opt).await?;
-
-    debug!(returns = ?task, "Update all settings");
-    Ok(HttpResponse::Accepted().json(task))
-}
-
-async fn register_new_settings(
-    new_settings: Settings<Unchecked>,
-    is_deletion: bool,
-    index_scheduler: GuardedData<ActionPolicy<{ actions::SETTINGS_UPDATE }>, Data<IndexScheduler>>,
-    req: &HttpRequest,
-    index_uid: IndexUid,
-    opt: Data<Opt>,
-) -> Result<SummarizedTaskView, ResponseError> {
-    let network = index_scheduler.network();
-    let task_network = task_network_and_check_leader_and_version(req, &network)?;
-
-    // validate settings unless this is a duplicated task
-    let new_settings = if task_network.is_none() {
-        validate_settings(new_settings, &index_scheduler)?
-    } else {
-        new_settings
-    };
-
     let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
     let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
     let task = KindWithContent::SettingsUpdate {
-        index_uid: index_uid.clone(),
-        new_settings: Box::new(new_settings.clone()),
-        is_deletion,
+        index_uid,
+        new_settings: Box::new(new_settings),
+        is_deletion: false,
         allow_index_creation,
     };
-    let uid = get_task_id(req, &opt)?;
-    let dry_run = is_dry_run(req, &opt)?;
+    let uid = get_task_id(&req, &opt)?;
+    let dry_run = is_dry_run(&req, &opt)?;
+    let task: SummarizedTaskView =
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
+            .await??
+            .into();
 
-    let scheduler = index_scheduler.clone();
-    let mut task = tokio::task::spawn_blocking(move || {
-        scheduler.register_with_custom_metadata(task, uid, None, dry_run, task_network)
-    })
-    .await??;
-
-    if let Some(task_network) = task.network.take() {
-        proxy(
-            &index_scheduler,
-            Some(&index_uid),
-            req,
-            task_network,
-            network,
-            Body::inline(new_settings),
-            &task,
-        )
-        .await?;
-    }
-
-    Ok(task.into())
+    debug!(returns = ?task, "Update all settings");
+    Ok(HttpResponse::Accepted().json(task))
 }
 
 #[utoipa::path(
@@ -736,8 +731,20 @@ pub async fn delete_all(
 
     let new_settings = Settings::cleared().into_unchecked();
 
-    let task =
-        register_new_settings(new_settings, true, index_scheduler, &req, index_uid, opt).await?;
+    let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
+    let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
+    let task = KindWithContent::SettingsUpdate {
+        index_uid,
+        new_settings: Box::new(new_settings),
+        is_deletion: true,
+        allow_index_creation,
+    };
+    let uid = get_task_id(&req, &opt)?;
+    let dry_run = is_dry_run(&req, &opt)?;
+    let task: SummarizedTaskView =
+        tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run))
+            .await??
+            .into();
 
     debug!(returns = ?task, "Delete all settings");
     Ok(HttpResponse::Accepted().json(task))

@@ -204,22 +204,22 @@ pub fn parse_include_metadata_header(req: &HttpRequest) -> bool {
 pub struct SummarizedTaskView {
     /// The task unique identifier.
     #[schema(value_type = u32)]
-    pub task_uid: TaskId,
+    task_uid: TaskId,
     /// The index affected by this task. May be `null` if the task is not linked to any index.
-    pub index_uid: Option<String>,
+    index_uid: Option<String>,
     /// The status of the task.
-    pub status: Status,
+    status: Status,
     /// The type of the task.
     #[serde(rename = "type")]
-    pub kind: Kind,
+    kind: Kind,
     /// The date on which the task was enqueued.
     #[serde(
         serialize_with = "time::serde::rfc3339::serialize",
         deserialize_with = "time::serde::rfc3339::deserialize"
     )]
-    pub enqueued_at: OffsetDateTime,
+    enqueued_at: OffsetDateTime,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub custom_metadata: Option<String>,
+    custom_metadata: Option<String>,
 }
 
 impl From<Task> for SummarizedTaskView {
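Note: making these fields private changes nothing on the wire; the JSON shape is driven entirely by the serde attributes. A hedged sketch of the same attribute set on a stand-in struct (assumes serde, serde_json, and the time crate with its well-known serde formats enabled; field names mirror the struct above):

use serde::Serialize;
use time::OffsetDateTime;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct TaskSummary {
    task_uid: u32,
    #[serde(rename = "type")] // `kind` is exposed as `"type"` in the API
    kind: &'static str,
    #[serde(serialize_with = "time::serde::rfc3339::serialize")]
    enqueued_at: OffsetDateTime,
    #[serde(skip_serializing_if = "Option::is_none")]
    custom_metadata: Option<String>,
}

fn main() {
    let summary = TaskSummary {
        task_uid: 1,
        kind: "indexCreation",
        enqueued_at: OffsetDateTime::UNIX_EPOCH,
        custom_metadata: None,
    };
    // {"taskUid":1,"type":"indexCreation","enqueuedAt":"1970-01-01T00:00:00Z"}
    println!("{}", serde_json::to_string(&summary).unwrap());
}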
@@ -8,7 +8,7 @@ use index_scheduler::IndexScheduler;
 use itertools::{EitherOrBoth, Itertools};
 use meilisearch_types::deserr::DeserrJsonError;
 use meilisearch_types::error::deserr_codes::{
-    InvalidNetworkLeader, InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf,
+    InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
     InvalidNetworkUrl, InvalidNetworkWriteApiKey,
 };
 use meilisearch_types::error::ResponseError;
@@ -20,21 +20,10 @@ use tracing::debug;
 use utoipa::{OpenApi, ToSchema};
 
 use crate::analytics::{Aggregate, Analytics};
-use crate::error::MeilisearchHttpError;
 use crate::extractors::authentication::policies::ActionPolicy;
 use crate::extractors::authentication::GuardedData;
 use crate::extractors::sequential_extractor::SeqHandler;
 
-#[cfg(not(feature = "enterprise"))]
-mod community_edition;
-
-#[cfg(feature = "enterprise")]
-mod enterprise_edition;
-#[cfg(not(feature = "enterprise"))]
-use community_edition as current_edition;
-#[cfg(feature = "enterprise")]
-use enterprise_edition as current_edition;
-
 #[derive(OpenApi)]
 #[openapi(
     paths(get_network, patch_network),
@@ -94,7 +83,7 @@ async fn get_network(
     Ok(HttpResponse::Ok().json(network))
 }
 
-#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
+#[derive(Debug, Deserr, ToSchema, Serialize)]
 #[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]
 #[schema(rename_all = "camelCase")]
@@ -117,19 +106,12 @@ pub struct Remote {
     pub write_api_key: Setting<String>,
 }
 
-#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
+#[derive(Debug, Deserr, ToSchema, Serialize)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]
 #[schema(rename_all = "camelCase")]
 pub struct Network {
-    #[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
-        "ms-00": {
-            "url": "http://localhost:7700"
-        },
-        "ms-01": {
-            "url": "http://localhost:7701"
-        }
-    }))]
+    #[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
     #[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
     #[serde(default)]
     pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
@@ -137,21 +119,10 @@ pub struct Network {
     #[serde(default, rename = "self")]
     #[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
     pub local: Setting<String>,
-    #[schema(value_type = Option<String>, example = json!("ms-00"))]
+    #[schema(value_type = Option<bool>, example = json!(true))]
     #[serde(default)]
-    #[deserr(default, error = DeserrJsonError<InvalidNetworkLeader>)]
-    pub leader: Setting<String>,
-    #[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
-        "ms-00": {
-            "url": "http://localhost:7700"
-        },
-        "ms-01": {
-            "url": "http://localhost:7701"
-        }
-    }))]
-    #[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
-    #[serde(default)]
-    pub previous_remotes: Setting<BTreeMap<String, Option<Remote>>>,
+    #[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
+    pub sharding: Setting<bool>,
 }
 
 impl Remote {
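Note: the new `sharding` field keeps the tri-state Setting semantics the route relies on: Set overrides, Reset restores the default (false), NotSet keeps the stored value. A self-contained stand-in (not milli's real Setting type) showing the resolution the merge further down performs:

#[derive(Debug, Clone, Copy, PartialEq)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

// Mirrors the `merged_sharding` match in the patch_network hunk below.
fn merge_sharding(new: Setting<bool>, old: bool) -> bool {
    match new {
        Setting::Set(v) => v,    // an explicit value wins
        Setting::Reset => false, // reset falls back to the default
        Setting::NotSet => old,  // an absent field keeps the stored value
    }
}

fn main() {
    assert!(merge_sharding(Setting::Set(true), false));
    assert!(!merge_sharding(Setting::Reset, true));
    assert!(merge_sharding(Setting::NotSet, true));
}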
@@ -235,34 +206,40 @@ async fn patch_network(
     analytics: Data<Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
     index_scheduler.features().check_network("Using the /network route")?;
-    current_edition::patch_network(index_scheduler, new_network, req, analytics).await
+
+    let new_network = new_network.0;
+    let old_network = index_scheduler.network();
+    debug!(parameters = ?new_network, "Patch network");
+
+    #[cfg(not(feature = "enterprise"))]
+    if new_network.sharding.set().is_some() {
+        use meilisearch_types::error::Code;
+
+        return Err(ResponseError::from_msg(
+            "Meilisearch Enterprise Edition is required to set `network.sharding`".into(),
+            Code::RequiresEnterpriseEdition,
+        ));
     }
 
-fn merge_networks(
-    old_network: DbNetwork,
-    new_network: Network,
-) -> Result<DbNetwork, ResponseError> {
     let merged_self = match new_network.local {
         Setting::Set(new_self) => Some(new_self),
         Setting::Reset => None,
         Setting::NotSet => old_network.local,
     };
-    let merged_leader = match new_network.leader {
-        Setting::Set(new_leader) => Some(new_leader),
-        Setting::Reset => None,
-        Setting::NotSet => old_network.leader,
+    let merged_sharding = match new_network.sharding {
+        Setting::Set(new_sharding) => new_sharding,
+        Setting::Reset => false,
+        Setting::NotSet => old_network.sharding,
     };
-    match (merged_leader.as_deref(), merged_self.as_deref()) {
-        // 1. Always allowed if there is no leader
-        (None, _) => (),
-        // 2. Allowed if the leader is self
-        (Some(leader), Some(this)) if leader == this => (),
-        // 3. Any other change is disallowed
-        (Some(leader), _) => {
-            return Err(MeilisearchHttpError::NotLeader { leader: leader.to_string() }.into())
-        }
+    if merged_sharding && merged_self.is_none() {
+        return Err(ResponseError::from_msg(
+            "`.sharding`: enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.".into(),
+            meilisearch_types::error::Code::InvalidNetworkSharding,
+        ));
     }
-    let new_version = uuid::Uuid::now_v7();
+
     let merged_remotes = match new_network.remotes {
         Setting::Set(new_remotes) => {
             let mut merged_remotes = BTreeMap::new();
@@ -334,11 +311,19 @@ fn merge_networks(
         Setting::Reset => BTreeMap::new(),
         Setting::NotSet => old_network.remotes,
     };
-    let merged_network = DbNetwork {
-        local: merged_self,
-        remotes: merged_remotes,
-        leader: merged_leader,
-        version: new_version,
-    };
-    Ok(merged_network)
+
+    analytics.publish(
+        PatchNetworkAnalytics {
+            network_size: merged_remotes.len(),
+            network_has_self: merged_self.is_some(),
+        },
+        &req,
+    );
+
+    let merged_network =
+        DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
+
+    index_scheduler.put_network(merged_network.clone())?;
+    debug!(returns = ?merged_network, "Patch network");
+    Ok(HttpResponse::Ok().json(merged_network))
 }
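Note: the merge above enforces one invariant: sharding cannot be enabled while `self` is unset. A minimal standalone sketch of that check (stand-in types; the real code returns a ResponseError with Code::InvalidNetworkSharding):

fn validate(sharding: bool, local: Option<&str>) -> Result<(), String> {
    // Enabling sharding without knowing this node's own name is rejected.
    if sharding && local.is_none() {
        return Err(
            "`.sharding`: enabling the sharding requires `.self` to be set\n \
             - Hint: Disable `sharding` or set `self` to a value."
                .to_string(),
        );
    }
    Ok(())
}

fn main() {
    assert!(validate(true, Some("ms-00")).is_ok());
    assert!(validate(false, None).is_ok());
    assert!(validate(true, None).is_err());
}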
@@ -1,53 +0,0 @@
-use actix_web::web::Data;
-use actix_web::{HttpRequest, HttpResponse};
-use deserr::actix_web::AwebJson;
-use index_scheduler::IndexScheduler;
-use meilisearch_types::deserr::DeserrJsonError;
-use meilisearch_types::error::ResponseError;
-use meilisearch_types::keys::actions;
-use meilisearch_types::milli::update::Setting;
-use tracing::debug;
-
-use super::{merge_networks, Network, PatchNetworkAnalytics};
-use crate::analytics::Analytics;
-use crate::error::MeilisearchHttpError;
-use crate::extractors::authentication::policies::ActionPolicy;
-use crate::extractors::authentication::GuardedData;
-
-pub async fn patch_network(
-    index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
-    new_network: AwebJson<Network, DeserrJsonError>,
-    req: HttpRequest,
-    analytics: Data<Analytics>,
-) -> Result<HttpResponse, ResponseError> {
-    let new_network = new_network.0;
-    let old_network = index_scheduler.network();
-    debug!(parameters = ?new_network, "Patch network");
-
-    if new_network.leader.as_ref().set().is_some() {
-        use meilisearch_types::error::Code;
-
-        return Err(ResponseError::from_msg(
-            "Meilisearch Enterprise Edition is required to set `network.leader`".into(),
-            Code::RequiresEnterpriseEdition,
-        ));
-    }
-
-    if !matches!(new_network.previous_remotes, Setting::NotSet) {
-        return Err(MeilisearchHttpError::UnexpectedNetworkPreviousRemotes.into());
-    }
-
-    let merged_network = merge_networks(old_network.clone(), new_network)?;
-
-    index_scheduler.put_network(merged_network.clone())?;
-
-    analytics.publish(
-        PatchNetworkAnalytics {
-            network_size: merged_network.remotes.len(),
-            network_has_self: merged_network.local.is_some(),
-        },
-        &req,
-    );
-
-    Ok(HttpResponse::Ok().json(merged_network))
-}
@@ -1,379 +0,0 @@
-// Copyright © 2025 Meilisearch Some Rights Reserved
-// This file is part of Meilisearch Enterprise Edition (EE).
-// Use of this source code is governed by the Business Source License 1.1,
-// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
-
-use std::collections::BTreeMap;
-
-use actix_web::web::Data;
-use actix_web::{HttpRequest, HttpResponse};
-use deserr::actix_web::AwebJson;
-use futures::TryStreamExt;
-use index_scheduler::{IndexScheduler, Query, RoFeatures};
-use itertools::{EitherOrBoth, Itertools};
-use meilisearch_auth::AuthFilter;
-use meilisearch_types::deserr::DeserrJsonError;
-use meilisearch_types::error::{Code, ResponseError};
-use meilisearch_types::features::RuntimeTogglableFeatures;
-use meilisearch_types::keys::actions;
-use meilisearch_types::milli::update::Setting;
-use meilisearch_types::network::{Network as DbNetwork, Remote as DbRemote};
-use meilisearch_types::tasks::network::{headers, NetworkTopologyChange, Origin, TaskNetwork};
-use meilisearch_types::tasks::KindWithContent;
-use tracing::debug;
-
-use super::{merge_networks, Network, PatchNetworkAnalytics, Remote};
-use crate::analytics::Analytics;
-use crate::error::MeilisearchHttpError;
-use crate::extractors::authentication::policies::ActionPolicy;
-use crate::extractors::authentication::GuardedData;
-use crate::proxy::{self, proxy, Body, ProxyError};
-use crate::routes::tasks::AllTasks;
-use crate::routes::SummarizedTaskView;
-
-pub async fn patch_network(
-    index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
-    new_network: AwebJson<Network, DeserrJsonError>,
-    req: HttpRequest,
-    analytics: Data<Analytics>,
-) -> Result<HttpResponse, ResponseError> {
-    match (
-        proxy::origin_from_req(&req)?,
-        proxy::import_data_from_req(&req)?,
-        proxy::import_metadata_from_req(&req)?,
-    ) {
-        (Some(origin), None, None) => {
-            patch_network_with_origin(index_scheduler, new_network, req, origin, analytics).await
-        }
-        (None, None, None) => {
-            patch_network_without_origin(index_scheduler, new_network, req, analytics).await
-        }
-        (Some(origin), Some(import_data), Some(metadata)) => {
-            if metadata.index_count == 0 {
-                tokio::task::spawn_blocking(move || {
-                    index_scheduler.network_no_index_for_remote(import_data.remote_name, origin)
-                })
-                .await
-                .map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;
-                Ok(HttpResponse::Ok().finish())
-            } else {
-                Err(MeilisearchHttpError::InvalidHeaderValue {
-                    header_name: headers::PROXY_IMPORT_INDEX_COUNT_HEADER,
-                    msg: format!("Expected 0 indexes, got `{}`", metadata.index_count),
-                }
-                .into())
-            }
-        }
-        (origin, import_data, metadata) => {
-            Err(MeilisearchHttpError::InconsistentTaskNetworkHeaders {
-                is_missing_origin: origin.is_none(),
-                is_missing_import: import_data.is_none(),
-                is_missing_import_metadata: metadata.is_none(),
-            }
-            .into())
-        }
-    }
-}
-
-async fn patch_network_without_origin(
-    index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
-    new_network: AwebJson<Network, DeserrJsonError>,
-    req: HttpRequest,
-    analytics: Data<Analytics>,
-) -> Result<HttpResponse, ResponseError> {
-    let new_network = new_network.0;
-    let old_network = index_scheduler.network();
-    debug!(parameters = ?new_network, "Patch network");
-
-    if !matches!(new_network.previous_remotes, Setting::NotSet) {
-        return Err(MeilisearchHttpError::UnexpectedNetworkPreviousRemotes.into());
-    }
-
-    let merged_network = merge_networks(old_network.clone(), new_network)?;
-
-    // When a network task must be created, perform some sanity checks against common errors:
-    // - missing experimental feature on an host from the network
-    // - a network task is already enqueued
-    //
-    // These checks are by no mean perfect (they are not atomic since the network is involved), but they should
-    // help preventing a bad situation.
-    if merged_network.leader.is_some() {
-        let query = Query {
-            statuses: Some(vec![
-                meilisearch_types::tasks::Status::Enqueued,
-                meilisearch_types::tasks::Status::Processing,
-            ]),
-            types: Some(vec![meilisearch_types::tasks::Kind::NetworkTopologyChange]),
-            limit: Some(1),
-            ..Default::default()
-        };
-
-        let filters = AuthFilter::default();
-        let (tasks, _) = index_scheduler.get_task_ids_from_authorized_indexes(&query, &filters)?;
-
-        if let Some(first) = tasks.min() {
-            return Err(MeilisearchHttpError::UnprocessedNetworkTask {
-                remote: None,
-                task_uid: first,
-            }
-            .into());
-        }
-
-        futures::stream::iter(
-            old_network
-                .remotes
-                .iter()
-                .merge_join_by(merged_network.remotes.iter(), |(left, _), (right, _)| {
-                    left.cmp(right)
-                })
-                .map(|eob| -> Result<_, ResponseError> {
-                    match eob {
-                        EitherOrBoth::Both(_, (remote_name, remote))
-                        | EitherOrBoth::Right((remote_name, remote)) => {
-                            Ok((remote_name, remote, false))
-                        }
-                        EitherOrBoth::Left((remote_name, remote)) => {
-                            Ok((remote_name, remote, true))
-                        }
-                    }
-                }),
-        )
-        .try_for_each_concurrent(Some(40), |(remote_name, remote, allow_unreachable)| {
-            async move {
-                {
-                    // 1. check that the experimental feature is enabled
-                    let remote_features: RuntimeTogglableFeatures = match proxy::send_request(
-                        "/experimental-features",
-                        reqwest::Method::GET,
-                        None,
-                        Body::none(),
-                        remote_name,
-                        remote,
-                    )
-                    .await
-                    {
-                        Ok(remote_features) => remote_features,
-                        Err(ProxyError::Timeout | ProxyError::CouldNotSendRequest(_))
-                            if allow_unreachable =>
-                        {
-                            return Ok(())
-                        }
-                        Err(err) => return Err(err.as_response_error()),
-                    };
-                    let remote_features = RoFeatures::from_runtime_features(remote_features);
-                    remote_features.check_network("receiving a proxied network task").map_err(
-                        |error| MeilisearchHttpError::RemoteIndexScheduler {
-                            remote: remote_name.to_owned(),
-                            error,
-                        },
-                    )?;
-
-                    // 2. check whether there are any unfinished network task
-                    let network_tasks: AllTasks = match proxy::send_request(
-                        "/tasks?types=networkTopologyChange&statuses=enqueued,processing&limit=1",
-                        reqwest::Method::GET,
-                        None,
-                        Body::none(),
-                        remote_name,
-                        remote,
-                    )
-                    .await
-                    {
-                        Ok(network_tasks) => network_tasks,
-                        Err(ProxyError::Timeout | ProxyError::CouldNotSendRequest(_))
-                            if allow_unreachable =>
-                        {
-                            return Ok(())
-                        }
-                        Err(err) => return Err(err.as_response_error()),
-                    };
-
-                    if let [first, ..] = network_tasks.results.as_slice() {
-                        return Err(ResponseError::from(
-                            MeilisearchHttpError::UnprocessedNetworkTask {
-                                remote: Some(remote_name.to_owned()),
-                                task_uid: first.uid,
-                            },
-                        ));
-                    }
-                }
-
-                Ok(())
-            }
-        })
-        .await?;
-    }
-
-    index_scheduler.put_network(merged_network.clone())?;
-
-    analytics.publish(
-        PatchNetworkAnalytics {
-            network_size: merged_network.remotes.len(),
-            network_has_self: merged_network.local.is_some(),
-        },
-        &req,
-    );
-
-    if merged_network.leader.is_some() {
-        let network_topology_change =
-            NetworkTopologyChange::new(old_network.clone(), merged_network.clone());
-        let task = KindWithContent::NetworkTopologyChange(network_topology_change);
-        let mut task = {
-            let index_scheduler = index_scheduler.clone();
-            tokio::task::spawn_blocking(move || {
-                index_scheduler.register_with_custom_metadata(
-                    task,
-                    None,
-                    None,
-                    false,
-                    Some(TaskNetwork::Remotes {
-                        remote_tasks: Default::default(),
-                        network_version: merged_network.version,
-                    }),
-                )
-            })
-            .await??
-        };
-
-        let mut proxied_network = Network {
-            remotes: Setting::Set(to_settings_remotes(&merged_network.remotes)),
-            local: Setting::NotSet,
-            leader: Setting::some_or_not_set(merged_network.leader.clone()),
-            previous_remotes: Setting::Set(to_settings_remotes(&old_network.remotes)),
-        };
-        let mut deleted_network = old_network;
-
-        // only keep the deleted remotes, to inform them that they're deleted.
-        // deleted remotes are remotes that appear in the old version of the network, but not the new version.
-        let deleted_remotes = &mut deleted_network.remotes;
-        deleted_remotes.retain(|node, _| !merged_network.remotes.contains_key(node));
-
-        // proxy network change to the remaining remotes.
-        let updated_task = proxy(
-            &index_scheduler,
-            None,
-            &req,
-            task.network.take().unwrap(), // set in register
-            merged_network,
-            Body::generated(proxied_network.clone(), |name, _remote, network| {
-                network.local = Setting::Set(name.to_string());
-            }),
-            &task,
-        )
-        .await?;
-        // unwrap: network was set by `proxy`
-        let task_network = updated_task.network.unwrap();
-
-        proxied_network.previous_remotes = Setting::NotSet;
-
-        if deleted_network.leader.is_some() {
-            // proxy network change to the deleted remotes
-            proxy(
-                &index_scheduler,
-                None,
-                &req,
-                task_network,
-                deleted_network,
-                Body::generated(proxied_network.clone(), |_name, _remote, network| {
-                    network.local = Setting::Reset;
-                }),
-                &task,
-            )
-            .await?;
-        }
-
-        let task: SummarizedTaskView = task.into();
-        debug!("returns: {:?}", task);
-        Ok(HttpResponse::Accepted().json(task))
-    } else {
-        Ok(HttpResponse::Ok().json(merged_network))
-    }
-}
-
-async fn patch_network_with_origin(
-    index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
-    merged_network: AwebJson<Network, DeserrJsonError>,
-    req: HttpRequest,
-    origin: Origin,
-    analytics: Data<Analytics>,
-) -> Result<HttpResponse, ResponseError> {
-    let merged_network = merged_network.into_inner();
-    debug!(parameters = ?merged_network, ?origin, "Patch network");
-    let mut remotes = BTreeMap::new();
-    let mut old_network = index_scheduler.network();
-
-    for (name, remote) in merged_network.remotes.set().into_iter().flat_map(|x| x.into_iter()) {
-        let Some(remote) = remote else { continue };
-        let remote = remote.try_into_db_node(&name)?;
-        remotes.insert(name, remote);
-    }
-    let mut previous_remotes = BTreeMap::new();
-    for (name, remote) in
-        merged_network.previous_remotes.set().into_iter().flat_map(|x| x.into_iter())
-    {
-        let Some(remote) = remote else {
-            continue;
-        };
-        let remote = remote.try_into_db_node(&name)?;
-        previous_remotes.insert(name, remote);
-    }
-
-    old_network.remotes = previous_remotes;
-
-    let new_leader = merged_network.leader.set().ok_or_else(|| {
-        ResponseError::from_msg("Duplicated task without leader".into(), Code::InvalidNetworkLeader)
-    })?;
-
-    let new_network = DbNetwork {
-        local: merged_network.local.set(),
-        remotes,
-        leader: Some(new_leader),
-        version: origin.network_version,
-    };
-    index_scheduler.put_network(new_network.clone())?;
-
-    analytics.publish(
-        PatchNetworkAnalytics {
-            network_size: new_network.remotes.len(),
-            network_has_self: new_network.local.is_some(),
-        },
-        &req,
-    );
-
-    let network_topology_change = NetworkTopologyChange::new(old_network, new_network);
-    let task = KindWithContent::NetworkTopologyChange(network_topology_change);
-    let task = {
-        tokio::task::spawn_blocking(move || {
-            index_scheduler.register_with_custom_metadata(
-                task,
-                None,
-                None,
-                false,
-                Some(TaskNetwork::Origin { origin }),
-            )
-        })
-        .await??
-    };
-
-    let task: SummarizedTaskView = task.into();
-    debug!("returns: {:?}", task);
-    Ok(HttpResponse::Accepted().json(task))
-}
-
-fn to_settings_remotes(
-    db_remotes: &BTreeMap<String, DbRemote>,
-) -> BTreeMap<String, Option<Remote>> {
-    db_remotes
-        .iter()
-        .map(|(name, remote)| {
-            (
-                name.clone(),
-                Some(Remote {
-                    url: Setting::Set(remote.url.clone()),
-                    search_api_key: Setting::some_or_not_set(remote.search_api_key.clone()),
-                    write_api_key: Setting::some_or_not_set(remote.write_api_key.clone()),
-                }),
-            )
-        })
-        .collect()
-}
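Note: the deleted enterprise code above walked the old and new remote maps in a single ordered pass with itertools' merge_join_by, treating remotes present only on the old side as deleted (and therefore allowed to be unreachable). A runnable sketch of that classification; assumes the itertools crate, and sorted arrays stand in for the sorted BTreeMap iteration:

use itertools::{EitherOrBoth, Itertools};

fn main() {
    let old = ["ms-00", "ms-01", "ms-02"];
    let new = ["ms-00", "ms-02", "ms-03"];

    for eob in old.iter().merge_join_by(new.iter(), |l, r| l.cmp(r)) {
        match eob {
            // Present on both sides, or only in the new network: keep it.
            EitherOrBoth::Both(name, _) | EitherOrBoth::Right(name) => {
                println!("{name}: kept");
            }
            // Present only in the old network: it was removed.
            EitherOrBoth::Left(name) => println!("{name}: deleted"),
        }
    }
}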
@@ -14,7 +14,7 @@ use meilisearch_types::index_uid::IndexUid;
 use meilisearch_types::star_or::{OptionStarOr, OptionStarOrList};
 use meilisearch_types::task_view::TaskView;
 use meilisearch_types::tasks::{Kind, KindWithContent, Status};
-use serde::{Deserialize, Serialize};
+use serde::Serialize;
 use time::format_description::well_known::Rfc3339;
 use time::macros::format_description;
 use time::{Date, Duration, OffsetDateTime, Time};
@@ -488,18 +488,18 @@ async fn delete_tasks(
     Ok(HttpResponse::Ok().json(task))
 }
 
-#[derive(Debug, Serialize, Deserialize, ToSchema)]
+#[derive(Debug, Serialize, ToSchema)]
 pub struct AllTasks {
     /// The list of tasks that matched the filter.
-    pub results: Vec<TaskView>,
+    results: Vec<TaskView>,
     /// Total number of browsable results using offset/limit parameters for the given resource.
-    pub total: u64,
+    total: u64,
     /// Limit given for the query. If limit is not provided as a query parameter, this parameter displays the default limit value.
-    pub limit: u32,
+    limit: u32,
     /// The first task uid returned.
-    pub from: Option<u32>,
+    from: Option<u32>,
     /// Represents the value to send in from to fetch the next slice of the results. The first item for the next slice starts at this exact number. When the returned value is null, it means that all the data have been browsed in the given order.
-    pub next: Option<u32>,
+    next: Option<u32>,
 }
 
 /// Get all tasks
@@ -228,7 +228,7 @@ mod tests {
     let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
     snapshot!(meili_snap::json_string!(err), @r###"
     {
-      "message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
      "code": "invalid_task_types",
      "type": "invalid_request",
      "link": "https://docs.meilisearch.com/errors#invalid_task_types"

@@ -789,12 +789,11 @@ impl TryFrom<Value> for ExternalDocumentId {
     }
 }
 
-#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Deserr, ToSchema, Serialize)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserr, ToSchema, Serialize)]
 #[deserr(rename_all = camelCase)]
 #[serde(rename_all = "camelCase")]
 pub enum MatchingStrategy {
     /// Remove query words from last to first
-    #[default]
     Last,
     /// All query words are mandatory
     All,
@@ -802,6 +801,12 @@ pub enum MatchingStrategy {
     Frequency,
 }
 
+impl Default for MatchingStrategy {
+    fn default() -> Self {
+        Self::Last
+    }
+}
+
 impl From<MatchingStrategy> for TermsMatchingStrategy {
     fn from(other: MatchingStrategy) -> Self {
         match other {
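Note: replacing `#[derive(Default)]` plus `#[default]` with a hand-written impl is behaviour-preserving; both forms pick Last. A compilable side-by-side of the two spellings:

#[allow(dead_code)]
#[derive(Debug, Default, PartialEq)]
enum Derived {
    #[default] // derive-based form (stable since Rust 1.62)
    Last,
    All,
}

#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum Manual {
    Last,
    All,
}

impl Default for Manual {
    // manual form, as the diff now writes it
    fn default() -> Self {
        Self::Last
    }
}

fn main() {
    assert_eq!(Derived::default(), Derived::Last);
    assert_eq!(Manual::default(), Manual::Last);
}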
@@ -187,7 +187,7 @@ macro_rules! compute_forbidden_search {
 
 #[actix_rt::test]
 async fn search_authorized_simple_token() {
-    let tenant_tokens = [
+    let tenant_tokens = vec![
         hashmap! {
             "searchRules" => json!({"*": {}}),
             "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
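Note: this hunk (and the identical ones below) switches an array literal to `vec![...]`: a `[T; N]` array has its length baked into the type, while `vec!` builds a growable, heap-allocated Vec. The tests only iterate, so either works; a tiny illustration:

fn main() {
    let array: [&str; 2] = ["a", "b"]; // length checked at compile time
    let mut vector: Vec<&str> = vec!["a", "b"]; // can grow at runtime
    vector.push("c");

    // Both iterate the same way, which is all the tests rely on:
    assert_eq!(array.iter().count(), 2);
    assert_eq!(vector.iter().count(), 3);
}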
@@ -239,7 +239,7 @@ async fn search_authorized_simple_token() {
|
|||||||
|
|
||||||
#[actix_rt::test]
|
#[actix_rt::test]
|
||||||
async fn search_authorized_filter_token() {
|
async fn search_authorized_filter_token() {
|
||||||
let tenant_tokens = [
|
let tenant_tokens = vec![
|
||||||
hashmap! {
|
hashmap! {
|
||||||
"searchRules" => json!({"*": {"filter": "color = blue"}}),
|
"searchRules" => json!({"*": {"filter": "color = blue"}}),
|
||||||
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
|
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
|
||||||
@@ -292,7 +292,7 @@ async fn search_authorized_filter_token() {
|
|||||||
|
|
||||||
#[actix_rt::test]
|
#[actix_rt::test]
|
||||||
async fn filter_search_authorized_filter_token() {
|
async fn filter_search_authorized_filter_token() {
|
||||||
let tenant_tokens = [
|
let tenant_tokens = vec![
|
||||||
hashmap! {
|
hashmap! {
|
||||||
"searchRules" => json!({"*": {"filter": "color = blue"}}),
|
"searchRules" => json!({"*": {"filter": "color = blue"}}),
|
||||||
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
|
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
|
||||||
@@ -353,7 +353,7 @@ async fn filter_search_authorized_filter_token() {
|
|||||||
/// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above.
|
/// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above.
|
||||||
#[actix_rt::test]
|
#[actix_rt::test]
|
||||||
async fn error_search_token_forbidden_parent_key() {
|
async fn error_search_token_forbidden_parent_key() {
|
||||||
let tenant_tokens = [
|
let tenant_tokens = vec![
|
||||||
hashmap! {
|
hashmap! {
|
||||||
"searchRules" => json!({"*": {}}),
|
"searchRules" => json!({"*": {}}),
|
||||||
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
|
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
|
||||||
@@ -389,7 +389,7 @@ async fn error_search_token_forbidden_parent_key() {
|
|||||||
|
|
||||||
#[actix_rt::test]
|
#[actix_rt::test]
|
||||||
async fn error_search_forbidden_token() {
|
async fn error_search_forbidden_token() {
|
||||||
let tenant_tokens = [
|
let tenant_tokens = vec![
|
||||||
// bad index
|
         // bad index
         hashmap! {
             "searchRules" => json!({"products": {}}),

@@ -680,7 +680,7 @@ async fn multi_search_authorized_simple_token() {
 
 #[actix_rt::test]
 async fn single_search_authorized_filter_token() {
-    let tenant_tokens = [
+    let tenant_tokens = vec![
         hashmap! {
             "searchRules" => json!({"*": {"filter": "color = blue"}}),
             "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())

@@ -733,7 +733,7 @@ async fn single_search_authorized_filter_token() {
 
 #[actix_rt::test]
 async fn multi_search_authorized_filter_token() {
-    let both_tenant_tokens = [
+    let both_tenant_tokens = vec![
         hashmap! {
             "searchRules" => json!({"sales": {"filter": "color = blue"}, "products": {"filter": "doggos.age <= 5"}}),
             "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())

@@ -842,7 +842,7 @@ async fn filter_single_search_authorized_filter_token() {
 
 #[actix_rt::test]
 async fn filter_multi_search_authorized_filter_token() {
-    let tenant_tokens = [
+    let tenant_tokens = vec![
         hashmap! {
             "searchRules" => json!({"sales": {"filter": "color = blue"}, "products": {"filter": "doggos.age <= 5"}}),
             "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())

@@ -900,7 +900,7 @@ async fn filter_multi_search_authorized_filter_token() {
 /// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above.
 #[actix_rt::test]
 async fn error_single_search_token_forbidden_parent_key() {
-    let tenant_tokens = [
+    let tenant_tokens = vec![
         hashmap! {
             "searchRules" => json!({"*": {}}),
             "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())

@@ -941,7 +941,7 @@ async fn error_single_search_token_forbidden_parent_key() {
 /// Tests that those Tenant Token are incompatible with the REFUSED_KEYS defined above.
 #[actix_rt::test]
 async fn error_multi_search_token_forbidden_parent_key() {
-    let tenant_tokens = [
+    let tenant_tokens = vec![
         hashmap! {
             "searchRules" => json!({"*": {}}),
             "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
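The only change these hunks keep repeating is an array literal `[...]` on one side against `vec![...]` on the other. A minimal sketch, not from the repo, of why either form compiles with the surrounding loops untouched: arrays and `Vec` both implement `IntoIterator`, the array merely skips a heap allocation.

    fn main() {
        let on_stack = ["token_a", "token_b"]; // fixed-size array on the stack
        let on_heap = vec!["token_a", "token_b"]; // growable, heap-allocated Vec
        for token in on_stack {
            println!("stack: {token}");
        }
        for token in on_heap {
            println!("heap: {token}");
        }
    }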
@@ -42,7 +42,7 @@ async fn batch_bad_types() {
     snapshot!(code, @"400 Bad Request");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -143,8 +143,6 @@ impl Display for Value {
             ".processingTimeMs" => "[duration]",
             ".details.embedders.*.url" => "[url]",
             ".details.dumpUid" => "[dump_uid]",
-            ".network.network_version" => "[version]",
-            ".network.origin.networkVersion" => "[version]",
         })
     )
 }
@@ -93,20 +93,6 @@ impl Service {
         self.request(req).await
     }
-
-    pub async fn patch_str(
-        &self,
-        url: impl AsRef<str>,
-        body: impl AsRef<str>,
-        headers: Vec<(&str, &str)>,
-    ) -> (Value, StatusCode) {
-        let mut req =
-            test::TestRequest::patch().uri(url.as_ref()).set_payload(body.as_ref().to_string());
-        for header in headers {
-            req = req.insert_header(header);
-        }
-        self.request(req).await
-    }
 
     pub async fn patch(&self, url: impl AsRef<str>, body: Value) -> (Value, StatusCode) {
         self.patch_encoded(url, body, Encoder::Plain).await
     }
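With `patch_str` gone, the remaining PATCH helper only accepts a parsed `serde_json::Value`. A hedged sketch of the caller-side adaptation — `service`, the URL, and the body here are illustrative, not taken from the repo:

    // Raw string bodies are parsed up front, so malformed JSON now fails in
    // the test itself rather than somewhere inside the route handler.
    let body: serde_json::Value = serde_json::from_str(r#"{"self": "ms0"}"#).unwrap();
    let (response, status) = service.patch("/network", body).await;
    assert_eq!(status.as_u16(), 200);
    println!("{response}");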
@@ -46,7 +46,7 @@ async fn errors_on_param() {
     meili_snap::snapshot!(code, @"400 Bad Request");
     meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
-      "message": "Unknown field `selfie`: expected one of `remotes`, `self`, `leader`, `previousRemotes`",
+      "message": "Unknown field `selfie`: expected one of `remotes`, `self`, `sharding`",
       "code": "bad_request",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#bad_request"

@@ -186,7 +186,7 @@ async fn errors_on_param() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": null,
       "remotes": {

@@ -196,8 +196,7 @@ async fn errors_on_param() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = server

@@ -266,24 +265,22 @@ async fn auth() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "master",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 
     let (response, code) = server.get_network().await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "master",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -292,12 +289,11 @@ async fn auth() {
     let (response, code) = server.get_network().await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "master",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -311,12 +307,11 @@ async fn auth() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "api_key",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -395,20 +390,18 @@ async fn get_and_set_network() {
     {
       "self": null,
       "remotes": {},
-      "leader": null,
-      "version": "00000000-0000-0000-0000-000000000000"
+      "sharding": false
     }
     "###);
 
     // adding self
     let (response, code) = server.set_network(json!({"self": "myself"})).await;
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "myself",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -426,7 +419,7 @@ async fn get_and_set_network() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "myself",
       "remotes": {

@@ -441,8 +434,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -456,7 +448,7 @@ async fn get_and_set_network() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "myself",
       "remotes": {

@@ -471,8 +463,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -487,7 +478,7 @@ async fn get_and_set_network() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "myself",
       "remotes": {

@@ -507,8 +498,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -520,7 +510,7 @@ async fn get_and_set_network() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "myself",
       "remotes": {

@@ -535,8 +525,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -544,7 +533,7 @@ async fn get_and_set_network() {
     let (response, code) = server.set_network(json!({"self": Null})).await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": null,
       "remotes": {

@@ -559,8 +548,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -568,7 +556,7 @@ async fn get_and_set_network() {
     let (response, code) = server.set_network(json!({"self": "thy"})).await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "thy",
       "remotes": {

@@ -583,8 +571,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -592,7 +579,7 @@ async fn get_and_set_network() {
     let (response, code) = server.set_network(json!({})).await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "thy",
       "remotes": {

@@ -607,8 +594,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -616,7 +602,7 @@ async fn get_and_set_network() {
     let (response, code) = server.set_network(json!({"remotes": {}})).await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "thy",
       "remotes": {

@@ -631,8 +617,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -640,7 +625,7 @@ async fn get_and_set_network() {
     let (response, code) = server.get_network().await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "thy",
       "remotes": {

@@ -655,8 +640,7 @@ async fn get_and_set_network() {
           "writeApiKey": null
         }
       },
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -668,12 +652,11 @@ async fn get_and_set_network() {
         .await;
 
     meili_snap::snapshot!(code, @"200 OK");
-    meili_snap::snapshot!(meili_snap::json_string!(response, {".version" => "[version]"}), @r###"
+    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
     {
       "self": "thy",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 }
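Condensed, the shape change these hunks keep repeating (field sets taken from the snapshots above, values illustrative):

    fn main() {
        // One side of the diff: leader-based topology plus a version stamp.
        let with_leader = serde_json::json!({
            "self": "myself",
            "remotes": {},
            "leader": null,
            "version": "[version]"
        });
        // Other side: the earlier shape, where sharding is a plain boolean.
        let with_sharding = serde_json::json!({
            "self": "myself",
            "remotes": {},
            "sharding": false
        });
        assert_ne!(with_leader, with_sharding);
    }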
@@ -128,32 +128,29 @@ async fn remote_sharding() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
    let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms2",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 
@@ -192,6 +189,8 @@ async fn remote_sharding() {
         }
     }});
 
+    println!("{}", serde_json::to_string_pretty(&network).unwrap());
+
     let (_response, status_code) = ms0.set_network(network.clone()).await;
     snapshot!(status_code, @"200 OK");
     let (_response, status_code) = ms1.set_network(network.clone()).await;
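The one functional addition here is a debug print. For reference, `serde_json::to_string_pretty` errors only if serialization fails, hence the bare `unwrap`; a self-contained sketch with an illustrative payload:

    fn main() {
        let network = serde_json::json!({
            "remotes": { "ms0": { "url": "http://localhost:7700" } }
        });
        // Pretty-printing makes snapshot mismatches much easier to eyeball.
        println!("{}", serde_json::to_string_pretty(&network).unwrap());
    }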
@@ -447,32 +446,29 @@ async fn remote_sharding_retrieve_vectors() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms2.set_network(json!({"self": "ms2"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms2",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -948,22 +944,20 @@ async fn error_unregistered_remote() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -1070,22 +1064,20 @@ async fn error_no_weighted_score() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -1208,22 +1200,20 @@ async fn error_bad_response() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -1350,22 +1340,20 @@ async fn error_bad_request() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -1485,22 +1473,20 @@ async fn error_bad_request_facets_by_index() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -1631,22 +1617,20 @@ async fn error_bad_request_facets_by_index_facet() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -1786,7 +1770,7 @@ async fn error_remote_does_not_answer() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},

@@ -1795,7 +1779,7 @@ async fn error_remote_does_not_answer() {
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},

@@ -1989,22 +1973,20 @@ async fn error_remote_404() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -2190,22 +2172,20 @@ async fn error_remote_sharding_auth() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -2355,22 +2335,20 @@ async fn remote_sharding_auth() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -2515,22 +2493,20 @@ async fn error_remote_500() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -2700,22 +2676,20 @@ async fn error_remote_500_once() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
-      "leader": null,
-      "version": "[version]"
+      "sharding": false
     }
     "###);
 

@@ -2889,7 +2863,7 @@ async fn error_remote_timeout() {
 
     let (response, code) = ms0.set_network(json!({"self": "ms0"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms0",
       "remotes": {},

@@ -2898,7 +2872,7 @@ async fn error_remote_timeout() {
     "###);
     let (response, code) = ms1.set_network(json!({"self": "ms1"})).await;
     snapshot!(code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]"}), @r###"
+    snapshot!(json_string!(response), @r###"
     {
       "self": "ms1",
       "remotes": {},
@@ -3108,8 +3082,8 @@ impl LocalMeili {
         let (value, code) = rt.block_on(async {
             match req.method.as_str() {
                 "POST" => server.service.post_str(&req.url, body, headers.clone()).await,
-                "PUT" => server.service.put_str(&req.url, body, headers.clone()).await,
-                "PATCH" => server.service.patch_str(&req.url, body, headers).await,
+                "PUT" => server.service.put_str(&req.url, body, headers).await,
+                "PATCH" => server.service.patch(&req.url, req.body_json().unwrap()).await,
                 "GET" => server.service.get(&req.url).await,
                 "DELETE" => server.service.delete(&req.url).await,
                 _ => unimplemented!(),
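The PATCH arm stops replaying the raw body and parses it to JSON first, which is what lets `patch_str` disappear from `Service`. A sketch of the assumed `body_json` behavior — the request type and its fields here are illustrative, not the repo's actual definitions:

    use serde_json::Value;

    struct ProxiedRequest {
        method: String,
        url: String,
        body: Vec<u8>,
    }

    impl ProxiedRequest {
        // Presumably what `req.body_json()` does: parse the raw bytes as JSON,
        // so malformed payloads fail in the proxy instead of inside the route.
        fn body_json(&self) -> serde_json::Result<Value> {
            serde_json::from_slice(&self.body)
        }
    }

    fn main() {
        let req = ProxiedRequest {
            method: "PATCH".into(),
            url: "/network".into(),
            body: br#"{"sharding": true}"#.to_vec(),
        };
        println!("{} {} -> {:?}", req.method, req.url, req.body_json().unwrap());
    }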
@@ -3187,6 +3161,35 @@ async fn remote_auto_sharding() {
     snapshot!(code, @"200 OK");
     snapshot!(json_string!(response["network"]), @"true");
 
+    // set self & sharding
+    let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
+    snapshot!(code, @"200 OK");
+    snapshot!(json_string!(response), @r###"
+    {
+      "self": "ms0",
+      "remotes": {},
+      "sharding": true
+    }
+    "###);
+    let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
+    snapshot!(code, @"200 OK");
+    snapshot!(json_string!(response), @r###"
+    {
+      "self": "ms1",
+      "remotes": {},
+      "sharding": true
+    }
+    "###);
+    let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
+    snapshot!(code, @"200 OK");
+    snapshot!(json_string!(response), @r###"
+    {
+      "self": "ms2",
+      "remotes": {},
+      "sharding": true
+    }
+    "###);
+
     // wrap servers
     let ms0 = Arc::new(ms0);
     let ms1 = Arc::new(ms1);

@@ -3197,10 +3200,7 @@ async fn remote_auto_sharding() {
     let rms2 = LocalMeili::new(ms2.clone()).await;
 
     // set network
-    let network = json!({
-        "self": "ms0",
-        "leader": "ms0",
-        "remotes": {
+    let network = json!({"remotes": {
         "ms0": {
             "url": rms0.url()
         },

@@ -3214,99 +3214,12 @@ async fn remote_auto_sharding() {
 
     println!("{}", serde_json::to_string_pretty(&network).unwrap());
 
-    let (task, status_code) = ms0.set_network(network.clone()).await;
-    snapshot!(status_code, @"202 Accepted");
-
-    let t0 = task.uid();
-    let (t, _) = ms0.get_task(t0).await;
-
-    let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
-    let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
-
-    ms0.wait_task(t0).await.succeeded();
-    ms1.wait_task(t1).await.succeeded();
-    ms2.wait_task(t2).await.succeeded();
-
-    let (response, status_code) = ms0.get_network().await;
+    let (_response, status_code) = ms0.set_network(network.clone()).await;
     snapshot!(status_code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-    {
-      "self": "ms0",
-      "remotes": {
-        "ms0": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms1": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms2": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        }
-      },
-      "leader": "ms0",
-      "version": "[version]"
-    }
-    "###);
-
-    let (response, status_code) = ms1.get_network().await;
+    let (_response, status_code) = ms1.set_network(network.clone()).await;
     snapshot!(status_code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-    {
-      "self": "ms1",
-      "remotes": {
-        "ms0": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms1": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms2": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        }
-      },
-      "leader": "ms0",
-      "version": "[version]"
-    }
-    "###);
-
-    let (response, status_code) = ms2.get_network().await;
+    let (_response, status_code) = ms2.set_network(network.clone()).await;
     snapshot!(status_code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-    {
-      "self": "ms2",
-      "remotes": {
-        "ms0": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms1": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms2": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        }
-      },
-      "leader": "ms0",
-      "version": "[version]"
-    }
-    "###);
 
     // add documents
     let documents = SCORE_DOCUMENTS.clone();
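Worth noting about the block removed above: with a leader configured, `set_network` answered `202 Accepted` and fanned the change out as tasks, so the test had to chase remote task uids before asserting anything; the surviving code gets a synchronous `200 OK`. The removed wait pattern, restated from the left-hand side of the hunk:

    // Leader-based flow (from the removed lines): register the task locally,
    // look up the uids it spawned on each remote, then wait on all of them.
    let (task, _status_code) = ms0.set_network(network.clone()).await; // 202 Accepted
    let t0 = task.uid();
    let (t, _) = ms0.get_task(t0).await;
    let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
    let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
    ms0.wait_task(t0).await.succeeded();
    ms1.wait_task(t1).await.succeeded();
    ms2.wait_task(t2).await.succeeded();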
@@ -3560,11 +3473,11 @@ async fn sharding_not_enterprise() {
     snapshot!(code, @"200 OK");
     snapshot!(json_string!(response["network"]), @"true");
 
-    let (response, code) = ms0.set_network(json!({"self": "ms0", "leader": "ms0"})).await;
+    let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
     snapshot!(code, @"451 Unavailable For Legal Reasons");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Meilisearch Enterprise Edition is required to set `network.leader`",
+      "message": "Meilisearch Enterprise Edition is required to set `network.sharding`",
       "code": "requires_enterprise_edition",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#requires_enterprise_edition"

@@ -3591,6 +3504,36 @@ async fn remote_auto_sharding_with_custom_metadata() {
     snapshot!(code, @"200 OK");
     snapshot!(json_string!(response["network"]), @"true");
 
+    // set self & sharding
+
+    let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
+    snapshot!(code, @"200 OK");
+    snapshot!(json_string!(response), @r###"
+    {
+      "self": "ms0",
+      "remotes": {},
+      "sharding": true
+    }
+    "###);
+    let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
+    snapshot!(code, @"200 OK");
+    snapshot!(json_string!(response), @r###"
+    {
+      "self": "ms1",
+      "remotes": {},
+      "sharding": true
+    }
+    "###);
+    let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
+    snapshot!(code, @"200 OK");
+    snapshot!(json_string!(response), @r###"
+    {
+      "self": "ms2",
+      "remotes": {},
+      "sharding": true
+    }
+    "###);
+
     // wrap servers
     let ms0 = Arc::new(ms0);
     let ms1 = Arc::new(ms1);

@@ -3601,10 +3544,7 @@ async fn remote_auto_sharding_with_custom_metadata() {
     let rms2 = LocalMeili::new(ms2.clone()).await;
 
     // set network
-    let network = json!({
-        "self": "ms0",
-        "leader": "ms0",
-        "remotes": {
+    let network = json!({"remotes": {
         "ms0": {
             "url": rms0.url()
         },

@@ -3618,99 +3558,12 @@ async fn remote_auto_sharding_with_custom_metadata() {
 
     println!("{}", serde_json::to_string_pretty(&network).unwrap());
 
-    let (task, status_code) = ms0.set_network(network.clone()).await;
-    snapshot!(status_code, @"202 Accepted");
-
-    let t0 = task.uid();
-    let (t, _) = ms0.get_task(t0).await;
-
-    let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
-    let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
-
-    ms0.wait_task(t0).await.succeeded();
-    ms1.wait_task(t1).await.succeeded();
-    ms2.wait_task(t2).await.succeeded();
-
-    let (response, status_code) = ms0.get_network().await;
+    let (_response, status_code) = ms0.set_network(network.clone()).await;
     snapshot!(status_code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-    {
-      "self": "ms0",
-      "remotes": {
-        "ms0": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms1": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms2": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        }
-      },
-      "leader": "ms0",
-      "version": "[version]"
-    }
-    "###);
-
-    let (response, status_code) = ms1.get_network().await;
+    let (_response, status_code) = ms1.set_network(network.clone()).await;
     snapshot!(status_code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-    {
-      "self": "ms1",
-      "remotes": {
-        "ms0": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms1": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms2": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        }
-      },
-      "leader": "ms0",
-      "version": "[version]"
-    }
-    "###);
-
-    let (response, status_code) = ms2.get_network().await;
+    let (_response, status_code) = ms2.set_network(network.clone()).await;
     snapshot!(status_code, @"200 OK");
-    snapshot!(json_string!(response, {".version" => "[version]", ".remotes.*.url" => "[url]"}), @r###"
-    {
-      "self": "ms2",
-      "remotes": {
-        "ms0": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms1": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        },
-        "ms2": {
-          "url": "[url]",
-          "searchApiKey": null,
-          "writeApiKey": null
-        }
-      },
-      "leader": "ms0",
-      "version": "[version]"
-    }
-    "###);
 
     // add documents
     let documents = SCORE_DOCUMENTS.clone();

@@ -3733,7 +3586,6 @@ async fn remote_auto_sharding_with_custom_metadata() {
     let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
 
     let t = ms0.wait_task(t0).await.succeeded();
-
     snapshot!(t, @r###"
     {
       "uid": "[uid]",

@@ -3754,15 +3606,14 @@ async fn remote_auto_sharding_with_custom_metadata() {
       "network": {
         "remote_tasks": {
           "ms1": {
-            "taskUid": 1,
+            "taskUid": 0,
             "error": null
           },
           "ms2": {
-            "taskUid": 1,
+            "taskUid": 0,
             "error": null
           }
-        },
-        "network_version": "[version]"
+        }
       },
       "customMetadata": "remote_auto_sharding_with_custom_metadata"
     }

@@ -3789,8 +3640,7 @@ async fn remote_auto_sharding_with_custom_metadata() {
       "network": {
         "origin": {
          "remoteName": "ms0",
-          "taskUid": 1,
-          "networkVersion": "[version]"
+          "taskUid": 0
        }
      },
       "customMetadata": "remote_auto_sharding_with_custom_metadata"

@@ -3818,8 +3668,7 @@ async fn remote_auto_sharding_with_custom_metadata() {
       "network": {
         "origin": {
           "remoteName": "ms0",
-          "taskUid": 1,
-          "networkVersion": "[version]"
+          "taskUid": 0
         }
       },
       "customMetadata": "remote_auto_sharding_with_custom_metadata"
@@ -95,36 +95,36 @@ async fn task_bad_types() {
 
     let (response, code) = server.tasks_filter("types=doggo").await;
     snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
+    snapshot!(json_string!(response), @r#"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
     }
-    "###);
+    "#);
 
     let (response, code) = server.cancel_tasks("types=doggo").await;
     snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
+    snapshot!(json_string!(response), @r#"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
     }
-    "###);
+    "#);
 
     let (response, code) = server.delete_tasks("types=doggo").await;
     snapshot!(code, @"400 Bad Request");
-    snapshot!(json_string!(response), @r###"
+    snapshot!(json_string!(response), @r#"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`, `networkTopologyChange`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `export`, `upgradeDatabase`, `indexCompaction`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
     }
-    "###);
+    "#);
 }
 
 #[actix_rt::test]
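Apart from the task-type list, the only change above is the snapshot delimiter moving from `@r###"…"###` to `@r#"…"#`. That is safe because a raw string only needs more `#`s than any quote-hash run inside its body. A quick self-contained illustration:

    fn main() {
        let one_hash = r#"{"msg": "plain json"}"#; // no `"#` inside: one hash is enough
        let three_hashes = r###"contains "# inside"###; // a `"#` in the body forces more hashes
        println!("{one_hash}\n{three_hashes}");
    }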
@@ -385,10 +385,9 @@ pub struct SearchResult {
     pub query_vector: Option<Embedding>,
 }
 
-#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum TermsMatchingStrategy {
     // remove last word first
-    #[default]
     Last,
     // all words are mandatory
     All,

@@ -396,6 +395,12 @@ pub enum TermsMatchingStrategy {
     Frequency,
 }
 
+impl Default for TermsMatchingStrategy {
+    fn default() -> Self {
+        Self::Last
+    }
+}
+
 impl From<MatchingStrategy> for TermsMatchingStrategy {
     fn from(other: MatchingStrategy) -> Self {
         match other {
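The two forms in this hunk are behaviorally identical: `#[derive(Default)]` with a `#[default]` variant marker (stable since Rust 1.62) expands to the same impl that the right-hand side writes by hand. A self-contained sketch of the equivalence — the enum name is illustrative:

    // Hand-written form, as on the right of the diff:
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum Strategy {
        Last,
        All,
    }

    impl Default for Strategy {
        fn default() -> Self {
            Self::Last
        }
    }

    fn main() {
        // Same result as `#[derive(Default)]` + `#[default]` on `Last`.
        assert_eq!(Strategy::default(), Strategy::Last);
    }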
@@ -124,7 +124,7 @@ impl GrenadParameters {
     /// This should be called inside of a rayon thread pool,
     /// otherwise, it will take the global number of threads.
     pub fn max_memory_by_thread(&self) -> Option<usize> {
-        self.max_memory.map(|max_memory| max_memory / rayon::current_num_threads())
+        self.max_memory.map(|max_memory| (max_memory / rayon::current_num_threads()))
     }
 }
 
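Behavior is identical on both sides of that hunk (the right merely adds parentheses); the method splits a global memory budget evenly across rayon's current pool. A runnable sketch of the same budgeting, with an assumed 4 GiB budget:

    fn max_memory_by_thread(max_memory: Option<usize>) -> Option<usize> {
        // Inside a rayon pool this is the pool's size; otherwise the global default.
        max_memory.map(|max_memory| max_memory / rayon::current_num_threads())
    }

    fn main() {
        // e.g. with 8 worker threads, a 4 GiB budget yields 512 MiB per thread.
        let per_thread = max_memory_by_thread(Some(4 * 1024 * 1024 * 1024));
        println!("{per_thread:?}");
    }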
@@ -54,12 +54,11 @@ pub struct DocumentAdditionResult {
     pub number_of_documents: u64,
 }
 
-#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
 #[non_exhaustive]
 pub enum IndexDocumentsMethod {
     /// Replace the previous document with the new one,
     /// removing all the already known attributes.
-    #[default]
     ReplaceDocuments,
 
     /// Merge the previous version of the document with the new version,

@@ -67,6 +66,12 @@ pub enum IndexDocumentsMethod {
     UpdateDocuments,
 }
 
+impl Default for IndexDocumentsMethod {
+    fn default() -> Self {
+        Self::ReplaceDocuments
+    }
+}
+
 pub struct IndexDocuments<'t, 'i, 'a, FP, FA> {
     wtxn: &'t mut heed::RwTxn<'i>,
     index: &'i Index,
@@ -5,38 +5,18 @@

 use std::hash::{BuildHasher as _, BuildHasherDefault};

-#[derive(Debug, Clone)]
-pub struct Shards(pub Vec<Shard>);
-
-#[derive(Debug, Clone)]
-pub struct Shard {
-    pub is_own: bool,
-    pub name: String,
+pub struct Shards {
+    pub own: Vec<String>,
+    pub others: Vec<String>,
 }

 impl Shards {
-    pub fn from_remotes_local<'a>(
-        remotes: impl IntoIterator<Item = &'a str>,
-        local: Option<&str>,
-    ) -> Self {
-        Shards(
-            remotes
-                .into_iter()
-                .map(|name| Shard { is_own: Some(name) == local, name: name.to_owned() })
-                .collect(),
-        )
-    }
-
     pub fn must_process(&self, docid: &str) -> bool {
-        self.processing_shard(docid).map(|shard| shard.is_own).unwrap_or_default()
-    }
-
-    pub fn processing_shard<'a>(&'a self, docid: &str) -> Option<&'a Shard> {
         let hasher = BuildHasherDefault::<twox_hash::XxHash3_64>::new();
-        let to_hash = |shard: &'a Shard| (shard, hasher.hash_one((&shard.name, docid)));
+        let to_hash = |shard: &String| hasher.hash_one((shard, docid));

-        let shard =
-            self.0.iter().map(to_hash).max_by_key(|(_, hash)| *hash).map(|(shard, _)| shard);
-        shard
+        let max_hash = self.others.iter().map(to_hash).max().unwrap_or_default();
+        self.own.iter().map(to_hash).any(|hash| hash > max_hash)
     }
 }
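The rewritten `must_process` is rendezvous (highest-random-weight) hashing: every shard name is hashed together with the document id, the document belongs to the shard with the highest hash, and a node indexes the document only if one of its own shard names beats every other shard. A standalone sketch of the same scheme, substituting std's `DefaultHasher` for `twox_hash::XxHash3_64`:

use std::hash::{BuildHasher, BuildHasherDefault, DefaultHasher};

struct Shards {
    own: Vec<String>,
    others: Vec<String>,
}

impl Shards {
    // A document is ours iff one of our shard names has the highest
    // (name, docid) hash among all shards.
    fn must_process(&self, docid: &str) -> bool {
        let hasher = BuildHasherDefault::<DefaultHasher>::default();
        let to_hash = |shard: &String| hasher.hash_one((shard, docid));
        let max_hash = self.others.iter().map(to_hash).max().unwrap_or_default();
        self.own.iter().map(to_hash).any(|hash| hash > max_hash)
    }
}

fn main() {
    let nodes = [
        Shards { own: vec!["a".into()], others: vec!["b".into(), "c".into()] },
        Shards { own: vec!["b".into()], others: vec!["a".into(), "c".into()] },
        Shards { own: vec!["c".into()], others: vec!["a".into(), "b".into()] },
    ];
    // Barring hash ties, exactly one node claims each document.
    for docid in ["doc-1", "doc-2", "doc-3"] {
        assert_eq!(nodes.iter().filter(|s| s.must_process(docid)).count(), 1);
    }
}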
@@ -48,11 +48,10 @@ use crate::{
     ChannelCongestion, FieldId, FilterableAttributesRule, Index, LocalizedAttributesRule, Result,
 };

-#[derive(Default, Debug, Clone, PartialEq, Eq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 pub enum Setting<T> {
     Set(T),
     Reset,
-    #[default]
     NotSet,
 }

@@ -72,6 +71,12 @@ where
     }
 }

+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
 impl<T> Setting<T> {
     pub fn set(self) -> Option<T> {
         match self {
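Beyond style, the manual impl here is strictly more general: `#[derive(Default)]` bounds every type parameter with `Default`, so the derived impl would only exist for `Setting<T>` where `T: Default`, even though the `NotSet` variant never touches `T`. The hand-written `impl<T> Default for Setting<T>` carries no such bound; a sketch:

#[allow(dead_code)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> Default for Setting<T> {
    fn default() -> Self {
        Self::NotSet
    }
}

// Deliberately does not implement Default.
struct NoDefault;

fn main() {
    // Compiles with the manual impl; the derived one would demand
    // `NoDefault: Default`.
    assert!(matches!(Setting::<NoDefault>::default(), Setting::NotSet));
}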
@@ -67,7 +67,7 @@ impl<F> Embeddings<F> {
     ///
     /// If `embeddings.len() % self.dimension != 0`, then the append operation fails.
     pub fn append(&mut self, mut embeddings: Vec<F>) -> Result<(), Vec<F>> {
-        if !embeddings.len().is_multiple_of(self.dimension) {
+        if embeddings.len() % self.dimension != 0 {
             return Err(embeddings);
         }
         self.data.append(&mut embeddings);
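Same guard, older spelling: for a non-zero `dimension`, `len % dim != 0` is equivalent to `!len.is_multiple_of(dim)`. A sketch of the contract, with `f32` standing in for the generic `F`:

struct Embeddings {
    dimension: usize,
    data: Vec<f32>,
}

impl Embeddings {
    // Accept only a whole number of `dimension`-sized vectors,
    // handing the buffer back to the caller on rejection.
    fn append(&mut self, mut embeddings: Vec<f32>) -> Result<(), Vec<f32>> {
        if embeddings.len() % self.dimension != 0 {
            return Err(embeddings);
        }
        self.data.append(&mut embeddings);
        Ok(())
    }
}

fn main() {
    let mut e = Embeddings { dimension: 3, data: Vec::new() };
    assert!(e.append(vec![0.0; 6]).is_ok());  // two 3-d vectors
    assert!(e.append(vec![0.0; 4]).is_err()); // 4 is not a multiple of 3
}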
@@ -10,3 +10,5 @@ serde_json = "1.0"
 clap = { version = "4.5.52", features = ["derive"] }
 anyhow = "1.0.100"
 utoipa = "5.4.0"
+reqwest = { version = "0.12", features = ["blocking"] }
+regex = "1.10"
@@ -1,21 +1,57 @@
+use std::borrow::Cow;
+use std::collections::HashMap;
 use std::path::PathBuf;
+use std::sync::LazyLock;

-use anyhow::Result;
+use anyhow::{Context, Result};
 use clap::Parser;
 use meilisearch::routes::MeilisearchApi;
+use regex::Regex;
+use serde_json::{json, Value};
 use utoipa::OpenApi;

+const HTTP_METHODS: &[&str] = &["get", "post", "put", "patch", "delete"];
+
+/// Language used in the documentation repository (contains the key mapping)
+const DOCS_LANG: &str = "cURL";
+
+/// Mapping of repository URLs to language names.
+/// The "cURL" entry is special: it contains the key mapping used to resolve sample IDs for all SDKs.
+const CODE_SAMPLES: &[(&str, &str)] = &[
+    ("https://raw.githubusercontent.com/meilisearch/documentation/refs/heads/main/.code-samples.meilisearch.yaml", "cURL"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-dotnet/refs/heads/main/.code-samples.meilisearch.yaml", "C#"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-dart/refs/heads/main/.code-samples.meilisearch.yaml", "Dart"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-go/refs/heads/main/.code-samples.meilisearch.yaml", "Go"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-java/refs/heads/main/.code-samples.meilisearch.yaml", "Java"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-js/refs/heads/main/.code-samples.meilisearch.yaml", "JS"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-php/refs/heads/main/.code-samples.meilisearch.yaml", "PHP"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-python/refs/heads/main/.code-samples.meilisearch.yaml", "Python"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-ruby/refs/heads/main/.code-samples.meilisearch.yaml", "Ruby"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-rust/refs/heads/main/.code-samples.meilisearch.yaml", "Rust"),
+    ("https://raw.githubusercontent.com/meilisearch/meilisearch-swift/refs/heads/main/.code-samples.meilisearch.yaml", "Swift"),
+];
+
+// Pre-compiled regex patterns
+static COMMENT_RE: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^#\s*([a-zA-Z0-9_]+)\s*$").unwrap());
+static CODE_START_RE: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^([a-zA-Z0-9_]+):\s*\|-\s*$").unwrap());

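These two patterns carve the `.code-samples.meilisearch.yaml` files into key comments (`# get_indexes`) and block-scalar starts (`list_all_indexes_1: |-`), the same shapes the tests at the end of this file exercise. A quick standalone check of what each one captures (assumes the `regex` crate):

use regex::Regex;

fn main() {
    let comment_re = Regex::new(r"^#\s*([a-zA-Z0-9_]+)\s*$").unwrap();
    let code_start_re = Regex::new(r"^([a-zA-Z0-9_]+):\s*\|-\s*$").unwrap();

    // A comment line names the operation key the following samples map to.
    assert_eq!(&comment_re.captures("# get_indexes").unwrap()[1], "get_indexes");

    // A `|-` line opens one named sample's code block.
    assert_eq!(&code_start_re.captures("list_all_indexes_1: |-").unwrap()[1], "list_all_indexes_1");
}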
 #[derive(Parser)]
 #[command(name = "openapi-generator")]
 #[command(about = "Generate OpenAPI specification for Meilisearch")]
 struct Cli {
-    /// Output file path (default: meilisearch.json)
+    /// Output file path (default: meilisearch-openapi.json)
     #[arg(short, long, value_name = "FILE")]
     output: Option<PathBuf>,

     /// Pretty print the JSON output
     #[arg(short, long)]
     pretty: bool,
+
+    /// Skip fetching code samples (offline mode)
+    #[arg(long)]
+    no_code_samples: bool,
 }

 fn main() -> Result<()> {
@@ -24,14 +60,23 @@ fn main() -> Result<()> {
     // Generate the OpenAPI specification
     let openapi = MeilisearchApi::openapi();

+    // Convert to serde_json::Value for modification
+    let mut openapi_value: Value = serde_json::to_value(&openapi)?;
+
+    // Fetch and add code samples if not disabled
+    if !cli.no_code_samples {
+        let code_samples = fetch_all_code_samples()?;
+        add_code_samples_to_openapi(&mut openapi_value, &code_samples)?;
+    }
+
     // Determine output path
-    let output_path = cli.output.unwrap_or_else(|| PathBuf::from("meilisearch.json"));
+    let output_path = cli.output.unwrap_or_else(|| PathBuf::from("meilisearch-openapi.json"));

     // Serialize to JSON
     let json = if cli.pretty {
-        serde_json::to_string_pretty(&openapi)?
+        serde_json::to_string_pretty(&openapi_value)?
     } else {
-        serde_json::to_string(&openapi)?
+        serde_json::to_string(&openapi_value)?
     };

     // Write to file
@@ -41,3 +86,364 @@ fn main() -> Result<()> {

     Ok(())
 }

+/// Code sample for a specific language
+#[derive(Debug, Clone)]
+struct CodeSample {
+    lang: String,
+    source: String,
+}
+
+/// Fetch and parse code samples from all repositories
+/// Returns a map from key (e.g., "get_indexes") to a list of code samples for different languages
+fn fetch_all_code_samples() -> Result<HashMap<String, Vec<CodeSample>>> {
+    // First, fetch the documentation file (cURL) to get the key mapping
+    let (docs_url, _) = CODE_SAMPLES
+        .iter()
+        .find(|(_, lang)| *lang == DOCS_LANG)
+        .context("Documentation source not found in CODE_SAMPLES")?;
+
+    let docs_content = reqwest::blocking::get(*docs_url)
+        .context("Failed to fetch documentation code samples")?
+        .text()
+        .context("Failed to read documentation code samples response")?;
+
+    let key_to_sample_ids = parse_documentation_mapping(&docs_content);
+
+    // Fetch code samples from all sources
+    let mut all_samples: HashMap<String, Vec<CodeSample>> = HashMap::new();
+
+    for (url, lang) in CODE_SAMPLES {
+        // For cURL, reuse already fetched content; for SDKs, fetch from URL
+        let content: Cow<'_, str> = if *lang == DOCS_LANG {
+            Cow::Borrowed(&docs_content)
+        } else {
+            match reqwest::blocking::get(*url).and_then(|r| r.text()) {
+                Ok(text) => Cow::Owned(text),
+                Err(e) => {
+                    eprintln!("Warning: Failed to fetch code samples for {}: {}", lang, e);
+                    continue;
+                }
+            }
+        };
+
+        let sample_id_to_code = parse_code_samples(&content);
+        for (key, sample_ids) in &key_to_sample_ids {
+            for sample_id in sample_ids {
+                if let Some(source) = sample_id_to_code.get(sample_id) {
+                    all_samples.entry(key.clone()).or_default().push(CodeSample {
+                        lang: lang.to_string(),
+                        source: source.clone(),
+                    });
+                }
+            }
+        }
+    }
+
+    Ok(all_samples)
+}
+
+/// Parse the documentation file to create a mapping from keys (comment IDs) to sample IDs
+/// Returns: HashMap<key, Vec<sample_id>>
+fn parse_documentation_mapping(content: &str) -> HashMap<String, Vec<String>> {
+    let mut mapping: HashMap<String, Vec<String>> = HashMap::new();
+    let mut current_key: Option<String> = None;
+
+    for line in content.lines() {
+        // Check if this is a comment line defining a new key
+        if let Some(caps) = COMMENT_RE.captures(line) {
+            current_key = Some(caps[1].to_string());
+            continue;
+        }
+
+        // Check if this starts a new code block and extract the sample_id
+        if let Some(caps) = CODE_START_RE.captures(line) {
+            if let Some(ref key) = current_key {
+                let sample_id = caps[1].to_string();
+                mapping.entry(key.clone()).or_default().push(sample_id);
+            }
+        }
+    }
+
+    mapping
+}
+
+/// State machine for parsing YAML code blocks
+struct YamlCodeBlockParser {
+    current_value: Vec<String>,
+    in_code_block: bool,
+    base_indent: Option<usize>,
+}
+
+impl YamlCodeBlockParser {
+    fn new() -> Self {
+        Self { current_value: Vec::new(), in_code_block: false, base_indent: None }
+    }
+
+    fn start_new_block(&mut self) {
+        self.current_value.clear();
+        self.in_code_block = true;
+        self.base_indent = None;
+    }
+
+    fn take_value(&mut self) -> Option<String> {
+        if self.current_value.is_empty() {
+            return None;
+        }
+        let value = self.current_value.join("\n").trim_end().to_string();
+        self.current_value.clear();
+        self.in_code_block = false;
+        self.base_indent = None;
+        Some(value)
+    }
+
+    fn process_line(&mut self, line: &str) {
+        if !self.in_code_block {
+            return;
+        }
+
+        // Empty line or line with only whitespace
+        if line.trim().is_empty() {
+            // Only add empty lines if we've already started collecting
+            if !self.current_value.is_empty() {
+                self.current_value.push(String::new());
+            }
+            return;
+        }
+
+        // Calculate indentation
+        let indent = line.len() - line.trim_start().len();
+
+        // Set base indent from first non-empty line
+        let base = *self.base_indent.get_or_insert(indent);
+
+        // If line has less indentation than base, we've exited the block
+        if indent < base {
+            self.in_code_block = false;
+            return;
+        }
+
+        // Remove base indentation and add to value
+        let dedented = line.get(base..).unwrap_or_else(|| line.trim_start());
+        self.current_value.push(dedented.to_string());
+    }
+}
+
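The parser dedents a `|-` block by the indentation of its first non-empty line and stops at the first line indented less than that. A short walk-through, feeding lines directly to the type defined above:

fn main() {
    let mut parser = YamlCodeBlockParser::new();
    parser.start_new_block();
    parser.process_line("  curl \\");
    parser.process_line("    -X GET 'MEILISEARCH_URL/indexes'");
    parser.process_line("next_key: |-"); // less indented: exits the block
    // Two spaces of base indentation are stripped; deeper lines keep
    // their relative indentation; the exit line is not collected.
    assert_eq!(
        parser.take_value().unwrap(),
        "curl \\\n  -X GET 'MEILISEARCH_URL/indexes'"
    );
}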
+/// Parse a code samples YAML file
+/// Returns: HashMap<sample_id, code>
+fn parse_code_samples(content: &str) -> HashMap<String, String> {
+    let mut samples: HashMap<String, String> = HashMap::new();
+    let mut current_sample_id: Option<String> = None;
+    let mut parser = YamlCodeBlockParser::new();
+
+    for line in content.lines() {
+        // Ignore comment lines
+        if line.starts_with('#') {
+            continue;
+        }
+
+        // Check if this starts a new code block
+        if let Some(caps) = CODE_START_RE.captures(line) {
+            // Save previous sample if exists
+            if let Some(sample_id) = current_sample_id.take() {
+                if let Some(value) = parser.take_value() {
+                    samples.insert(sample_id, value);
+                }
+            }
+            current_sample_id = Some(caps[1].to_string());
+            parser.start_new_block();
+            continue;
+        }
+
+        if current_sample_id.is_some() {
+            parser.process_line(line);
+        }
+    }
+
+    // Don't forget the last sample
+    if let Some(sample_id) = current_sample_id {
+        if let Some(value) = parser.take_value() {
+            samples.insert(sample_id, value);
+        }
+    }
+
+    samples
+}
+
+/// Convert an OpenAPI path to a code sample key
+/// Path: /indexes/{index_uid}/documents/{document_id}
+/// Method: GET
+/// Key: get_indexes_indexUid_documents_documentId
+fn path_to_key(path: &str, method: &str) -> String {
+    let method_lower = method.to_lowercase();
+
+    // Remove leading slash and convert path
+    let path_part = path
+        .trim_start_matches('/')
+        .split('/')
+        .map(|segment| {
+            if segment.starts_with('{') && segment.ends_with('}') {
+                // Convert {param_name} to camelCase
+                let param = &segment[1..segment.len() - 1];
+                to_camel_case(param)
+            } else {
+                // Keep path segments as-is, but replace hyphens with underscores
+                segment.replace('-', "_")
+            }
+        })
+        .collect::<Vec<_>>()
+        .join("_");
+
+    if path_part.is_empty() {
+        method_lower
+    } else {
+        format!("{}_{}", method_lower, path_part)
+    }
+}
+
+/// Convert snake_case to camelCase
+fn to_camel_case(s: &str) -> String {
+    let mut result = String::with_capacity(s.len());
+    let mut capitalize_next = false;
+
+    for (i, c) in s.chars().enumerate() {
+        match c {
+            '_' => capitalize_next = true,
+            _ if capitalize_next => {
+                result.push(c.to_ascii_uppercase());
+                capitalize_next = false;
+            }
+            _ if i == 0 => result.push(c.to_ascii_lowercase()),
+            _ => result.push(c),
+        }
+    }
+
+    result
+}
+
+/// Add code samples to the OpenAPI specification
+fn add_code_samples_to_openapi(
+    openapi: &mut Value,
+    code_samples: &HashMap<String, Vec<CodeSample>>,
+) -> Result<()> {
+    let paths = openapi
+        .get_mut("paths")
+        .and_then(|p| p.as_object_mut())
+        .context("OpenAPI spec missing 'paths' object")?;
+
+    for (path, path_item) in paths.iter_mut() {
+        let Some(path_item) = path_item.as_object_mut() else {
+            continue;
+        };
+
+        for method in HTTP_METHODS {
+            let Some(operation) = path_item.get_mut(*method) else {
+                continue;
+            };
+
+            let key = path_to_key(path, method);
+
+            if let Some(samples) = code_samples.get(&key) {
+                // Create x-codeSamples array according to Redocly spec
+                // Sort by language name for consistent output
+                let mut sorted_samples = samples.clone();
+                sorted_samples.sort_by(|a, b| a.lang.cmp(&b.lang));
+
+                let code_sample_array: Vec<Value> = sorted_samples
+                    .iter()
+                    .map(|sample| {
+                        json!({
+                            "lang": sample.lang,
+                            "source": sample.source
+                        })
+                    })
+                    .collect();
+
+                if let Some(op) = operation.as_object_mut() {
+                    op.insert("x-codeSamples".to_string(), json!(code_sample_array));
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
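The emitted vendor extension is the `x-codeSamples` array understood by Redocly-style renderers: one `{lang, source}` object per SDK, sorted by language name. After enrichment, a matched operation carries a field shaped like this (sources abridged), built here with `serde_json::json!`:

use serde_json::json;

fn main() {
    // Shape of the enrichment inserted on each matched operation.
    let op_extension = json!({
        "x-codeSamples": [
            { "lang": "JS",   "source": "const response = await client.getIndexes();" },
            { "lang": "cURL", "source": "curl -X GET 'MEILISEARCH_URL/indexes'" }
        ]
    });
    println!("{}", serde_json::to_string_pretty(&op_extension).unwrap());
}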
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_path_to_key() {
+        assert_eq!(path_to_key("/indexes", "GET"), "get_indexes");
+        assert_eq!(path_to_key("/indexes/{index_uid}", "GET"), "get_indexes_indexUid");
+        assert_eq!(
+            path_to_key("/indexes/{index_uid}/documents", "POST"),
+            "post_indexes_indexUid_documents"
+        );
+        assert_eq!(
+            path_to_key("/indexes/{index_uid}/documents/{document_id}", "GET"),
+            "get_indexes_indexUid_documents_documentId"
+        );
+        assert_eq!(
+            path_to_key("/indexes/{index_uid}/settings/stop-words", "GET"),
+            "get_indexes_indexUid_settings_stop_words"
+        );
+    }
+
+    #[test]
+    fn test_to_camel_case() {
+        assert_eq!(to_camel_case("index_uid"), "indexUid");
+        assert_eq!(to_camel_case("document_id"), "documentId");
+        assert_eq!(to_camel_case("task_uid"), "taskUid");
+    }
+
+    #[test]
+    fn test_parse_documentation_mapping() {
+        let yaml = r#"
+# get_indexes
+list_all_indexes_1: |-
+  curl \
+    -X GET 'MEILISEARCH_URL/indexes'
+# post_indexes
+create_an_index_1: |-
+  curl \
+    -X POST 'MEILISEARCH_URL/indexes'
+another_sample_id: |-
+  curl \
+    -X POST 'MEILISEARCH_URL/indexes'
+"#;
+        let mapping = parse_documentation_mapping(yaml);
+
+        assert_eq!(mapping.len(), 2);
+        assert!(mapping.contains_key("get_indexes"));
+        assert!(mapping.contains_key("post_indexes"));
+        assert_eq!(mapping["get_indexes"], vec!["list_all_indexes_1"]);
+        assert_eq!(mapping["post_indexes"], vec!["create_an_index_1", "another_sample_id"]);
+    }
+
+    #[test]
+    fn test_parse_code_samples() {
+        let yaml = r#"
+# This is a comment that should be ignored
+list_all_indexes_1: |-
+  const client = new MeiliSearch({
+    host: 'http://localhost:7700',
+    apiKey: 'masterKey'
+  });
+
+  const response = await client.getIndexes();
+
+# Another comment
+create_an_index_1: |-
+  const task = await client.createIndex('movies');
+"#;
+        let samples = parse_code_samples(yaml);
+
+        assert_eq!(samples.len(), 2);
+        assert!(samples.contains_key("list_all_indexes_1"));
+        assert!(samples.contains_key("create_an_index_1"));
+        assert!(samples["list_all_indexes_1"].contains("getIndexes"));
+        assert!(samples["create_an_index_1"].contains("createIndex"));
+    }
+}
@@ -178,7 +178,6 @@ pub fn get_arch() -> anyhow::Result<&'static str> {
     #[cfg(not(all(target_os = "linux", target_arch = "aarch64")))]
     #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))]
     #[cfg(not(all(target_os = "macos", target_arch = "aarch64")))]
-    #[cfg(not(all(target_os = "macos", target_arch = "x86_64")))]
     anyhow::bail!("unsupported platform")
 }

@@ -1,3 +1,3 @@
 [toolchain]
-channel = "1.91.1"
+channel = "1.89.0"
 components = ["clippy"]