Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-11-22 04:36:32 +00:00)

Compare commits: 30 commits, v1.22.3...upgrade-te
| SHA1 |
|---|
| 47fa91c39f |
| 2ac623f1e4 |
| 5b31960967 |
| b99410ee6c |
| 36ff335a4d |
| 0558d41930 |
| a61d9f8584 |
| fdb716c818 |
| 997e7d4bfd |
| 1ed2a654c9 |
| 39e796ee03 |
| 8e3a0c339f |
| 40b49543f1 |
| 1528cfe683 |
| c839b804fb |
| baa4c75af8 |
| a1424e1cb4 |
| ab91ea8b47 |
| 857bdffb1a |
| 4290901dea |
| b2a72b0363 |
| d649732acd |
| c98efe18c9 |
| 0d8b2edfb0 |
| 0e25398d3e |
| 72b6b73a91 |
| 3a2ec5f576 |
| 8240b76267 |
| 78e98a4e6c |
| d9177d4727 |
5 .github/ISSUE_TEMPLATE/new_feature_issue.md (vendored)
@@ -24,6 +24,11 @@ TBD
 - [ ] If not, add the `no db change` label to your PR, and you're good to merge.
 - [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
 
+### Reminders when adding features
+
+- [ ] Write unit tests using insta
+- [ ] Write declarative integration tests in [workloads/tests](https://github.com/meilisearch/meilisearch/tree/main/workloads/test). Specify the routes to call and then call `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` so that responses are automatically filled.
+
 ### Reminders when modifying the API
 
 - [ ] Update the openAPI file with utoipa:
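The declarative tests mentioned above are JSON workload files with `"type": "test"` (see the `Workload` enum introduced in `crates/xtask/src/common/workload.rs` further down in this diff). A minimal sketch of such a file, assuming the camelCase field names of the new `Command` struct in `crates/xtask/src/common/command.rs`; the name and route are illustrative, and the full `TestWorkload` shape is not shown in this diff:

```json
{
    "type": "test",
    "name": "example-upgrade-test",
    "commands": [
        {
            "route": "health",
            "method": "GET",
            "expectedStatus": 200,
            "synchronous": "WaitForResponse"
        }
    ]
}
```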
@@ -124,6 +124,7 @@ They are JSON files with the following structure (comments are not actually supp
 {
     // Name of the workload. Must be unique to the workload, as it will be used to group results on the dashboard.
     "name": "hackernews.ndjson_1M,no-threads",
+    "type": "bench",
     // Number of consecutive runs of the commands that should be performed.
     // Each run uses a fresh instance of Meilisearch and a fresh database.
     // Each run produces its own report file.
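Combined with the new `type` field, the opening of a bench workload now looks roughly like this (a sketch reusing the fields documented above; `run_count` and the extra CLI flag are illustrative values):

```json
{
    "name": "hackernews.ndjson_1M,no-threads",
    "type": "bench",
    "run_count": 5,
    "extra_cli_args": ["--max-indexing-threads=1"],
    "assets": {},
    "commands": []
}
```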
69 Cargo.lock (generated)
@@ -350,6 +350,21 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "78200ac3468a57d333cd0ea5dd398e25111194dcacd49208afca95c629a6311d"
 
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "anes"
 version = "0.1.6"
@@ -1106,6 +1121,20 @@ dependencies = [
  "whatlang",
 ]
 
+[[package]]
+name = "chrono"
+version = "0.4.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
+dependencies = [
+ "android-tzdata",
+ "iana-time-zone",
+ "js-sys",
+ "num-traits",
+ "wasm-bindgen",
+ "windows-link",
+]
+
 [[package]]
 name = "ciborium"
 version = "0.2.2"
@@ -2851,6 +2880,30 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "iana-time-zone"
+version = "0.1.63"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "log",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
+]
+
 [[package]]
 name = "icu_collections"
 version = "2.0.0"
@@ -5682,6 +5735,20 @@ name = "similar"
 version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa"
+dependencies = [
+ "bstr",
+ "unicode-segmentation",
+]
 
+[[package]]
+name = "similar-asserts"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b441962c817e33508847a22bd82f03a30cff43642dc2fae8b050566121eb9a"
+dependencies = [
+ "console",
+ "similar",
+]
+
 [[package]]
 name = "simple_asn1"
@@ -7284,6 +7351,7 @@ dependencies = [
  "anyhow",
  "build-info",
  "cargo_metadata",
+ "chrono",
  "clap",
  "futures-core",
  "futures-util",
@@ -7291,6 +7359,7 @@ dependencies = [
  "serde",
  "serde_json",
  "sha2",
+ "similar-asserts",
  "sysinfo",
  "time",
  "tokio",
@@ -42,3 +42,5 @@ tracing = "0.1.41"
 tracing-subscriber = "0.3.19"
 tracing-trace = { version = "0.1.0", path = "../tracing-trace" }
 uuid = { version = "1.17.0", features = ["v7", "serde"] }
+similar-asserts = "1.7.0"
+chrono = "0.4"
@@ -1,194 +0,0 @@ (file deleted)
use std::collections::BTreeMap;
use std::fmt::Display;
use std::io::Read as _;

use anyhow::{bail, Context as _};
use serde::Deserialize;

use super::assets::{fetch_asset, Asset};
use super::client::{Client, Method};

#[derive(Clone, Deserialize)]
pub struct Command {
    pub route: String,
    pub method: Method,
    #[serde(default)]
    pub body: Body,
    #[serde(default)]
    pub synchronous: SyncMode,
}

#[derive(Default, Clone, Deserialize)]
#[serde(untagged)]
pub enum Body {
    Inline {
        inline: serde_json::Value,
    },
    Asset {
        asset: String,
    },
    #[default]
    Empty,
}

impl Body {
    pub fn get(
        self,
        assets: &BTreeMap<String, Asset>,
        asset_folder: &str,
    ) -> anyhow::Result<Option<(Vec<u8>, &'static str)>> {
        Ok(match self {
            Body::Inline { inline: body } => Some((
                serde_json::to_vec(&body)
                    .context("serializing to bytes")
                    .context("while getting inline body")?,
                "application/json",
            )),
            Body::Asset { asset: name } => Some({
                let context = || format!("while getting body from asset '{name}'");
                let (mut file, format) =
                    fetch_asset(&name, assets, asset_folder).with_context(context)?;
                let mut buf = Vec::new();
                file.read_to_end(&mut buf).with_context(context)?;
                (buf, format.to_content_type(&name))
            }),
            Body::Empty => None,
        })
    }
}

impl Display for Command {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?} {} ({:?})", self.method, self.route, self.synchronous)
    }
}

#[derive(Default, Debug, Clone, Copy, Deserialize)]
pub enum SyncMode {
    DontWait,
    #[default]
    WaitForResponse,
    WaitForTask,
}

pub async fn run_batch(
    client: &Client,
    batch: &[Command],
    assets: &BTreeMap<String, Asset>,
    asset_folder: &str,
) -> anyhow::Result<()> {
    let [.., last] = batch else { return Ok(()) };
    let sync = last.synchronous;

    let mut tasks = tokio::task::JoinSet::new();

    for command in batch {
        // FIXME: you probably don't want to copy assets everytime here
        tasks.spawn({
            let client = client.clone();
            let command = command.clone();
            let assets = assets.clone();
            let asset_folder = asset_folder.to_owned();

            async move { run(client, command, &assets, &asset_folder).await }
        });
    }

    while let Some(result) = tasks.join_next().await {
        result
            .context("panicked while executing command")?
            .context("error while executing command")?;
    }

    match sync {
        SyncMode::DontWait => {}
        SyncMode::WaitForResponse => {}
        SyncMode::WaitForTask => wait_for_tasks(client).await?,
    }

    Ok(())
}

async fn wait_for_tasks(client: &Client) -> anyhow::Result<()> {
    loop {
        let response = client
            .get("tasks?statuses=enqueued,processing")
            .send()
            .await
            .context("could not wait for tasks")?;
        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response to JSON")
            .context("could not wait for tasks")?;
        match response.get("total") {
            Some(serde_json::Value::Number(number)) => {
                let number = number.as_u64().with_context(|| {
                    format!("waiting for tasks: could not parse 'total' as integer, got {}", number)
                })?;
                if number == 0 {
                    break;
                } else {
                    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
                    continue;
                }
            }
            Some(thing_else) => {
                bail!(format!(
                    "waiting for tasks: could not parse 'total' as a number, got '{thing_else}'"
                ))
            }
            None => {
                bail!(format!(
                    "waiting for tasks: expected response to contain 'total', got '{response}'"
                ))
            }
        }
    }
    Ok(())
}

#[tracing::instrument(skip(client, command, assets, asset_folder), fields(command = %command))]
pub async fn run(
    client: Client,
    mut command: Command,
    assets: &BTreeMap<String, Asset>,
    asset_folder: &str,
) -> anyhow::Result<()> {
    // mem::take the body here to leave an empty body in its place, so that command is not partially moved-out
    let body = std::mem::take(&mut command.body)
        .get(assets, asset_folder)
        .with_context(|| format!("while getting body for command {command}"))?;

    let request = client.request(command.method.into(), &command.route);

    let request = if let Some((body, content_type)) = body {
        request.body(body).header(reqwest::header::CONTENT_TYPE, content_type)
    } else {
        request
    };

    let response =
        request.send().await.with_context(|| format!("error sending command: {}", command))?;

    let code = response.status();
    if code.is_client_error() {
        tracing::error!(%command, %code, "error in workload file");
        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response as JSON")
            .context("parsing error in workload file when sending command")?;
        bail!("error in workload file: server responded with error code {code} and '{response}'")
    } else if code.is_server_error() {
        tracing::error!(%command, %code, "server error");
        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response as JSON")
            .context("parsing server error when sending command")?;
        bail!("server error: server responded with error code {code} and '{response}'")
    }

    Ok(())
}
@@ -7,9 +7,9 @@ use tokio::task::AbortHandle;
 use tracing_trace::processor::span_stats::CallStats;
 use uuid::Uuid;
 
-use super::client::Client;
 use super::env_info;
-use super::workload::Workload;
+use super::workload::BenchWorkload;
+use crate::common::client::Client;
 
 #[derive(Debug, Clone)]
 pub enum DashboardClient {
@@ -89,7 +89,7 @@ impl DashboardClient {
     pub async fn create_workload(
         &self,
         invocation_uuid: Uuid,
-        workload: &Workload,
+        workload: &BenchWorkload,
     ) -> anyhow::Result<Uuid> {
         let Self::Client(dashboard_client) = self else { return Ok(Uuid::now_v7()) };
 
@@ -1,38 +1,22 @@
-mod assets;
-mod client;
-mod command;
 mod dashboard;
 mod env_info;
-mod meili_process;
 mod workload;
 
-use std::io::LineWriter;
-use std::path::PathBuf;
+use crate::common::args::CommonArgs;
+use crate::common::logs::setup_logs;
+use crate::common::workload::Workload;
+use std::{path::PathBuf, sync::Arc};
 
-use anyhow::Context;
+use anyhow::{bail, Context};
 use clap::Parser;
-use tracing_subscriber::fmt::format::FmtSpan;
-use tracing_subscriber::layer::SubscriberExt;
-use tracing_subscriber::Layer;
 
-use self::client::Client;
-use self::workload::Workload;
+use crate::common::client::Client;
+pub use workload::BenchWorkload;
 
 pub fn default_http_addr() -> String {
     "127.0.0.1:7700".to_string()
 }
 
 pub fn default_report_folder() -> String {
     "./bench/reports/".into()
 }
 
-pub fn default_asset_folder() -> String {
-    "./bench/assets/".into()
-}
-
-pub fn default_log_filter() -> String {
-    "info".into()
-}
-
 pub fn default_dashboard_url() -> String {
     "http://localhost:9001".into()
 }
@@ -40,12 +24,13 @@ pub fn default_dashboard_url() -> String {
 /// Run benchmarks from a workload
 #[derive(Parser, Debug)]
 pub struct BenchDeriveArgs {
-    /// Filename of the workload file, pass multiple filenames
-    /// to run multiple workloads in the specified order.
-    ///
-    /// Each workload run will get its own report file.
-    #[arg(value_name = "WORKLOAD_FILE", last = false)]
-    workload_file: Vec<PathBuf>,
+    /// Common arguments shared with other commands
+    #[command(flatten)]
+    common: CommonArgs,
+
+    /// Meilisearch master keys
+    #[arg(long)]
+    pub master_key: Option<String>,
 
     /// URL of the dashboard.
     #[arg(long, default_value_t = default_dashboard_url())]
@@ -59,34 +44,14 @@ pub struct BenchDeriveArgs {
     #[arg(long, default_value_t = default_report_folder())]
     report_folder: String,
 
-    /// Directory to store the remote assets.
-    #[arg(long, default_value_t = default_asset_folder())]
-    asset_folder: String,
-
-    /// Log directives
-    #[arg(short, long, default_value_t = default_log_filter())]
-    log_filter: String,
-
     /// Benchmark dashboard API key
     #[arg(long)]
     api_key: Option<String>,
 
-    /// Meilisearch master keys
-    #[arg(long)]
-    master_key: Option<String>,
-
-    /// Authentication bearer for fetching assets
-    #[arg(long)]
-    assets_key: Option<String>,
-
     /// Reason for the benchmark invocation
     #[arg(short, long)]
     reason: Option<String>,
 
-    /// The maximum time in seconds we allow for fetching the task queue before timing out.
-    #[arg(long, default_value_t = 60)]
-    tasks_queue_timeout_secs: u64,
-
     /// The path to the binary to run.
     ///
     /// If unspecified, runs `cargo run` after building Meilisearch with `cargo build`.
@@ -95,17 +60,7 @@ pub struct BenchDeriveArgs {
 }
 
 pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
-    // setup logs
-    let filter: tracing_subscriber::filter::Targets =
-        args.log_filter.parse().context("invalid --log-filter")?;
-
-    let subscriber = tracing_subscriber::registry().with(
-        tracing_subscriber::fmt::layer()
-            .with_writer(|| LineWriter::new(std::io::stderr()))
-            .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
-            .with_filter(filter),
-    );
-    tracing::subscriber::set_global_default(subscriber).context("could not setup logging")?;
+    setup_logs(&args.common.log_filter)?;
 
     // fetch environment and build info
     let env = env_info::Environment::generate_from_current_config();
@@ -116,8 +71,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
     let _scope = rt.enter();
 
     // setup clients
-    let assets_client =
-        Client::new(None, args.assets_key.as_deref(), Some(std::time::Duration::from_secs(3600)))?; // 1h
+    let assets_client = Client::new(
+        None,
+        args.common.assets_key.as_deref(),
+        Some(std::time::Duration::from_secs(3600)), // 1h
+    )?;
 
     let dashboard_client = if args.no_dashboard {
         dashboard::DashboardClient::new_dry()
@@ -134,11 +92,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
         None,
     )?;
 
-    let meili_client = Client::new(
+    let meili_client = Arc::new(Client::new(
         Some("http://127.0.0.1:7700".into()),
         args.master_key.as_deref(),
-        Some(std::time::Duration::from_secs(args.tasks_queue_timeout_secs)),
-    )?;
+        Some(std::time::Duration::from_secs(args.common.tasks_queue_timeout_secs)),
+    )?);
 
     // enter runtime
 
@@ -146,11 +104,11 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
         dashboard_client.send_machine_info(&env).await?;
 
         let commit_message = build_info.commit_msg.unwrap_or_default().split('\n').next().unwrap();
-        let max_workloads = args.workload_file.len();
+        let max_workloads = args.common.workload_file.len();
         let reason: Option<&str> = args.reason.as_deref();
         let invocation_uuid = dashboard_client.create_invocation(build_info.clone(), commit_message, env, max_workloads, reason).await?;
 
-        tracing::info!(workload_count = args.workload_file.len(), "handling workload files");
+        tracing::info!(workload_count = args.common.workload_file.len(), "handling workload files");
 
         // main task
         let workload_runs = tokio::spawn(
@@ -158,13 +116,17 @@ pub fn run(args: BenchDeriveArgs) -> anyhow::Result<()> {
             let dashboard_client = dashboard_client.clone();
             let mut dashboard_urls = Vec::new();
             async move {
-                for workload_file in args.workload_file.iter() {
+                for workload_file in args.common.workload_file.iter() {
                     let workload: Workload = serde_json::from_reader(
                         std::fs::File::open(workload_file)
                             .with_context(|| format!("error opening {}", workload_file.display()))?,
                     )
                     .with_context(|| format!("error parsing {} as JSON", workload_file.display()))?;
 
+                    let Workload::Bench(workload) = workload else {
+                        bail!("workload file {} is not a bench workload", workload_file.display());
+                    };
+
                    let workload_name = workload.name.clone();
 
                    workload::execute(
@@ -1,24 +1,27 @@
-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashMap};
 use std::fs::File;
 use std::io::{Seek as _, Write as _};
 use std::path::Path;
+use std::sync::Arc;
 
 use anyhow::{bail, Context as _};
 use futures_util::TryStreamExt as _;
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 use serde_json::json;
 use tokio::task::JoinHandle;
 use uuid::Uuid;
 
-use super::assets::Asset;
-use super::client::Client;
-use super::command::SyncMode;
 use super::dashboard::DashboardClient;
 use super::BenchDeriveArgs;
-use crate::bench::{assets, meili_process};
+use crate::common::assets::{self, Asset};
+use crate::common::client::Client;
+use crate::common::command::{run_commands, Command};
+use crate::common::process::{self, delete_db, start_meili};
 
-#[derive(Deserialize)]
-pub struct Workload {
+/// A bench workload.
+/// Not to be confused with [a test workload](crate::test::workload::Workload).
+#[derive(Serialize, Deserialize, Debug)]
+pub struct BenchWorkload {
     pub name: String,
     pub run_count: u16,
     pub extra_cli_args: Vec<String>,
@@ -26,30 +29,33 @@ pub struct Workload {
     #[serde(default)]
     pub target: String,
     #[serde(default)]
-    pub precommands: Vec<super::command::Command>,
-    pub commands: Vec<super::command::Command>,
+    pub precommands: Vec<Command>,
+    pub commands: Vec<Command>,
 }
 
-async fn run_commands(
+async fn run_workload_commands(
     dashboard_client: &DashboardClient,
     logs_client: &Client,
-    meili_client: &Client,
+    meili_client: &Arc<Client>,
     workload_uuid: Uuid,
-    workload: &Workload,
+    workload: &BenchWorkload,
     args: &BenchDeriveArgs,
     run_number: u16,
 ) -> anyhow::Result<JoinHandle<anyhow::Result<File>>> {
     let report_folder = &args.report_folder;
     let workload_name = &workload.name;
+    let assets = Arc::new(workload.assets.clone());
+    let asset_folder = args.common.asset_folder.clone().leak();
 
-    for batch in workload
-        .precommands
-        .as_slice()
-        .split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
-    {
-        super::command::run_batch(meili_client, batch, &workload.assets, &args.asset_folder)
-            .await?;
-    }
+    run_commands(
+        meili_client,
+        &workload.precommands,
+        &assets,
+        asset_folder,
+        &mut HashMap::new(),
+        false,
+    )
+    .await?;
 
     std::fs::create_dir_all(report_folder)
         .with_context(|| format!("could not create report directory at {report_folder}"))?;
@@ -59,14 +65,15 @@ async fn run_workload_commands(
 
     let report_handle = start_report(logs_client, trace_filename, &workload.target).await?;
 
-    for batch in workload
-        .commands
-        .as_slice()
-        .split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
-    {
-        super::command::run_batch(meili_client, batch, &workload.assets, &args.asset_folder)
-            .await?;
-    }
+    run_commands(
+        meili_client,
+        &workload.commands,
+        &assets,
+        asset_folder,
+        &mut HashMap::new(),
+        false,
+    )
+    .await?;
 
     let processor =
         stop_report(dashboard_client, logs_client, workload_uuid, report_filename, report_handle)
@@ -81,14 +88,14 @@ pub async fn execute(
     assets_client: &Client,
     dashboard_client: &DashboardClient,
     logs_client: &Client,
-    meili_client: &Client,
+    meili_client: &Arc<Client>,
     invocation_uuid: Uuid,
     master_key: Option<&str>,
-    workload: Workload,
+    workload: BenchWorkload,
     args: &BenchDeriveArgs,
     binary_path: Option<&Path>,
 ) -> anyhow::Result<()> {
-    assets::fetch_assets(assets_client, &workload.assets, &args.asset_folder).await?;
+    assets::fetch_assets(assets_client, &workload.assets, &args.common.asset_folder).await?;
 
     let workload_uuid = dashboard_client.create_workload(invocation_uuid, &workload).await?;
 
@@ -129,38 +136,26 @@ pub async fn execute(
 async fn execute_run(
     dashboard_client: &DashboardClient,
     logs_client: &Client,
-    meili_client: &Client,
+    meili_client: &Arc<Client>,
     workload_uuid: Uuid,
     master_key: Option<&str>,
-    workload: &Workload,
+    workload: &BenchWorkload,
     args: &BenchDeriveArgs,
     binary_path: Option<&Path>,
     run_number: u16,
 ) -> anyhow::Result<tokio::task::JoinHandle<anyhow::Result<std::fs::File>>> {
-    meili_process::delete_db();
+    delete_db().await;
 
-    let run_command = match binary_path {
-        Some(binary_path) => tokio::process::Command::new(binary_path),
-        None => {
-            meili_process::build().await?;
-            let mut command = tokio::process::Command::new("cargo");
-            command
-                .arg("run")
-                .arg("--release")
-                .arg("-p")
-                .arg("meilisearch")
-                .arg("--bin")
-                .arg("meilisearch")
-                .arg("--");
-            command
-        }
-    };
+    let meilisearch = start_meili(
+        meili_client,
+        master_key,
+        &workload.extra_cli_args,
+        &workload.name,
+        binary_path,
+    )
+    .await?;
 
-    let meilisearch =
-        meili_process::start(meili_client, master_key, workload, &args.asset_folder, run_command)
-            .await?;
-
-    let processor = run_commands(
+    let processor = run_workload_commands(
         dashboard_client,
         logs_client,
         meili_client,
@@ -171,7 +166,7 @@ async fn execute_run(
     )
     .await?;
 
-    meili_process::kill(meilisearch).await;
+    process::kill_meili(meilisearch).await;
 
     tracing::info!(run_number, "Successful run");
 
36 crates/xtask/src/common/args.rs (new file)
@@ -0,0 +1,36 @@
use clap::Parser;
use std::path::PathBuf;

pub fn default_asset_folder() -> String {
    "./bench/assets/".into()
}

pub fn default_log_filter() -> String {
    "info".into()
}

#[derive(Parser, Debug, Clone)]
pub struct CommonArgs {
    /// Filename of the workload file, pass multiple filenames
    /// to run multiple workloads in the specified order.
    ///
    /// For benches, each workload run will get its own report file.
    #[arg(value_name = "WORKLOAD_FILE", last = false)]
    pub workload_file: Vec<PathBuf>,

    /// Directory to store the remote assets.
    #[arg(long, default_value_t = default_asset_folder())]
    pub asset_folder: String,

    /// Log directives
    #[arg(short, long, default_value_t = default_log_filter())]
    pub log_filter: String,

    /// Authentication bearer for fetching assets
    #[arg(long)]
    pub assets_key: Option<String>,

    /// The maximum time in seconds we allow for fetching the task queue before timing out.
    #[arg(long, default_value_t = 60)]
    pub tasks_queue_timeout_secs: u64,
}
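Because `CommonArgs` is flattened into both `BenchDeriveArgs` and the new `TestDeriveArgs`, the two subcommands now share these flags; for example, `--asset-folder` and `--log-filter` should work identically for `cargo xtask bench` and for the `cargo xtask test workloads/tests/YOUR_TEST.json --update-responses` invocation from the issue template above.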
@@ -3,21 +3,22 @@ use std::io::{Read as _, Seek as _, Write as _};
 
 use anyhow::{bail, Context};
 use futures_util::TryStreamExt as _;
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 use sha2::Digest;
 
 use super::client::Client;
 
-#[derive(Deserialize, Clone)]
+#[derive(Serialize, Deserialize, Clone, Debug)]
 pub struct Asset {
     pub local_location: Option<String>,
     pub remote_location: Option<String>,
-    #[serde(default)]
+    #[serde(default, skip_serializing_if = "AssetFormat::is_default")]
     pub format: AssetFormat,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub sha256: Option<String>,
 }
 
-#[derive(Deserialize, Default, Copy, Clone)]
+#[derive(Serialize, Deserialize, Default, Copy, Clone, Debug)]
 pub enum AssetFormat {
     #[default]
     Auto,
@@ -27,6 +28,10 @@ pub enum AssetFormat {
 }
 
 impl AssetFormat {
+    fn is_default(&self) -> bool {
+        matches!(self, AssetFormat::Auto)
+    }
+
     pub fn to_content_type(self, filename: &str) -> &'static str {
         match self {
             AssetFormat::Auto => Self::auto_detect(filename).to_content_type(filename),
@@ -166,7 +171,14 @@ fn check_sha256(name: &str, asset: &Asset, mut file: std::fs::File) -> anyhow::R
             }
         }
         None => {
-            tracing::warn!(sha256 = file_hash, "Skipping hash for asset {name} that doesn't have one. Please add it to workload file");
+            let msg = match name.starts_with("meilisearch-v") {
+                true => "Please add it to xtask/src/test/versions.rs",
+                false => "Please add it to workload file",
+            };
+            tracing::warn!(
+                sha256 = file_hash,
+                "Skipping hash for asset {name} that doesn't have one. {msg}"
+            );
             true
         }
     })
@@ -1,5 +1,5 @@
 use anyhow::Context;
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 
 #[derive(Debug, Clone)]
 pub struct Client {
@@ -61,7 +61,7 @@ impl Client {
     }
 }
 
-#[derive(Debug, Clone, Copy, Deserialize)]
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
 #[serde(rename_all = "SCREAMING_SNAKE_CASE")]
 pub enum Method {
     Get,
398 crates/xtask/src/common/command.rs (new file)
@@ -0,0 +1,398 @@
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
use std::io::Read as _;
use std::sync::Arc;

use anyhow::{bail, Context as _};
use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use similar_asserts::SimpleDiff;

use crate::common::assets::{fetch_asset, Asset};
use crate::common::client::{Client, Method};

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Command {
    pub route: String,
    pub method: Method,
    #[serde(default)]
    pub body: Body,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub expected_status: Option<u16>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub expected_response: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub register: HashMap<String, String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub api_key_variable: Option<String>,
    #[serde(default)]
    pub synchronous: SyncMode,
}

#[derive(Default, Clone, Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum Body {
    Inline {
        inline: serde_json::Value,
    },
    Asset {
        asset: String,
    },
    #[default]
    Empty,
}

impl Body {
    pub fn get(
        self,
        assets: &BTreeMap<String, Asset>,
        registered: &HashMap<String, Value>,
        asset_folder: &str,
    ) -> anyhow::Result<Option<(Vec<u8>, &'static str)>> {
        Ok(match self {
            Body::Inline { inline: mut body } => {
                fn insert_variables(value: &mut Value, registered: &HashMap<String, Value>) {
                    match value {
                        Value::Null | Value::Bool(_) | Value::Number(_) => (),
                        Value::String(s) => {
                            if s.starts_with("{{") && s.ends_with("}}") {
                                let name = s[2..s.len() - 2].trim();
                                if let Some(replacement) = registered.get(name) {
                                    *value = replacement.clone();
                                }
                            }
                        }
                        Value::Array(values) => {
                            for value in values {
                                insert_variables(value, registered);
                            }
                        }
                        Value::Object(map) => {
                            for (_key, value) in map.iter_mut() {
                                insert_variables(value, registered);
                            }
                        }
                    }
                }

                if !registered.is_empty() {
                    insert_variables(&mut body, registered);
                }

                Some((
                    serde_json::to_vec(&body)
                        .context("serializing to bytes")
                        .context("while getting inline body")?,
                    "application/json",
                ))
            }
            Body::Asset { asset: name } => Some({
                let context = || format!("while getting body from asset '{name}'");
                let (mut file, format) =
                    fetch_asset(&name, assets, asset_folder).with_context(context)?;
                let mut buf = Vec::new();
                file.read_to_end(&mut buf).with_context(context)?;
                (buf, format.to_content_type(&name))
            }),
            Body::Empty => None,
        })
    }
}

impl Display for Command {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?} {} ({:?})", self.method, self.route, self.synchronous)
    }
}

#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum SyncMode {
    DontWait,
    #[default]
    WaitForResponse,
    WaitForTask,
}

async fn run_batch(
    client: &Arc<Client>,
    batch: &[Command],
    assets: &Arc<BTreeMap<String, Asset>>,
    asset_folder: &'static str,
    registered: &mut HashMap<String, Value>,
    return_response: bool,
) -> anyhow::Result<Vec<(Value, StatusCode)>> {
    let [.., last] = batch else { return Ok(Vec::new()) };
    let sync = last.synchronous;
    let batch_len = batch.len();

    let mut tasks = Vec::with_capacity(batch.len());
    for command in batch.iter().cloned() {
        let client2 = Arc::clone(client);
        let assets2 = Arc::clone(assets);
        let needs_response = return_response || !command.register.is_empty();
        let registered2 = registered.clone(); // FIXME: cloning the whole map for each command is inefficient
        tasks.push(tokio::spawn(async move {
            run(&client2, &command, &assets2, registered2, asset_folder, needs_response).await
        }));
    }

    let mut outputs = Vec::with_capacity(if return_response { batch_len } else { 0 });
    for (task, command) in tasks.into_iter().zip(batch.iter()) {
        let output = task.await.context("task panicked")??;
        if let Some(output) = output {
            for (name, path) in &command.register {
                let value = output
                    .0
                    .pointer(path)
                    .with_context(|| format!("could not find path '{path}' in response (required to register '{name}')"))?
                    .clone();
                registered.insert(name.clone(), value);
            }

            if return_response {
                outputs.push(output);
            }
        }
    }

    match sync {
        SyncMode::DontWait => {}
        SyncMode::WaitForResponse => {}
        SyncMode::WaitForTask => wait_for_tasks(client).await?,
    }

    Ok(outputs)
}

async fn wait_for_tasks(client: &Client) -> anyhow::Result<()> {
    loop {
        let response = client
            .get("tasks?statuses=enqueued,processing")
            .send()
            .await
            .context("could not wait for tasks")?;
        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response to JSON")
            .context("could not wait for tasks")?;
        match response.get("total") {
            Some(serde_json::Value::Number(number)) => {
                let number = number.as_u64().with_context(|| {
                    format!("waiting for tasks: could not parse 'total' as integer, got {}", number)
                })?;
                if number == 0 {
                    break;
                } else {
                    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
                    continue;
                }
            }
            Some(thing_else) => {
                bail!(format!(
                    "waiting for tasks: could not parse 'total' as a number, got '{thing_else}'"
                ))
            }
            None => {
                bail!(format!(
                    "waiting for tasks: expected response to contain 'total', got '{response}'"
                ))
            }
        }
    }
    Ok(())
}

fn json_eq_ignore(reference: &Value, value: &Value) -> bool {
    match reference {
        Value::Null | Value::Bool(_) | Value::Number(_) => reference == value,
        Value::String(s) => (s.starts_with('[') && s.ends_with(']')) || reference == value,
        Value::Array(values) => match value {
            Value::Array(other_values) => {
                if values.len() != other_values.len() {
                    return false;
                }
                for (value, other_value) in values.iter().zip(other_values.iter()) {
                    if !json_eq_ignore(value, other_value) {
                        return false;
                    }
                }
                true
            }
            _ => false,
        },
        Value::Object(map) => match value {
            Value::Object(other_map) => {
                if map.len() != other_map.len() {
                    return false;
                }
                for (key, value) in map.iter() {
                    match other_map.get(key) {
                        Some(other_value) => {
                            if !json_eq_ignore(value, other_value) {
                                return false;
                            }
                        }
                        None => return false,
                    }
                }
                true
            }
            _ => false,
        },
    }
}

#[tracing::instrument(skip(client, command, assets, registered, asset_folder), fields(command = %command))]
pub async fn run(
    client: &Client,
    command: &Command,
    assets: &BTreeMap<String, Asset>,
    registered: HashMap<String, Value>,
    asset_folder: &str,
    return_value: bool,
) -> anyhow::Result<Option<(Value, StatusCode)>> {
    // Try to replace variables in the route
    let mut route = &command.route;
    let mut owned_route;
    if !registered.is_empty() {
        while let (Some(pos1), Some(pos2)) = (route.find("{{"), route.rfind("}}")) {
            if pos2 > pos1 {
                let name = route[pos1 + 2..pos2].trim();
                if let Some(replacement) = registered.get(name).and_then(|r| r.as_str()) {
                    let mut new_route = String::new();
                    new_route.push_str(&route[..pos1]);
                    new_route.push_str(replacement);
                    new_route.push_str(&route[pos2 + 2..]);
                    owned_route = new_route;
                    route = &owned_route;
                    continue;
                }
            }
            break;
        }
    }

    // Clone the body here so that command is not partially moved-out
    let body = command
        .body
        .clone()
        .get(assets, &registered, asset_folder)
        .with_context(|| format!("while getting body for command {command}"))?;

    let mut request = client.request(command.method.into(), route);

    // Replace the api key
    if let Some(var_name) = &command.api_key_variable {
        if let Some(api_key) = registered.get(var_name).and_then(|v| v.as_str()) {
            request = request.header("Authorization", format!("Bearer {api_key}"));
        } else {
            bail!("could not find API key variable '{var_name}' in registered values");
        }
    }

    let request = if let Some((body, content_type)) = body {
        request.body(body).header(reqwest::header::CONTENT_TYPE, content_type)
    } else {
        request
    };

    let response =
        request.send().await.with_context(|| format!("error sending command: {}", command))?;

    let code = response.status();

    if !return_value {
        if let Some(expected_status) = command.expected_status {
            if code.as_u16() != expected_status {
                let response = response
                    .text()
                    .await
                    .context("could not read response body as text")
                    .context("reading response body when checking expected status")?;
                bail!("unexpected status code: got {}, expected {expected_status}, response body: '{response}'", code.as_u16());
            }
        } else if code.is_client_error() {
            tracing::error!(%command, %code, "error in workload file");
            let response: serde_json::Value = response
                .json()
                .await
                .context("could not deserialize response as JSON")
                .context("parsing error in workload file when sending command")?;
            bail!(
                "error in workload file: server responded with error code {code} and '{response}'"
            )
        } else if code.is_server_error() {
            tracing::error!(%command, %code, "server error");
            let response: serde_json::Value = response
                .json()
                .await
                .context("could not deserialize response as JSON")
                .context("parsing server error when sending command")?;
            bail!("server error: server responded with error code {code} and '{response}'")
        }
    }

    if let Some(expected_response) = &command.expected_response {
        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response as JSON")
            .context("parsing response when checking expected response")?;
        if return_value {
            return Ok(Some((response, code)));
        }
        if !json_eq_ignore(expected_response, &response) {
            let expected_pretty = serde_json::to_string_pretty(expected_response)
                .context("serializing expected response as pretty JSON")?;
            let response_pretty = serde_json::to_string_pretty(&response)
                .context("serializing response as pretty JSON")?;
            let diff = SimpleDiff::from_str(&expected_pretty, &response_pretty, "expected", "got");
            bail!("unexpected response:\n{diff}");
        }
    } else if return_value {
        let response: serde_json::Value = response
            .json()
            .await
            .context("could not deserialize response as JSON")
            .context("parsing response when recording expected response")?;
        return Ok(Some((response, code)));
    }

    Ok(None)
}

pub async fn run_commands(
    client: &Arc<Client>,
    commands: &[Command],
    assets: &Arc<BTreeMap<String, Asset>>,
    asset_folder: &'static str,
    registered: &mut HashMap<String, Value>,
    return_response: bool,
) -> anyhow::Result<Vec<(Value, StatusCode)>> {
    let mut responses = Vec::new();
    for batch in
        commands.split_inclusive(|command| !matches!(command.synchronous, SyncMode::DontWait))
    {
        let mut new_responses =
            run_batch(client, batch, assets, asset_folder, registered, return_response).await?;
        responses.append(&mut new_responses);
    }

    Ok(responses)
}

pub fn health_command() -> Command {
    Command {
        route: "/health".into(),
        method: crate::common::client::Method::Get,
        body: Default::default(),
        register: HashMap::new(),
        synchronous: SyncMode::WaitForResponse,
        expected_status: None,
        expected_response: None,
        api_key_variable: None,
    }
}
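The `register` and `{{variable}}` machinery above is easiest to see from a workload's point of view. A sketch (field names from the `Command` struct above; routes, pointers, and values are illustrative): the first command registers a value extracted from its response with a JSON pointer, and the second authenticates with it via `apiKeyVariable`. Note also that strings like `"[ignored]"` in `expectedResponse` match any value, because `json_eq_ignore` treats bracketed reference strings as wildcards (a real response would contain more fields than shown here).

```json
[
    {
        "route": "keys",
        "method": "POST",
        "body": { "inline": { "actions": ["search"], "indexes": ["*"], "expiresAt": null } },
        "register": { "searchKey": "/key" },
        "synchronous": "WaitForResponse"
    },
    {
        "route": "indexes/movies/search",
        "method": "POST",
        "apiKeyVariable": "searchKey",
        "body": { "inline": { "q": "batman" } },
        "expectedStatus": 200,
        "expectedResponse": { "hits": [], "query": "batman", "processingTimeMs": "[ignored]" },
        "synchronous": "WaitForResponse"
    }
]
```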
18 crates/xtask/src/common/logs.rs (new file)
@@ -0,0 +1,18 @@
use anyhow::Context;
use std::io::LineWriter;
use tracing_subscriber::{fmt::format::FmtSpan, layer::SubscriberExt, Layer};

pub fn setup_logs(log_filter: &str) -> anyhow::Result<()> {
    let filter: tracing_subscriber::filter::Targets =
        log_filter.parse().context("invalid --log-filter")?;

    let subscriber = tracing_subscriber::registry().with(
        tracing_subscriber::fmt::layer()
            .with_writer(|| LineWriter::new(std::io::stderr()))
            .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
            .with_filter(filter),
    );
    tracing::subscriber::set_global_default(subscriber).context("could not setup logging")?;

    Ok(())
}
7 crates/xtask/src/common/mod.rs (new file)
@@ -0,0 +1,7 @@
pub mod args;
pub mod assets;
pub mod client;
pub mod command;
pub mod logs;
pub mod process;
pub mod workload;
@@ -1,18 +1,18 @@
-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashMap};
 use std::path::Path;
 use std::time::Duration;
 
 use anyhow::{bail, Context as _};
-use tokio::process::Command;
+use tokio::process::Command as TokioCommand;
 use tokio::time;
 
-use super::assets::Asset;
-use super::client::Client;
-use super::workload::Workload;
+use crate::common::client::Client;
+use crate::common::command::{health_command, run as run_command};
 
-pub async fn kill(mut meilisearch: tokio::process::Child) {
+pub async fn kill_meili(mut meilisearch: tokio::process::Child) {
     let Some(id) = meilisearch.id() else { return };
 
-    match Command::new("kill").args(["--signal=TERM", &id.to_string()]).spawn() {
+    match TokioCommand::new("kill").args(["--signal=TERM", &id.to_string()]).spawn() {
         Ok(mut cmd) => {
             let Err(error) = cmd.wait().await else { return };
             tracing::warn!(
@@ -49,8 +49,8 @@ pub async fn kill_meili(mut meilisearch: tokio::process::Child) {
 }
 
 #[tracing::instrument]
-pub async fn build() -> anyhow::Result<()> {
-    let mut command = Command::new("cargo");
+async fn build() -> anyhow::Result<()> {
+    let mut command = TokioCommand::new("cargo");
     command.arg("build").arg("--release").arg("-p").arg("meilisearch");
 
     command.kill_on_drop(true);
@@ -64,29 +64,61 @@
     Ok(())
 }
 
-#[tracing::instrument(skip(client, master_key, workload), fields(workload = workload.name))]
-pub async fn start(
+#[tracing::instrument(skip(client, master_key), fields(workload = _workload))]
+pub async fn start_meili(
     client: &Client,
     master_key: Option<&str>,
-    workload: &Workload,
-    asset_folder: &str,
-    mut command: Command,
+    extra_cli_args: &[String],
+    _workload: &str,
+    binary_path: Option<&Path>,
 ) -> anyhow::Result<tokio::process::Child> {
+    let mut command = match binary_path {
+        Some(binary_path) => tokio::process::Command::new(binary_path),
+        None => {
+            build().await?;
+            let mut command = tokio::process::Command::new("cargo");
+            command
+                .arg("run")
+                .arg("--release")
+                .arg("-p")
+                .arg("meilisearch")
+                .arg("--bin")
+                .arg("meilisearch")
+                .arg("--");
+            command
+        }
+    };
+
     command.arg("--db-path").arg("./_xtask_benchmark.ms");
     if let Some(master_key) = master_key {
         command.arg("--master-key").arg(master_key);
     }
     command.arg("--experimental-enable-logs-route");
 
-    for extra_arg in workload.extra_cli_args.iter() {
+    for extra_arg in extra_cli_args.iter() {
         command.arg(extra_arg);
     }
 
     command.kill_on_drop(true);
 
+    #[cfg(unix)]
+    {
+        use std::os::unix::fs::PermissionsExt;
+        if let Some(binary_path) = binary_path {
+            let mut perms = tokio::fs::metadata(binary_path)
+                .await
+                .with_context(|| format!("could not get metadata for {binary_path:?}"))?
+                .permissions();
+            perms.set_mode(perms.mode() | 0o111);
+            tokio::fs::set_permissions(binary_path, perms)
+                .await
+                .with_context(|| format!("could not set permissions for {binary_path:?}"))?;
+        }
+    }
+
     let mut meilisearch = command.spawn().context("Error starting Meilisearch")?;
 
-    wait_for_health(client, &mut meilisearch, &workload.assets, asset_folder).await?;
+    wait_for_health(client, &mut meilisearch).await?;
 
     Ok(meilisearch)
 }
@@ -94,11 +126,11 @@ pub async fn start_meili(
 async fn wait_for_health(
     client: &Client,
     meilisearch: &mut tokio::process::Child,
-    assets: &BTreeMap<String, Asset>,
-    asset_folder: &str,
 ) -> anyhow::Result<()> {
     for i in 0..100 {
-        let res = super::command::run(client.clone(), health_command(), assets, asset_folder).await;
+        let res =
+            run_command(client, &health_command(), &BTreeMap::new(), HashMap::new(), "", false)
+                .await;
         if res.is_ok() {
             // check that this is actually the current Meilisearch instance that answered us
             if let Some(exit_code) =
@@ -122,15 +154,6 @@ async fn wait_for_health(
     bail!("meilisearch is not responding")
 }
 
-fn health_command() -> super::command::Command {
-    super::command::Command {
-        route: "/health".into(),
-        method: super::client::Method::Get,
-        body: Default::default(),
-        synchronous: super::command::SyncMode::WaitForResponse,
-    }
-}
-
-pub fn delete_db() {
-    let _ = std::fs::remove_dir_all("./_xtask_benchmark.ms");
+pub async fn delete_db() {
+    let _ = tokio::fs::remove_dir_all("./_xtask_benchmark.ms").await;
 }
11 crates/xtask/src/common/workload.rs (new file)
@@ -0,0 +1,11 @@
use serde::{Deserialize, Serialize};

use crate::{bench::BenchWorkload, test::TestWorkload};

#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "type")]
#[serde(rename_all = "camelCase")]
pub enum Workload {
    Bench(BenchWorkload),
    Test(TestWorkload),
}
@@ -1 +1,3 @@
 pub mod bench;
+pub mod common;
+pub mod test;
@@ -1,7 +1,7 @@
 use std::collections::HashSet;
 
 use clap::Parser;
-use xtask::bench::BenchDeriveArgs;
+use xtask::{bench::BenchDeriveArgs, test::TestDeriveArgs};
 
 /// List features available in the workspace
 #[derive(Parser, Debug)]
@@ -20,6 +20,7 @@ struct ListFeaturesDeriveArgs {
 enum Command {
     ListFeatures(ListFeaturesDeriveArgs),
     Bench(BenchDeriveArgs),
+    Test(TestDeriveArgs),
 }
 
 fn main() -> anyhow::Result<()> {
@@ -27,6 +28,7 @@ fn main() -> anyhow::Result<()> {
     match args {
         Command::ListFeatures(args) => list_features(args),
         Command::Bench(args) => xtask::bench::run(args)?,
+        Command::Test(args) => xtask::test::run(args)?,
     }
     Ok(())
 }
100 crates/xtask/src/test/mod.rs (new file)
@@ -0,0 +1,100 @@
use std::{sync::Arc, time::Duration};

use crate::{
    common::{
        args::CommonArgs, client::Client, command::SyncMode, logs::setup_logs, workload::Workload,
    },
    test::workload::CommandOrUpgrade,
};
use anyhow::{bail, Context};
use clap::Parser;

mod versions;
mod workload;

pub use workload::TestWorkload;

/// Run tests from a workload
#[derive(Parser, Debug)]
pub struct TestDeriveArgs {
    /// Common arguments shared with other commands
    #[command(flatten)]
    common: CommonArgs,

    /// Enables workloads to be rewritten in place to update expected responses.
    #[arg(short, long, default_value_t = false)]
    pub update_responses: bool,

    /// Enables workloads to be rewritten in place to add missing expected responses.
    #[arg(short, long, default_value_t = false)]
    pub add_missing_responses: bool,
}

pub fn run(args: TestDeriveArgs) -> anyhow::Result<()> {
    let rt = tokio::runtime::Builder::new_current_thread().enable_io().enable_time().build()?;
    let _scope = rt.enter();

    rt.block_on(async { run_inner(args).await })?;

    Ok(())
}

async fn run_inner(args: TestDeriveArgs) -> anyhow::Result<()> {
    setup_logs(&args.common.log_filter)?;

    // setup clients
    let assets_client = Arc::new(Client::new(
        None,
        args.common.assets_key.as_deref(),
        Some(Duration::from_secs(3600)), // 1h
    )?);

    let meili_client = Arc::new(Client::new(
        Some("http://127.0.0.1:7700".into()),
        Some("masterKey"),
        Some(Duration::from_secs(args.common.tasks_queue_timeout_secs)),
    )?);

    let asset_folder = args.common.asset_folder.clone().leak();
    for workload_file in &args.common.workload_file {
        let string = tokio::fs::read_to_string(workload_file)
            .await
            .with_context(|| format!("error reading {}", workload_file.display()))?;
        let workload: Workload = serde_json::from_str(string.trim())
            .with_context(|| format!("error parsing {} as JSON", workload_file.display()))?;

        let Workload::Test(workload) = workload else {
            bail!("workload file {} is not a test workload", workload_file.display());
        };

        let has_upgrade =
            workload.commands.iter().any(|c| matches!(c, CommandOrUpgrade::Upgrade { .. }));

        let has_faulty_register = workload.commands.iter().any(|c| {
            matches!(c, CommandOrUpgrade::Command(cmd) if cmd.synchronous == SyncMode::DontWait && !cmd.register.is_empty())
        });
        if has_faulty_register {
            bail!("workload {} contains commands that register values but are marked as --dont-wait. This is not supported because we cannot guarantee the value will be registered before the next command runs.", workload.name);
        }

        let name = workload.name.clone();
        match workload.run(&args, &assets_client, &meili_client, asset_folder).await {
            Ok(_) => {
                match args.update_responses {
                    true => println!("🛠️ Workload {name} was updated"),
                    false => println!("✅ Workload {name} passed"),
                }
                if !has_upgrade {
                    println!("⚠️ Warning: this workload doesn't contain an upgrade. The whole point of these tests is to test upgrades! Please add one.");
                }
            }
            Err(error) => {
                println!("❌ Workload {name} failed: {error}");
                println!("💡 Is this intentional? If so, rerun with --update-responses to update the workload files.");
                return Err(error);
            }
        }
    }

    Ok(())
}
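In day-to-day use this maps onto the issue-template reminder above: run `cargo xtask test workloads/tests/YOUR_TEST.json`, and pass `--update-responses` to rewrite the expected responses in place (or `--add-missing-responses` to only fill in absent ones), exactly as declared by `TestDeriveArgs`.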
197
crates/xtask/src/test/versions.rs
Normal file
197
crates/xtask/src/test/versions.rs
Normal file
@@ -0,0 +1,197 @@
|
||||
use std::{collections::BTreeMap, fmt::Display, path::PathBuf};

use crate::common::assets::{Asset, AssetFormat};
use anyhow::Context;
use cargo_metadata::semver::Version;
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug)]
pub enum VersionOrLatest {
    Version(Version),
    Latest,
}

impl<'a> Deserialize<'a> for VersionOrLatest {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'a>,
    {
        let s: &str = Deserialize::deserialize(deserializer)?;

        if s.eq_ignore_ascii_case("latest") {
            Ok(VersionOrLatest::Latest)
        } else {
            let version = Version::parse(s).map_err(serde::de::Error::custom)?;
            Ok(VersionOrLatest::Version(version))
        }
    }
}

impl Serialize for VersionOrLatest {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            VersionOrLatest::Version(v) => serializer.serialize_str(&v.to_string()),
            VersionOrLatest::Latest => serializer.serialize_str("latest"),
        }
    }
}

impl VersionOrLatest {
    pub fn binary_path(&self, asset_folder: &str) -> anyhow::Result<Option<PathBuf>> {
        match self {
            VersionOrLatest::Version(version) => {
                let mut asset_folder: PathBuf =
                    asset_folder.parse().context("parsing asset folder")?;
                let arch = get_arch()?;
                let local_filename = format!("meilisearch-{version}-{arch}");
                asset_folder.push(local_filename);
                Ok(Some(asset_folder))
            }
            VersionOrLatest::Latest => Ok(None),
        }
    }
}

impl Display for VersionOrLatest {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VersionOrLatest::Version(v) => v.fmt(f),
            VersionOrLatest::Latest => write!(f, "latest"),
        }
    }
}

async fn get_sha256(version: &Version, asset_name: &str) -> anyhow::Result<String> {
    // If the version is lower than 1.15 there is no point in trying to get the sha256; GitHub didn't support it
    if *version < Version::parse("1.15.0")? {
        anyhow::bail!("version is lower than 1.15, sha256 not available");
    }

    #[derive(Deserialize)]
    struct GithubReleaseAsset {
        name: String,
        digest: Option<String>,
    }

    #[derive(Deserialize)]
    struct GithubRelease {
        assets: Vec<GithubReleaseAsset>,
    }

    let url =
        format!("https://api.github.com/repos/meilisearch/meilisearch/releases/tags/v{version}");
    let client = reqwest::Client::builder()
        .user_agent("Meilisearch bench xtask")
        .build()
        .context("failed to build reqwest client")?;
    let body = client.get(url).send().await?.text().await?;
    let data: GithubRelease = serde_json::from_str(&body)?;

    let digest = data
        .assets
        .into_iter()
        .find(|asset| asset.name.as_str() == asset_name)
        .with_context(|| format!("asset {asset_name} not found in release v{version}"))?
        .digest
        .with_context(|| format!("asset {asset_name} has no digest"))?;

    let sha256 =
        digest.strip_prefix("sha256:").map(|s| s.to_string()).context("invalid sha256 format")?;

    Ok(sha256)
}

pub fn get_arch() -> anyhow::Result<&'static str> {
    let arch;

    // linux-aarch64
    #[cfg(all(target_os = "linux", target_arch = "aarch64"))]
    {
        arch = "linux-aarch64";
    }

    // linux-amd64
    #[cfg(all(target_os = "linux", target_arch = "x86_64"))]
    {
        arch = "linux-amd64";
    }

    // macos-amd64
    #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
    {
        arch = "macos-amd64";
    }

    // macos-apple-silicon
    #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
    {
        arch = "macos-apple-silicon";
    }

    // windows-amd64
    #[cfg(all(target_os = "windows", target_arch = "x86_64"))]
    {
        arch = "windows-amd64";
    }

    if arch.is_empty() {
        anyhow::bail!("unsupported platform");
    }

    Ok(arch)
}

async fn add_asset(assets: &mut BTreeMap<String, Asset>, version: &Version) -> anyhow::Result<()> {
    let arch = get_arch()?;
    let local_filename = format!("meilisearch-{version}-{arch}");
    if assets.contains_key(&local_filename) {
        return Ok(());
    }

    let filename = format!("meilisearch-{arch}");

    // Try to get the sha256, but it may fail if GitHub is rate limiting us.
    // We hardcode some values to speed up tests and avoid hitting GitHub.
    // Also, versions prior to 1.15 don't have a sha256 available anyway.
    let sha256 = match local_filename.as_str() {
        "meilisearch-1.12.0-macos-apple-silicon" => {
            Some(String::from("3b384707a5df9edf66f9157f0ddb70dcd3ac84d4887149169cf93067d06717b7"))
        }
        _ => match get_sha256(version, &filename).await {
            Ok(sha256) => Some(sha256),
            Err(err) => {
                tracing::warn!("failed to get sha256 for version {version}: {err}");
                None
            }
        },
    };

    let url = format!(
        "https://github.com/meilisearch/meilisearch/releases/download/v{version}/{filename}"
    );

    let asset = Asset {
        local_location: Some(local_filename.clone()),
        remote_location: Some(url),
        format: AssetFormat::Raw,
        sha256,
    };

    assets.insert(local_filename, asset);

    Ok(())
}

pub async fn expand_assets_with_versions(
    assets: &mut BTreeMap<String, Asset>,
    versions: &[Version],
) -> anyhow::Result<()> {
    for version in versions {
        add_asset(assets, version).await?;
    }

    Ok(())
}
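Since `VersionOrLatest` implements `Serialize`/`Deserialize` by hand, a quick round-trip check makes the intended wire format explicit (a sketch to illustrate the code above, not part of the diff):

```rust
use cargo_metadata::semver::Version;

fn main() -> anyhow::Result<()> {
    // "latest" (case-insensitively) maps to VersionOrLatest::Latest
    // and always serializes back as the lowercase string "latest"...
    let latest: VersionOrLatest = serde_json::from_str("\"LATEST\"")?;
    assert_eq!(serde_json::to_string(&latest)?, "\"latest\"");

    // ...anything else must parse as a semver version.
    let pinned: VersionOrLatest = serde_json::from_str("\"1.12.0\"")?;
    assert_eq!(serde_json::to_string(&pinned)?, "\"1.12.0\"");
    assert!(matches!(pinned, VersionOrLatest::Version(v) if v == Version::new(1, 12, 0)));
    Ok(())
}
```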
crates/xtask/src/test/workload.rs (new file, 201 lines)
@@ -0,0 +1,201 @@
use anyhow::Context;
use cargo_metadata::semver::Version;
use chrono::DateTime;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::{
    collections::{BTreeMap, HashMap},
    io::Write,
    sync::Arc,
};

use crate::{
    common::{
        assets::{fetch_assets, Asset},
        client::Client,
        command::{run_commands, Command},
        process::{self, delete_db, kill_meili},
        workload::Workload,
    },
    test::{
        versions::{expand_assets_with_versions, VersionOrLatest},
        TestDeriveArgs,
    },
};

#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
pub enum CommandOrUpgrade {
    Command(Command),
    Upgrade { upgrade: VersionOrLatest },
}

enum CommandOrUpgradeVec<'a> {
    Commands(Vec<&'a mut Command>),
    Upgrade(VersionOrLatest),
}

fn produce_reference_value(value: &mut Value) {
    match value {
        Value::Null | Value::Bool(_) | Value::Number(_) => (),
        Value::String(string) => {
            if DateTime::parse_from_rfc3339(string.as_str()).is_ok() {
                *string = String::from("[timestamp]");
            } else if uuid::Uuid::parse_str(string).is_ok() {
                *string = String::from("[uuid]");
            }
        }
        Value::Array(values) => {
            for value in values {
                produce_reference_value(value);
            }
        }
        Value::Object(map) => {
            for (key, value) in map.iter_mut() {
                match key.as_str() {
                    "processingTimeMs" => {
                        *value = Value::String(String::from("[duration]"));
                        continue;
                    }
                    _ => produce_reference_value(value),
                }
            }
        }
    }
}

/// A test workload.
/// Not to be confused with [a bench workload](crate::bench::workload::Workload).
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct TestWorkload {
    pub name: String,
    pub initial_version: Version,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub master_key: Option<String>,
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub assets: BTreeMap<String, Asset>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub commands: Vec<CommandOrUpgrade>,
}

impl TestWorkload {
    pub async fn run(
        mut self,
        args: &TestDeriveArgs,
        assets_client: &Client,
        meili_client: &Arc<Client>,
        asset_folder: &'static str,
    ) -> anyhow::Result<()> {
        // Group commands between upgrades
        let mut commands_or_upgrade = Vec::new();
        let mut current_commands = Vec::new();
        let mut all_versions = vec![self.initial_version.clone()];
        for command_or_upgrade in &mut self.commands {
            match command_or_upgrade {
                CommandOrUpgrade::Command(command) => current_commands.push(command),
                CommandOrUpgrade::Upgrade { upgrade } => {
                    if !current_commands.is_empty() {
                        commands_or_upgrade.push(CommandOrUpgradeVec::Commands(current_commands));
                        current_commands = Vec::new();
                    }
                    commands_or_upgrade.push(CommandOrUpgradeVec::Upgrade(upgrade.clone()));
                    if let VersionOrLatest::Version(upgrade) = upgrade {
                        all_versions.push(upgrade.clone());
                    }
                }
            }
        }
        if !current_commands.is_empty() {
            commands_or_upgrade.push(CommandOrUpgradeVec::Commands(current_commands));
        }

        // Fetch assets
        expand_assets_with_versions(&mut self.assets, &all_versions).await?;
        fetch_assets(assets_client, &self.assets, &args.common.asset_folder).await?;

        // Run server
        delete_db().await;
        let binary_path = VersionOrLatest::Version(self.initial_version.clone())
            .binary_path(&args.common.asset_folder)?;
        let mut process = process::start_meili(
            meili_client,
            Some("masterKey"),
            &[],
            &self.name,
            binary_path.as_deref(),
        )
        .await?;

        let assets = Arc::new(self.assets.clone());
        let return_responses = args.add_missing_responses || args.update_responses;
        let mut registered = HashMap::new();
        for command_or_upgrade in commands_or_upgrade {
            match command_or_upgrade {
                CommandOrUpgradeVec::Commands(commands) => {
                    let cloned: Vec<_> = commands.iter().map(|c| (*c).clone()).collect();
                    let responses = run_commands(
                        meili_client,
                        &cloned,
                        &assets,
                        asset_folder,
                        &mut registered,
                        return_responses,
                    )
                    .await?;
                    if return_responses {
                        assert_eq!(responses.len(), cloned.len());
                        for (command, (mut response, status)) in commands.into_iter().zip(responses)
                        {
                            if args.update_responses
                                || (args.add_missing_responses
                                    && command.expected_response.is_none())
                            {
                                produce_reference_value(&mut response);
                                command.expected_response = Some(response);
                                command.expected_status = Some(status.as_u16());
                            }
                        }
                    }
                }
                CommandOrUpgradeVec::Upgrade(version) => {
                    kill_meili(process).await;
                    let binary_path = version.binary_path(&args.common.asset_folder)?;
                    process = process::start_meili(
                        meili_client,
                        Some("masterKey"),
                        &[String::from("--experimental-dumpless-upgrade")],
                        &self.name,
                        binary_path.as_deref(),
                    )
                    .await?;
                    tracing::info!("Upgraded to {version}");
                }
            }
        }

        // Write back the workload if needed
        if return_responses {
            // Filter out the assets we added for the versions
            self.assets.retain(|_, asset| {
                asset.local_location.as_ref().is_none_or(|a| !a.starts_with("meilisearch-"))
            });

            let workload = Workload::Test(self);
            let mut file =
                std::fs::File::create(&args.common.workload_file[0]).with_context(|| {
                    format!("could not open {}", args.common.workload_file[0].display())
                })?;
            serde_json::to_writer_pretty(&file, &workload).with_context(|| {
                format!("could not write to {}", args.common.workload_file[0].display())
            })?;
            file.write_all(b"\n").with_context(|| {
                format!("could not write to {}", args.common.workload_file[0].display())
            })?;
            tracing::info!("Updated workload file {}", args.common.workload_file[0].display());
        }

        Ok(())
    }
}
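To make `produce_reference_value`'s normalization concrete, here is the effect one would expect on a typical task response (a sketch; the values are illustrative):

```rust
use serde_json::json;

let mut response = json!({
    "enqueuedAt": "2024-01-01T00:00:00Z",          // RFC 3339 string -> "[timestamp]"
    "uid": "550e8400-e29b-41d4-a716-446655440000", // UUID string -> "[uuid]"
    "processingTimeMs": 12                         // keyed field -> "[duration]"
});
produce_reference_value(&mut response);
assert_eq!(
    response,
    json!({
        "enqueuedAt": "[timestamp]",
        "uid": "[uuid]",
        "processingTimeMs": "[duration]"
    })
);
```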
@@ -1,5 +1,6 @@
{
  "name": "movies-subset-hf-embeddings",
  "type": "bench",
  "run_count": 5,
  "extra_cli_args": [
    "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
{
  "name": "settings-add-embeddings-hf",
  "type": "bench",
  "run_count": 5,
  "extra_cli_args": [
    "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
{
  "name": "hackernews.add_new_documents",
  "type": "bench",
  "run_count": 3,
  "extra_cli_args": [],
  "assets": {

@@ -1,5 +1,6 @@
{
  "name": "hackernews.ndjson_1M_ignore_first_100k",
  "type": "bench",
  "run_count": 3,
  "extra_cli_args": [],
  "assets": {

@@ -1,5 +1,6 @@
{
  "name": "hackernews.modify_facet_numbers",
  "type": "bench",
  "run_count": 3,
  "extra_cli_args": [],
  "assets": {

@@ -1,5 +1,6 @@
{
  "name": "hackernews.modify_facet_strings",
  "type": "bench",
  "run_count": 3,
  "extra_cli_args": [],
  "assets": {

@@ -1,5 +1,6 @@
{
  "name": "hackernews.modify_searchables",
  "type": "bench",
  "run_count": 3,
  "extra_cli_args": [],
  "assets": {

@@ -1,5 +1,6 @@
{
  "name": "hackernews.ndjson_1M",
  "type": "bench",
  "run_count": 3,
  "extra_cli_args": [],
  "assets": {

@@ -1,5 +1,6 @@
{
  "name": "movies.json,no-threads",
  "type": "bench",
  "run_count": 2,
  "extra_cli_args": [
    "--max-indexing-threads=1"

@@ -1,5 +1,6 @@
{
  "name": "movies.json",
  "type": "bench",
  "run_count": 10,
  "extra_cli_args": [],
  "assets": {

@@ -1,5 +1,6 @@
{
  "name": "search-movies-subset-hf-embeddings",
  "type": "bench",
  "run_count": 2,
  "target": "search::=trace",
  "extra_cli_args": [

@@ -1,5 +1,6 @@
{
  "name": "search-filterable-movies.json",
  "type": "bench",
  "run_count": 10,
  "target": "search::=trace",
  "extra_cli_args": [],
@@ -1,6 +1,7 @@
{
  "name": "search-geosort.jsonl_1M",
  "type": "bench",
  "run_count": 3,
  "target": "search::=trace",
  "extra_cli_args": [],
  "assets": {
@@ -1,5 +1,6 @@
{
  "name": "search-hackernews.ndjson_1M",
  "type": "bench",
  "run_count": 3,
  "target": "search::=trace",
  "extra_cli_args": [],

@@ -1,5 +1,6 @@
{
  "name": "search-movies.json",
  "type": "bench",
  "run_count": 10,
  "target": "search::=trace",
  "extra_cli_args": [],

@@ -1,5 +1,6 @@
{
  "name": "search-sortable-movies.json",
  "type": "bench",
  "run_count": 10,
  "target": "search::=trace",
  "extra_cli_args": [],

@@ -1,5 +1,6 @@
{
  "name": "settings-add-remove-filters.json",
  "type": "bench",
  "run_count": 5,
  "extra_cli_args": [
    "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
{
  "name": "settings-proximity-precision.json",
  "type": "bench",
  "run_count": 5,
  "extra_cli_args": [
    "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
{
  "name": "settings-remove-add-swap-searchable.json",
  "type": "bench",
  "run_count": 5,
  "extra_cli_args": [
    "--max-indexing-threads=4"

@@ -1,5 +1,6 @@
{
  "name": "settings-typo.json",
  "type": "bench",
  "run_count": 5,
  "extra_cli_args": [
    "--max-indexing-threads=4"
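The `"type"` field added to every bench workload above suggests the workload files are parsed into a type-tagged enum, with `"type": "test"` selecting the test variant. A minimal sketch of that shape (field sets abbreviated and assumed; not the exact definition from `crates/xtask`):

```rust
use serde::{Deserialize, Serialize};

// Abbreviated stand-ins for the real workload structs.
#[derive(Serialize, Deserialize, Debug)]
struct BenchWorkload {
    name: String,
    run_count: u16,
}

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct TestWorkload {
    name: String,
    initial_version: String,
}

// An internally tagged enum makes `"type": "bench"` / `"type": "test"`
// pick the right variant at deserialization time.
#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "type", rename_all = "lowercase")]
enum Workload {
    Bench(BenchWorkload),
    Test(TestWorkload),
}
```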
workloads/tests/README.md (new file, 265 lines)
@@ -0,0 +1,265 @@
# Declarative upgrade tests

Declarative upgrade tests ensure that Meilisearch features remain stable across versions.

While we already have unit tests, those run against **temporary databases** that are created fresh each time and therefore never risk corruption.

Upgrade tests instead **simulate the lifetime of a database**: they chain together commands and version upgrades, verifying that database state and API responses remain consistent.

## Basic example

```json
{
  "type": "test",
  "name": "api-keys",
  "initialVersion": "1.19.0", // the first command will run on a brand new database of this version
  "commands": []
}
```

This example defines a no-op test (it does nothing).

If the file is saved at `workloads/tests/example.json`, you can run it with:

```bash
cargo xtask test workloads/tests/example.json
```

## Commands

Commands represent API requests sent to Meilisearch endpoints during a test.

They are executed sequentially, and their responses can be validated to ensure consistent behavior across upgrades.

```json
{
  "route": "keys",
  "method": "POST",
  "body": {
    "inline": {
      "actions": [
        "search",
        "documents.add"
      ],
      "description": "Test API Key",
      "expiresAt": null,
      "indexes": [ "movies" ]
    }
  }
}
```

This command issues a `POST /keys` request, creating an API key with permissions to search and add documents in the `movies` index.

### Using assets in commands

To keep tests concise and reusable, you can define **assets** at the root of the workload file.

Assets are external data sources (such as datasets) that are cached between runs, making tests faster and easier to read.

```json
{
  "type": "test",
  "name": "movies",
  "initialVersion": "1.12.0",
  "assets": {
    "movies.json": {
      "local_location": null,
      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
      "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
    }
  },
  "commands": [
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "asset": "movies.json"
      }
    }
  ]
}
```

In this example:
- The `movies.json` dataset is defined as an asset, pointing to a remote URL.
- The SHA-256 checksum ensures integrity.
- The `POST /indexes/movies/documents` command uses this asset as the request body.

This makes the test much cleaner than inlining a large dataset directly into the command.

### Asserting responses

Commands can specify both the **expected status code** and the **expected response body**.

```json
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": {
    "asset": "movies.json"
  },
  "expectedStatus": 202,
  "expectedResponse": {
    "enqueuedAt": "[timestamp]", // Set to a bracketed string to ignore the value
    "indexUid": "movies",
    "status": "enqueued",
    "taskUid": 1,
    "type": "documentAdditionOrUpdate"
  },
  "synchronous": "WaitForTask"
}
```

Manually writing `expectedResponse` fields can be tedious.

Instead, you can let the test runner populate them automatically:

```bash
# Run the workload to populate expected fields. Only adds the missing ones, doesn't change existing data
cargo xtask test workloads/tests/example.json --add-missing-responses

# OR

# Run the workload to populate expected fields. Updates all fields, including existing ones
cargo xtask test workloads/tests/example.json --update-responses
```

The recommended workflow is:

1. Write the test without expected fields.
2. Run it with `--add-missing-responses` to capture the actual responses.
3. Review and commit the generated expectations.
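For intuition, the placeholder mechanic behaves like a recursive comparison in which any bracketed expected string matches whatever the server returned. A minimal sketch (the helper name and exact rules are illustrative, not the runner's actual code):

```rust
use serde_json::{json, Value};

// Sketch: placeholder-aware comparison. Any expected string like
// "[timestamp]", "[uuid]", or "[duration]" matches any actual value.
fn value_matches(expected: &Value, actual: &Value) -> bool {
    match (expected, actual) {
        (Value::String(s), _) if s.starts_with('[') && s.ends_with(']') => true,
        (Value::Array(e), Value::Array(a)) => {
            e.len() == a.len() && e.iter().zip(a).all(|(e, a)| value_matches(e, a))
        }
        (Value::Object(e), Value::Object(a)) => {
            e.len() == a.len()
                && e.iter().all(|(k, ev)| a.get(k).is_some_and(|av| value_matches(ev, av)))
        }
        _ => expected == actual,
    }
}

fn main() {
    let expected = json!({ "taskUid": 1, "enqueuedAt": "[timestamp]" });
    let actual = json!({ "taskUid": 1, "enqueuedAt": "2024-01-01T00:00:00Z" });
    assert!(value_matches(&expected, &actual));
}
```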
## Upgrade commands

Upgrade commands allow you to switch the Meilisearch instance from one version to another during a test.

When executed, an upgrade command will:
1. Stop the current Meilisearch server.
2. Upgrade the database to the specified version.
3. Restart the server with the newly specified version.

### Typical usage

In most cases, you will:

- **Set up** some data using commands on an older version.
- **Upgrade** to the latest version.
- **Assert** that the data and API behavior remain correct after the upgrade.

```json
{
  "type": "test",
  "name": "movies",
  "initialVersion": "1.12.0", // An older version to start with
  "commands": [
    // Commands to populate the database
    {
      "upgrade": "latest" // Will build Meilisearch locally and run it
    },
    // Commands to check the state of the database
  ]
}
```

This ensures backward compatibility: databases created with older Meilisearch versions should remain functional and consistent after an upgrade.

### Advanced usage

As time goes on, tests may grow more complex as they evolve alongside new features and schema changes.
A single test can chain together multiple upgrades, interleaving data population, API checks, and version transitions.

For example:

```json
{
  "type": "test",
  "name": "movies",
  "initialVersion": "1.12.0",
  "commands": [
    // Commands to populate the database
    {
      "upgrade": "1.17.0"
    },
    // Commands on endpoints that were removed after 1.17
    {
      "upgrade": "latest"
    },
    // Check the state
  ]
}
```

## Variables

Sometimes a command needs to use a value returned by a **previous response**.
These values can be captured and reused using the `register` field.

```json
{
  "route": "keys",
  "method": "POST",
  "body": {
    "inline": {
      "actions": [
        "search",
        "documents.add"
      ],
      "description": "Test API Key",
      "expiresAt": null,
      "indexes": [ "movies" ]
    }
  },
  "expectedResponse": {
    "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
    // ...
  },
  "register": {
    "key": "/key"
  },
  "synchronous": "WaitForResponse"
}
```

The `register` field captures the value at the JSON path `/key` from the response.
Paths follow the **JSON Pointer (RFC 6901)** format.
Registered variables are available to all subsequent commands.
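serde_json exposes RFC 6901 resolution directly through `Value::pointer`, which is enough to see how a `register` path selects a value (the values below are illustrative):

```rust
use serde_json::json;

fn main() {
    // Sketch: resolving an RFC 6901 pointer like "/key" against a response.
    let response = json!({ "key": "abc123", "taskUid": 1 });
    let registered = response.pointer("/key").cloned();
    assert_eq!(registered, Some(json!("abc123")));
}
```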
Registered variables can be referenced by wrapping their name in double curly braces:

In the route/path:

```json
{
  "route": "tasks/{{ task_id }}",
  "method": "GET"
}
```

In the request body:

```json
{
  "route": "indexes/movies/documents",
  "method": "PATCH",
  "body": {
    "inline": {
      "id": "{{ document_id }}",
      "overview": "Shazam turns evil and the world is in danger."
    }
  }
}
```

As an API key:

```json
{
  "route": "indexes/movies/documents",
  "method": "POST",
  "body": { /* ... */ },
  "apiKeyVariable": "key" // The content of the key variable will be used as an API key
}
```
workloads/tests/api-keys.json (new file, 221 lines)
@@ -0,0 +1,221 @@
{
  "type": "test",
  "name": "api-keys",
  "initialVersion": "1.12.0",
  "commands": [
    {
      "route": "keys",
      "method": "POST",
      "body": {
        "inline": {
          "actions": [
            "search",
            "documents.add"
          ],
          "description": "Test API Key",
          "expiresAt": null,
          "indexes": [
            "movies"
          ]
        }
      },
      "expectedStatus": 201,
      "expectedResponse": {
        "actions": [
          "search",
          "documents.add"
        ],
        "createdAt": "[timestamp]",
        "description": "Test API Key",
        "expiresAt": null,
        "indexes": [
          "movies"
        ],
        "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
        "name": null,
        "uid": "[uuid]",
        "updatedAt": "[timestamp]"
      },
      "register": {
        "key": "/key"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "keys/{{ key }}",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "actions": [
          "search",
          "documents.add"
        ],
        "createdAt": "[timestamp]",
        "description": "Test API Key",
        "expiresAt": null,
        "indexes": [
          "movies"
        ],
        "key": "c6f64630bad2996b1f675007c8800168e14adf5d6a7bb1a400a6d2b158050eaf",
        "name": null,
        "uid": "[uuid]",
        "updatedAt": "[timestamp]"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "/indexes",
      "method": "POST",
      "body": {
        "inline": {
          "primaryKey": "id",
          "uid": "movies"
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 0,
        "type": "indexCreation"
      },
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "inline": {
          "id": 287947,
          "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
          "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
          "release_date": "2019-03-23",
          "title": "Shazam"
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 1,
        "type": "documentAdditionOrUpdate"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/search?q=shazam",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 0,
        "hits": [],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "shazam"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "upgrade": "latest"
    },
    {
      "route": "indexes/movies/search?q=shazam",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 1,
        "hits": [
          {
            "id": 287947,
            "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2019-03-23",
            "title": "Shazam"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "shazam"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents/287947",
      "method": "DELETE",
      "body": null,
      "expectedStatus": 403,
      "expectedResponse": {
        "code": "invalid_api_key",
        "link": "https://docs.meilisearch.com/errors#invalid_api_key",
        "message": "The provided API key is invalid.",
        "type": "auth"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "inline": {
          "id": 287948,
          "overview": "Shazam turns evil and the world is in danger.",
          "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
          "release_date": "2032-03-23",
          "title": "Shazam 2"
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 3,
        "type": "documentAdditionOrUpdate"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForTask"
    },
    {
      "route": "indexes/movies/search?q=shaza",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 2,
        "hits": [
          {
            "id": 287947,
            "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2019-03-23",
            "title": "Shazam"
          },
          {
            "id": 287948,
            "overview": "Shazam turns evil and the world is in danger.",
            "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
            "release_date": "2032-03-23",
            "title": "Shazam 2"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "shaza"
      },
      "apiKeyVariable": "key",
      "synchronous": "WaitForResponse"
    }
  ]
}
workloads/tests/movies.json (new file, 163 lines)
@@ -0,0 +1,163 @@
{
  "type": "test",
  "name": "movies",
  "initialVersion": "1.12.0",
  "assets": {
    "movies.json": {
      "local_location": null,
      "remote_location": "https://milli-benchmarks.fra1.digitaloceanspaces.com/bench/datasets/movies.json",
      "sha256": "5b6e4cb660bc20327776e8a33ea197b43d9ec84856710ead1cc87ab24df77de1"
    }
  },
  "commands": [
    {
      "route": "indexes/movies/settings",
      "method": "PATCH",
      "body": {
        "inline": {
          "filterableAttributes": [
            "genres",
            "release_date"
          ],
          "searchableAttributes": [
            "title",
            "overview"
          ],
          "sortableAttributes": [
            "release_date"
          ]
        }
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 0,
        "type": "settingsUpdate"
      },
      "synchronous": "DontWait"
    },
    {
      "route": "indexes/movies/documents",
      "method": "POST",
      "body": {
        "asset": "movies.json"
      },
      "expectedStatus": 202,
      "expectedResponse": {
        "enqueuedAt": "[timestamp]",
        "indexUid": "movies",
        "status": "enqueued",
        "taskUid": 1,
        "type": "documentAdditionOrUpdate"
      },
      "synchronous": "WaitForTask"
    },
    {
      "upgrade": "latest"
    },
    {
      "route": "indexes/movies/search?q=bitcoin",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "estimatedTotalHits": 6,
        "hits": [
          {
            "genres": [
              "Documentary"
            ],
            "id": 349086,
            "overview": "A documentary exploring how money and the trading of value has evolved, culminating in Bitcoin.",
            "poster": "https://image.tmdb.org/t/p/w500/A82oxum0dTL71N0cjD0F66S9gdt.jpg",
            "release_date": 1437177600,
            "title": "Bitcoin: The End of Money as We Know It"
          },
          {
            "genres": [
              "Documentary",
              "History"
            ],
            "id": 427451,
            "overview": "Not since the invention of the Internet has there been such a disruptive technology as Bitcoin. Bitcoin's early pioneers sought to blur the lines of sovereignty and the financial status quo. After years of underground development Bitcoin grabbed the attention of a curious public, and the ire of the regulators the technology had subverted. After landmark arrests of prominent cyber criminals Bitcoin faces its most severe adversary yet, the very banks it was built to destroy.",
            "poster": "https://image.tmdb.org/t/p/w500/qW3vsno24UBawZjnrKfQ1qHRPD6.jpg",
            "release_date": 1483056000,
            "title": "Banking on Bitcoin"
          },
          {
            "genres": [
              "Documentary",
              "History"
            ],
            "id": 292607,
            "overview": "A documentary about the development and spread of the virtual currency called Bitcoin.",
            "poster": "https://image.tmdb.org/t/p/w500/nUzeZupwmEOoddQIDAq10Gyifk0.jpg",
            "release_date": 1412294400,
            "title": "The Rise and Rise of Bitcoin"
          },
          {
            "genres": [
              "Documentary"
            ],
            "id": 321769,
            "overview": "Deep Web gives the inside story of one of the most important and riveting digital crime sagas of the century -- the arrest of Ross William Ulbricht, the 30-year-old entrepreneur convicted of being 'Dread Pirate Roberts,' creator and operator of online black market Silk Road. As the only film with exclusive access to the Ulbricht family, Deep Web explores how the brightest minds and thought leaders behind the Deep Web and Bitcoin are now caught in the crosshairs of the battle for control of a future inextricably linked to technology, with our digital rights hanging in the balance.",
            "poster": "https://image.tmdb.org/t/p/w500/dtSOFZ7ioDSaJxPzORaplqo8QZ2.jpg",
            "release_date": 1426377600,
            "title": "Deep Web"
          },
          {
            "genres": [
              "Comedy",
              "Horror"
            ],
            "id": 179538,
            "overview": "A gang of gold thieves lands in a coven of witches who are preparing for an ancient ritual... and in need of a sacrifice.",
            "poster": "https://image.tmdb.org/t/p/w500/u7w6vghlbz8xDUZRayOXma3Ax96.jpg",
            "release_date": 1379635200,
            "title": "Witching & Bitching"
          },
          {
            "genres": [
              "Comedy"
            ],
            "id": 70882,
            "overview": "Roseanne Barr is back with an all-new HBO comedy special! Filmed live at the Comedy Store in Los Angeles, Roseanne returns to her stand-up roots for the first time in 14 years, as she tackles hot issues of today - from gay marriage to President Bush.",
            "poster": "https://image.tmdb.org/t/p/w500/cUkQQnfPTonMXRroZzCyw11eKXr.jpg",
            "release_date": 1162598400,
            "title": "Roseanne Barr: Blonde and Bitchin'"
          }
        ],
        "limit": 20,
        "offset": 0,
        "processingTimeMs": "[duration]",
        "query": "bitcoin"
      },
      "synchronous": "DontWait"
    },
    {
      "route": "indexes/movies/stats",
      "method": "GET",
      "body": null,
      "expectedStatus": 200,
      "expectedResponse": {
        "avgDocumentSize": 499,
        "fieldDistribution": {
          "genres": 31944,
          "id": 31944,
          "overview": 31944,
          "poster": 31944,
          "release_date": 31944,
          "title": 31944
        },
        "isIndexing": false,
        "numberOfDocuments": 31944,
        "numberOfEmbeddedDocuments": 0,
        "numberOfEmbeddings": 0,
        "rawDocumentDbSize": 16220160
      },
      "synchronous": "DontWait"
    }
  ]
}