Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-13 16:07:00 +00:00)
Compare commits: proper-def ... main (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 26e368b116 | |
| | ba95ac0915 | |
.github/workflows/sdks-tests.yml (vendored, 12 changed lines)
@@ -25,14 +25,18 @@ jobs:
       - uses: actions/checkout@v5
       - name: Define the Docker image we need to use
         id: define-image
+        env:
+          EVENT_NAME: ${{ github.event_name }}
+          DOCKER_IMAGE_INPUT: ${{ github.event.inputs.docker_image }}
         run: |
-          event=${{ github.event_name }}
           echo "docker-image=nightly" >> $GITHUB_OUTPUT
-          if [[ $event == 'workflow_dispatch' ]]; then
-            echo "docker-image=${{ github.event.inputs.docker_image }}" >> $GITHUB_OUTPUT
+          if [[ "$EVENT_NAME" == 'workflow_dispatch' ]]; then
+            echo "docker-image=$DOCKER_IMAGE_INPUT" >> $GITHUB_OUTPUT
           fi
       - name: Docker image is ${{ steps.define-image.outputs.docker-image }}
-        run: echo "Docker image is ${{ steps.define-image.outputs.docker-image }}"
+        env:
+          DOCKER_IMAGE: ${{ steps.define-image.outputs.docker-image }}
+        run: echo "Docker image is $DOCKER_IMAGE"
 
   ##########
   ## SDKs ##
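This change follows the standard GitHub Actions hardening pattern: `${{ }}` expressions are substituted into the script text before the shell ever runs, so event-controlled values (such as a `workflow_dispatch` input) interpolated directly into `run:` can inject shell commands. Routing them through `env:` turns them into plain environment variables that the shell reads as inert data. The same idea as a minimal Rust sketch, purely as an analogy (the `EVENT_NAME` name mirrors the workflow above; none of this is Meilisearch code):

use std::process::Command;

// Analogy to the workflow change: the script text is fixed, and the
// untrusted value travels via the environment instead of being spliced
// into the command string.
fn run_with_untrusted(input: &str) {
    Command::new("sh")
        .arg("-c")
        .arg(r#"echo "event is $EVENT_NAME""#) // script never contains `input`
        .env("EVENT_NAME", input)
        .status()
        .expect("failed to spawn shell");
}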
@@ -502,11 +502,13 @@ impl Queue {
             *before_finished_at,
         )?;
 
+        if let Some(limit) = limit {
             batches = if query.reverse.unwrap_or_default() {
-                batches.into_iter().take(*limit).collect()
+                batches.into_iter().take(*limit as usize).collect()
             } else {
-                batches.into_iter().rev().take(*limit).collect()
+                batches.into_iter().rev().take(*limit as usize).collect()
             };
+        }
 
         Ok(batches)
     }
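With `limit` now an `Option<u32>`, truncation only happens when a limit was actually requested; `None` leaves the full result set untouched. A self-contained sketch of the same shape (names are illustrative, not the Meilisearch API; as in the code above, ids are assumed ascending and the default order is newest-first, hence the `.rev()` on the non-reversed path):

// Minimal sketch of the optional-limit logic.
fn apply_limit(ids: Vec<u32>, limit: Option<u32>, reverse: bool) -> Vec<u32> {
    match limit {
        None => ids, // no limit requested: keep every match
        Some(n) if reverse => ids.into_iter().take(n as usize).collect(),
        Some(n) => ids.into_iter().rev().take(n as usize).collect(),
    }
}

fn main() {
    // Mirrors the snapshots in the tests below: limit 1 keeps the newest id.
    assert_eq!(apply_limit(vec![0, 1, 2], Some(1), false), vec![2]);
    assert_eq!(apply_limit(vec![0, 1, 2], None, false), vec![0, 1, 2]);
}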
@@ -600,8 +602,11 @@ impl Queue {
             Box::new(batches.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
         };
 
-        let batches =
-            self.batches.get_existing_batches(rtxn, batches.take(query.limit), processing)?;
+        let batches = self.batches.get_existing_batches(
+            rtxn,
+            batches.take(query.limit.unwrap_or(u32::MAX) as usize),
+            processing,
+        )?;
 
         Ok((batches, total))
     }
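Where the limit feeds `Iterator::take` directly, a missing limit is mapped to `u32::MAX`; since task and batch ids are `u32`, that bound can never cut off a real result. A hypothetical one-liner capturing the conversion:

// Hypothetical helper, not in the diff: `None` becomes "take everything".
fn take_count(limit: Option<u32>) -> usize {
    limit.unwrap_or(u32::MAX) as usize
}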
@@ -28,21 +28,21 @@ fn query_batches_from_and_limit() {
 
     let proc = index_scheduler.processing_tasks.read().unwrap().clone();
     let rtxn = index_scheduler.env.read_txn().unwrap();
-    let query = Query { limit: 0, ..Default::default() };
+    let query = Query { limit: Some(0), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[]");
 
-    let query = Query { limit: 1, ..Default::default() };
+    let query = Query { limit: Some(1), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[2,]");
 
-    let query = Query { limit: 2, ..Default::default() };
+    let query = Query { limit: Some(2), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
@@ -63,14 +63,14 @@ fn query_batches_from_and_limit() {
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[0,1,2,]");
 
-    let query = Query { from: Some(1), limit: 1, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
         .unwrap();
     snapshot!(snapshot_bitmap(&batches), @"[1,]");
 
-    let query = Query { from: Some(1), limit: 2, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
     let (batches, _) = index_scheduler
         .queue
         .get_batch_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &proc)
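The test updates are mechanical: call sites that previously passed a bare integer (or leaned on the old implicit default of 20) now spell out `Some(n)`, while `..Default::default()` fills every other field with `None` through the newly derived `Default` shown further down.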
@@ -31,9 +31,6 @@ use crate::{Error, IndexSchedulerOptions, Result, TaskId};
 
 /// The number of database used by queue itself
 const NUMBER_OF_DATABASES: u32 = 1;
-/// The default limit for pagination
-const DEFAULT_LIMIT: usize = 20;
-
 /// Database const names for the `IndexScheduler`.
 mod db_name {
     pub const BATCH_TO_TASKS_MAPPING: &str = "batch-to-tasks-mapping";
@@ -43,11 +40,11 @@ mod db_name {
 ///
 /// An empty/default query (where each field is set to `None`) matches all tasks.
 /// Each non-null field restricts the set of tasks further.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Default, Debug, Clone, PartialEq, Eq)]
 pub struct Query {
-    /// The maximum number of tasks to be matched. Defaults to 20.
-    pub limit: usize,
-    /// The minimum [task id](`meilisearch_types::tasks::Task::uid`) to be matched. Defaults to 0.
+    /// The maximum number of tasks to be matched
+    pub limit: Option<u32>,
+    /// The minimum [task id](`meilisearch_types::tasks::Task::uid`) to be matched
     pub from: Option<u32>,
     /// The order used to return the tasks. By default the newest tasks are returned first and the boolean is `false`.
     pub reverse: Option<bool>,
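Once `limit` is an `Option`, every field of `Query` is optional, which is what lets the hand-written `Default` impl below be replaced by `#[derive(Default)]`. A cut-down stand-in for the real struct showing the pattern:

// Stand-in for the real Query: with all-Option fields, the derived
// Default (every field None) is exactly the "match all tasks" query.
#[derive(Default, Debug, PartialEq)]
struct MiniQuery {
    limit: Option<u32>,
    from: Option<u32>,
    reverse: Option<bool>,
}

fn main() {
    let q = MiniQuery { limit: Some(2), ..Default::default() };
    assert_eq!(q.from, None);
    assert_eq!(MiniQuery::default(), MiniQuery { limit: None, from: None, reverse: None });
}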
@@ -86,29 +83,32 @@ pub struct Query {
     pub after_finished_at: Option<OffsetDateTime>,
 }
 
-impl Default for Query {
-    fn default() -> Self {
-        Self {
-            limit: DEFAULT_LIMIT,
-            from: Default::default(),
-            reverse: Default::default(),
-            uids: Default::default(),
-            batch_uids: Default::default(),
-            statuses: Default::default(),
-            types: Default::default(),
-            index_uids: Default::default(),
-            canceled_by: Default::default(),
-            before_enqueued_at: Default::default(),
-            after_enqueued_at: Default::default(),
-            before_started_at: Default::default(),
-            after_started_at: Default::default(),
-            before_finished_at: Default::default(),
-            after_finished_at: Default::default(),
-        }
-    }
-}
-
-impl Query {
+impl Query {
+    /// Return `true` if every field of the query is set to `None`, such that the query
+    /// matches all tasks.
+    pub fn is_empty(&self) -> bool {
+        matches!(
+            self,
+            Query {
+                limit: None,
+                from: None,
+                reverse: None,
+                uids: None,
+                batch_uids: None,
+                statuses: None,
+                types: None,
+                index_uids: None,
+                canceled_by: None,
+                before_enqueued_at: None,
+                after_enqueued_at: None,
+                before_started_at: None,
+                after_started_at: None,
+                before_finished_at: None,
+                after_finished_at: None,
+            }
+        )
+    }
+
     /// Add an [index id](meilisearch_types::tasks::Task::index_uid) to the list of permitted indexes.
     pub fn with_index(self, index_uid: String) -> Self {
         let mut index_vec = self.index_uids.unwrap_or_default();
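`is_empty` is written with `matches!` and an exhaustive struct pattern rather than a comparison against `Query::default()`: because the pattern names every field and has no `..` catch-all, adding a field to `Query` without updating `is_empty` becomes a compile error instead of a silent bug. The idiom on a toy type:

// Exhaustive-pattern idiom: no `..` in the pattern, so growing the struct
// forces this check to be revisited at compile time.
struct MiniQuery {
    limit: Option<u32>,
    from: Option<u32>,
}

impl MiniQuery {
    fn is_empty(&self) -> bool {
        matches!(self, MiniQuery { limit: None, from: None })
    }
}

fn main() {
    assert!(MiniQuery { limit: None, from: None }.is_empty());
    assert!(!MiniQuery { limit: Some(1), from: None }.is_empty());
}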
@@ -119,7 +119,7 @@ impl Query {
     // Removes the `from` and `limit` restrictions from the query.
     // Useful to get the total number of tasks matching a filter.
     pub fn without_limits(self) -> Self {
-        Query { limit: usize::MAX, from: None, ..self }
+        Query { limit: None, from: None, ..self }
     }
 }
 
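`without_limits` used to encode "no limit" as the sentinel `usize::MAX`; `None` now states the same intent directly, and the `take` sites above decide how to realize it.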
@@ -465,11 +465,13 @@ impl Queue {
             *before_finished_at,
         )?;
 
+        if let Some(limit) = limit {
             tasks = if query.reverse.unwrap_or_default() {
-                tasks.into_iter().take(*limit).collect()
+                tasks.into_iter().take(*limit as usize).collect()
             } else {
-                tasks.into_iter().rev().take(*limit).collect()
+                tasks.into_iter().rev().take(*limit as usize).collect()
            };
+        }
 
         Ok(tasks)
     }
@@ -527,7 +529,9 @@ impl Queue {
         } else {
             Box::new(tasks.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
         };
-        let tasks = self.tasks.get_existing_tasks(rtxn, tasks.take(query.limit))?;
+        let tasks = self
+            .tasks
+            .get_existing_tasks(rtxn, tasks.take(query.limit.unwrap_or(u32::MAX) as usize))?;
 
         let ProcessingTasks { batch, processing, progress: _ } = processing_tasks;
 
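The task-side queries receive the same treatment as the batch-side ones above: truncation guarded by `if let Some(limit)`, and `unwrap_or(u32::MAX)` where the limit feeds `take` directly.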
@@ -28,21 +28,21 @@ fn query_tasks_from_and_limit() {
 
     let rtxn = index_scheduler.env.read_txn().unwrap();
     let processing = index_scheduler.processing_tasks.read().unwrap();
-    let query = Query { limit: 0, ..Default::default() };
+    let query = Query { limit: Some(0), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[]");
 
-    let query = Query { limit: 1, ..Default::default() };
+    let query = Query { limit: Some(1), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[2,]");
 
-    let query = Query { limit: 2, ..Default::default() };
+    let query = Query { limit: Some(2), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
@@ -63,14 +63,14 @@ fn query_tasks_from_and_limit() {
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]");
 
-    let query = Query { from: Some(1), limit: 1, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
         .unwrap();
     snapshot!(snapshot_bitmap(&tasks), @"[1,]");
 
-    let query = Query { from: Some(1), limit: 2, ..Default::default() };
+    let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
     let (tasks, _) = index_scheduler
         .queue
         .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default(), &processing)
@@ -185,7 +185,7 @@ pub async fn get_metrics(
         // Fetch the finished batches...
         &Query {
             statuses: Some(vec![Status::Succeeded, Status::Failed]),
-            limit: 1,
+            limit: Some(1),
             ..Query::default()
         },
         auth_filters,
@@ -214,7 +214,7 @@ pub async fn get_metrics(
     let task_queue_latency_seconds = index_scheduler
         .get_tasks_from_authorized_indexes(
             &Query {
-                limit: 1,
+                limit: Some(1),
                 reverse: Some(true),
                 statuses: Some(vec![Status::Enqueued, Status::Processing]),
                 ..Query::default()
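Both metrics queries only need a single entry, so they now request `limit: Some(1)` explicitly. For the latency metric, `reverse: Some(true)` flips the default newest-first order, so the one returned task is presumably the oldest still enqueued or processing, from which the queue latency is derived.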
@@ -126,7 +126,7 @@ pub struct TasksFilterQuery {
 impl TasksFilterQuery {
     pub(crate) fn into_query(self) -> Query {
         Query {
-            limit: self.limit.0 as usize,
+            limit: Some(self.limit.0),
             from: self.from.as_deref().copied(),
             reverse: self.reverse.as_deref().copied(),
             batch_uids: self.batch_uids.merge_star_and_none(),
@@ -225,8 +225,7 @@ pub struct TaskDeletionOrCancelationQuery {
 impl TaskDeletionOrCancelationQuery {
     fn into_query(self) -> Query {
         Query {
-            // We want to delete all tasks that match the given filters
-            limit: usize::MAX,
+            limit: None,
             from: None,
             reverse: None,
             batch_uids: self.batch_uids.merge_star_and_none(),
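At the HTTP layer the two conversions diverge deliberately: `TasksFilterQuery` wraps its value in `Some(self.limit.0)` (the `.0` suggests a wrapper type that is always populated, presumably by a route-level default), while deletion and cancelation queries, which must consider every matching task, replace the old `limit: usize::MAX` sentinel with an explicit `limit: None`.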