Compare commits

...

26 Commits

Author SHA1 Message Date
e1aad060ff Merge #3258
3258: Bump milli to v0.37.4 r=curquiza a=Kerollmops

This PR bumps the milli dependency to v0.37.4 for the next Meilisearch release.

Co-authored-by: Kerollmops <clement@meilisearch.com>
2022-12-15 15:50:54 +00:00
eecc5e7da1 Bump milli to v0.37.4 2022-12-15 16:06:43 +01:00
ab3577f0e9 Merge #3254
3254: Update version for the next release (v0.30.4) in Cargo.toml files r=curquiza a=meili-bot

⚠️ This PR is automatically generated. Check the new version is the expected one before merging.

Co-authored-by: curquiza <curquiza@users.noreply.github.com>
2022-12-15 11:16:40 +00:00
81a41431be Update version for the next release (v0.30.4) in Cargo.toml files 2022-12-15 09:56:56 +00:00
539edb5a64 Merge #3248
3248: Bump milli to v0.37.3 r=curquiza a=Kerollmops

This PR bumps the milli dependency from v0.37.2 to v0.37.3.

Co-authored-by: Kerollmops <clement@meilisearch.com>
2022-12-14 13:52:13 +00:00
312a8afadc Bump milli to v0.37.3 2022-12-14 14:33:43 +01:00
0ceb51a123 Merge #3240
3240: Update version for the next release (v0.30.3) in Cargo.toml files r=curquiza a=meili-bot

⚠️ This PR is automatically generated. Check the new version is the expected one before merging.

Co-authored-by: curquiza <curquiza@users.noreply.github.com>
2022-12-13 17:13:52 +00:00
d2a7f332c1 Update version for the next release (v0.30.3) in Cargo.toml files 2022-12-13 16:35:10 +00:00
4534825191 Merge #3237
3237: Fix the CLI flags related to the import of dumps and snapshots. r=dureuill a=irevoire

Some flags were incorrectly applied, and the database was wrongly deleted when it shouldn't have been.

To reduce the number of mistakes we might make, I added a bunch of comments and created a function that handles the import of an existing or empty database.


Here is the associated (working) code from v0.29.3: https://github.com/meilisearch/meilisearch/blob/release-v0.29.3/meilisearch-lib/src/dump/mod.rs#L166-L191

Fixes #3238

Co-authored-by: Tamo <tamo@meilisearch.com>
2022-12-13 16:31:13 +00:00
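
A minimal sketch of the decision tree this PR introduces (the helper and flag names match the diff to the meilisearch-http sources further down; the stub bodies and error strings are illustrative only, and the dump flags follow the same shape):

    use std::path::Path;

    enum OnFailure {
        RemoveDb,
        KeepDb,
    }

    // Stub standing in for the real helper introduced by this PR.
    fn open_or_create_database_unchecked(_db_path: &Path, _on_failure: OnFailure) -> anyhow::Result<()> {
        Ok(()) // would build the IndexScheduler and AuthController, removing the db on failure if asked
    }

    // Stub: an existing db must pass the VERSION file check before being opened.
    fn open_or_create_database(db_path: &Path, _empty_db: bool) -> anyhow::Result<()> {
        open_or_create_database_unchecked(db_path, OnFailure::KeepDb)
    }

    fn import_snapshot(
        db_path: &Path,
        snapshot_path: &Path,
        empty_db: bool,
        ignore_snapshot_if_db_exists: bool,
        ignore_missing_snapshot: bool,
    ) -> anyhow::Result<()> {
        let snapshot_exists = snapshot_path.exists();
        if empty_db && snapshot_exists {
            // the db is empty and the snapshot exists: import it, and remove
            // the half-written db if anything goes wrong
            open_or_create_database_unchecked(db_path, OnFailure::RemoveDb)
        } else if !empty_db && !ignore_snapshot_if_db_exists {
            anyhow::bail!("database already exists at {db_path:?}")
        } else if !snapshot_exists && !ignore_missing_snapshot {
            anyhow::bail!("snapshot doesn't exist at {snapshot_path:?}")
        } else {
            // the flags allow us to skip the snapshot: open the db without deleting anything
            open_or_create_database(db_path, empty_db)
        }
    }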
bb9b3e0bbb rename the two new functions 2022-12-13 17:25:49 +01:00
79656585dd Fix typos
Co-authored-by: Louis Dureuil <louis@meilisearch.com>
2022-12-13 17:02:07 +01:00
bbe4a84ddc Fix the import of dumps and snapshots.
Some flags were incorrectly applied, and the database was wrongly deleted when it shouldn't have been
2022-12-13 16:39:05 +01:00
125f0b1522 Merge #3218
3218: Update version for the next release (v0.30.2) in Cargo.toml files r=curquiza a=meili-bot

⚠️ This PR is automatically generated. Check the new version is the expected one before merging.

Co-authored-by: curquiza <curquiza@users.noreply.github.com>
2022-12-08 13:28:00 +00:00
eb758343bd Merge #3217
3217: Bump milli to v0.37.2 r=Kerollmops a=curquiza

⚠️ Please ensure milli has been updated in EVERY Cargo.toml ⚠️

Co-authored-by: curquiza <clementine@meilisearch.com>
2022-12-08 13:01:44 +00:00
4b61ffae67 Update version for the next release (v0.30.2) in Cargo.toml files 2022-12-08 12:32:02 +00:00
b259bc686d Bump milli to v0.37.2 2022-12-08 13:23:44 +01:00
6ddde37850 Merge #3213
3213: Fix the instance-uid in the data.ms r=Kerollmops a=irevoire

We were writing the instance-uid as raw bytes instead of a string in the data.ms, and thus we were unable to parse it later. Also, it was less practical for our users to retrieve it and send it to us.

Fix #3214

Co-authored-by: Tamo <tamo@meilisearch.com>
2022-12-07 19:08:17 +00:00
ee7a4be95c Fix the instance-uid in the data.ms
We were writing the instance-uid as raw bytes instead of a string in the data.ms, and thus we were unable to parse it later.
It was also less practical for our users to retrieve it and send it to us.
2022-12-07 18:22:36 +01:00
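
For illustration, here is the difference between the two on-disk formats, sketched with the uuid crate (which the InstanceUid type in the diff below appears to wrap; the example value is hypothetical):

    use uuid::Uuid;

    fn main() {
        let id = Uuid::new_v4();

        // old behaviour: 16 raw bytes, unreadable in a text editor and
        // not parseable back with Uuid::parse_str
        let as_bytes: &[u8] = id.as_bytes();
        assert_eq!(as_bytes.len(), 16);

        // new behaviour: the 36-character hyphenated form,
        // e.g. "67e55044-10b1-426f-9247-bb680e5fe0c8"
        let as_text = id.to_string();
        assert_eq!(as_text.len(), 36);
        assert_eq!(Uuid::parse_str(&as_text).unwrap(), id);
    }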
58cd5d29e8 Merge #3202 #3203
3202: Bump milli to v0.37.1 r=curquiza a=Kerollmops

This PR bumps milli to v0.37.1 and fixes #3167, #3178, #3165, and #3021.

3203: Update version for the next release (v0.30.1) in Cargo.toml files r=Kerollmops a=meili-bot

⚠️ This PR is automatically generated. Check the new version is the expected one before merging.

Co-authored-by: Kerollmops <clement@meilisearch.com>
Co-authored-by: curquiza <curquiza@users.noreply.github.com>
2022-12-06 16:22:38 +00:00
d3d794e9ba Update version for the next release (v0.30.1) in Cargo.toml files 2022-12-06 16:20:31 +00:00
ef978a6106 Bump milli to v0.37.1 2022-12-06 17:11:03 +01:00
ee372099fd Merge #3160
3160: Clamp databases max size to the page size r=irevoire a=Kerollmops

This PR fixes #3150 (a reoccurrence of #2662). We fix it again here because we entirely rewrote the index scheduler and forgot about this little detail.

`@irevoire` Can I have your input on where we create the indexes in the tests? I want the tests to use a size that is not rounded to the page size, so that we catch this issue in the tests next time.

Co-authored-by: Kerollmops <clement@meilisearch.com>
2022-11-29 14:43:49 +00:00
e6f4a8a992 Clamp the databases size to the page size 2022-11-29 15:26:48 +01:00
b948bb5191 Make the tests use MB to trigger page size issues 2022-11-29 15:26:48 +01:00
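
The fix rounds the requested map size down to a multiple of the OS page size, which LMDB expects; the one-line clamp appears in the index-scheduler hunks below. A worked example (assuming 4096-byte pages, the common case on x86_64 Linux):

    /// Round `size` down to the nearest multiple of the system page size
    /// (mirrors the clamp_to_page_size function added in this PR).
    fn clamp_to_page_size(size: usize) -> usize {
        // integer division truncates, so this rounds down
        size / page_size::get() * page_size::get()
    }

    fn main() {
        // with 4096-byte pages: 1_000_000 / 4096 = 244 pages, and 244 * 4096 = 999_424
        println!("1 MB  -> {} bytes", clamp_to_page_size(1_000_000));
        // 1 MiB (1_048_576 bytes) is exactly 256 such pages and is left unchanged,
        // which is why the tests switched from MiB to MB to exercise the clamp
        println!("1 MiB -> {} bytes", clamp_to_page_size(1024 * 1024));
    }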
27ac62ec33 Merge #3161
3161: « Fix » dump tests r=curquiza a=irevoire

This PR doesn't re-enable the dump tests.
It's a cherry-pick of https://github.com/meilisearch/meilisearch/pull/3149 that should avoid a conflict when we merge into `main` later

Closes #3153

Co-authored-by: Tamo <tamo@meilisearch.com>
2022-11-29 12:30:54 +00:00
0aec840d20 Fix the dump tests 2022-11-29 10:45:39 +01:00
16 changed files with 123 additions and 94 deletions

Cargo.lock (generated)

@@ -1101,7 +1101,7 @@ dependencies = [
[[package]]
name = "dump"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"anyhow",
"big_s",
@@ -1310,7 +1310,7 @@ dependencies = [
[[package]]
name = "file-store"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"faux",
"tempfile",
@@ -1332,8 +1332,8 @@ dependencies = [
[[package]]
name = "filter-parser"
-version = "0.37.0"
-source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.0#57c9f03e514436a2cca799b2a28cd89247682be0"
+version = "0.37.4"
+source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.4#e1d7d7231338803f940734b4d54a2de99f6b3c6b"
dependencies = [
"nom",
"nom_locate",
@@ -1351,8 +1351,8 @@ dependencies = [
[[package]]
name = "flatten-serde-json"
-version = "0.37.0"
-source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.0#57c9f03e514436a2cca799b2a28cd89247682be0"
+version = "0.37.4"
+source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.4#e1d7d7231338803f940734b4d54a2de99f6b3c6b"
dependencies = [
"serde_json",
]
@@ -1625,7 +1625,7 @@ dependencies = [
"libc",
"lmdb-rkv-sys",
"once_cell",
-"page_size",
+"page_size 0.4.2",
"synchronoise",
"url",
"zerocopy",
@@ -1767,7 +1767,7 @@ dependencies = [
[[package]]
name = "index-scheduler"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"anyhow",
"big_s",
@@ -1783,6 +1783,7 @@ dependencies = [
"meili-snap",
"meilisearch-types",
"nelson",
+"page_size 0.5.0",
"roaring",
"serde",
"serde_json",
@@ -1897,8 +1898,8 @@ dependencies = [
[[package]]
name = "json-depth-checker"
-version = "0.37.0"
-source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.0#57c9f03e514436a2cca799b2a28cd89247682be0"
+version = "0.37.4"
+source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.4#e1d7d7231338803f940734b4d54a2de99f6b3c6b"
dependencies = [
"serde_json",
]
@@ -2155,7 +2156,7 @@ checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d"
[[package]]
name = "lmdb-rkv-sys"
version = "0.15.1"
-source = "git+https://github.com/meilisearch/lmdb-rs#5592bf5a812905cf0c633404ef8f8f4057112c65"
+source = "git+https://github.com/meilisearch/lmdb-rs#0144fb2bac524cdc2897d7750681ed3fff2dc3ac"
dependencies = [
"cc",
"libc",
@@ -2257,7 +2258,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "meili-snap"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"insta",
"md5",
@@ -2266,7 +2267,7 @@ dependencies = [
[[package]]
name = "meilisearch-auth"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"enum-iterator",
"hmac",
@@ -2283,7 +2284,7 @@ dependencies = [
[[package]]
name = "meilisearch-http"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"actix-cors",
"actix-http",
@@ -2366,7 +2367,7 @@ dependencies = [
[[package]]
name = "meilisearch-types"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"actix-web",
"anyhow",
@@ -2416,8 +2417,8 @@ dependencies = [
[[package]]
name = "milli"
-version = "0.37.0"
-source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.0#57c9f03e514436a2cca799b2a28cd89247682be0"
+version = "0.37.4"
+source = "git+https://github.com/meilisearch/milli.git?tag=v0.37.4#e1d7d7231338803f940734b4d54a2de99f6b3c6b"
dependencies = [
"bimap",
"bincode",
@@ -2663,6 +2664,16 @@ dependencies = [
"winapi",
]
+
+[[package]]
+name = "page_size"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b7663cbd190cfd818d08efa8497f6cd383076688c49a391ef7c0d03cd12b561"
+dependencies = [
+"libc",
+"winapi",
+]
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -2747,7 +2758,7 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "permissive-json-pointer"
-version = "0.30.0"
+version = "0.30.4"
dependencies = [
"big_s",
"serde_json",

dump/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "dump"
-version = "0.30.0"
+version = "0.30.4"
edition = "2021"
[dependencies]

dump/src/reader/mod.rs

@@ -420,7 +420,7 @@ pub(crate) mod test {
// tasks
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
-meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"42d4200cf6d92a6449989ca48cd8e28a");
+meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"6519f7064c45d2196dd59b71350a9bf5");
assert_eq!(update_files.len(), 22);
assert!(update_files[0].is_none()); // the dump creation
assert!(update_files[1].is_some()); // the enqueued document addition

dump/src/reader/v6/mod.rs

@@ -201,7 +201,7 @@ pub(crate) mod test {
// tasks
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
-meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"42d4200cf6d92a6449989ca48cd8e28a");
+meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"6519f7064c45d2196dd59b71350a9bf5");
assert_eq!(update_files.len(), 22);
assert!(update_files[0].is_none()); // the dump creation
assert!(update_files[1].is_some()); // the enqueued document addition

file-store/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "file-store"
-version = "0.30.0"
+version = "0.30.4"
edition = "2021"
[dependencies]

index-scheduler/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "index-scheduler"
-version = "0.30.0"
+version = "0.30.4"
edition = "2021"
[dependencies]
@@ -13,6 +13,7 @@ enum-iterator = "1.1.3"
file-store = { path = "../file-store" }
log = "0.4.14"
meilisearch-types = { path = "../meilisearch-types" }
+page_size = "0.5.0"
roaring = { version = "0.10.0", features = ["serde"] }
serde = { version = "1.0.136", features = ["derive"] }
serde_json = { version = "1.0.85", features = ["preserve_order"] }

index-scheduler/src/index_mapper.rs

@@ -12,7 +12,7 @@ use meilisearch_types::milli::Index;
use uuid::Uuid;
use self::IndexStatus::{Available, BeingDeleted};
-use crate::{Error, Result};
+use crate::{clamp_to_page_size, Error, Result};
const INDEX_MAPPING: &str = "index-mapping";
@@ -68,7 +68,7 @@ impl IndexMapper {
/// The path *must* exists or an error will be thrown.
fn create_or_open_index(&self, path: &Path) -> Result<Index> {
let mut options = EnvOpenOptions::new();
-options.map_size(self.index_size);
+options.map_size(clamp_to_page_size(self.index_size));
options.max_readers(1024);
Ok(Index::new(options, path)?)
}

index-scheduler/src/lib.rs

@@ -54,7 +54,7 @@ use utils::{filter_out_references_to_newer_tasks, keep_tasks_within_datetimes, m
use uuid::Uuid;
use crate::index_mapper::IndexMapper;
-use crate::utils::check_index_swap_validity;
+use crate::utils::{check_index_swap_validity, clamp_to_page_size};
pub(crate) type BEI128 =
meilisearch_types::heed::zerocopy::I128<meilisearch_types::heed::byteorder::BE>;
@@ -361,7 +361,7 @@ impl IndexScheduler {
let env = heed::EnvOpenOptions::new()
.max_dbs(10)
-.map_size(options.task_db_size)
+.map_size(clamp_to_page_size(options.task_db_size))
.open(options.tasks_path)?;
let file_store = FileStore::new(&options.update_file_path)?;
@@ -1111,8 +1111,8 @@ mod tests {
indexes_path: tempdir.path().join("indexes"),
snapshots_path: tempdir.path().join("snapshots"),
dumps_path: tempdir.path().join("dumps"),
-task_db_size: 1024 * 1024, // 1 MiB
-index_size: 1024 * 1024, // 1 MiB
+task_db_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
+index_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
indexer_config: IndexerConfig::default(),
autobatching_enabled,
};

index-scheduler/src/utils.rs

@@ -324,6 +324,11 @@ pub(crate) fn check_index_swap_validity(task: &Task) -> Result<()> {
Ok(())
}
+
+/// Clamp the provided value to be a multiple of system page size.
+pub fn clamp_to_page_size(size: usize) -> usize {
+size / page_size::get() * page_size::get()
+}
#[cfg(test)]
impl IndexScheduler {
/// Asserts that the index scheduler's content is internally consistent.

meili-snap/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "meili-snap"
-version = "0.30.0"
+version = "0.30.4"
edition = "2021"
[dependencies]

meilisearch-auth/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "meilisearch-auth"
-version = "0.30.0"
+version = "0.30.4"
edition = "2021"
[dependencies]

meilisearch-http/Cargo.toml

@@ -4,7 +4,7 @@ description = "Meilisearch HTTP server"
edition = "2021"
license = "MIT"
name = "meilisearch-http"
-version = "0.30.0"
+version = "0.30.4"
[[bin]]
name = "meilisearch"

meilisearch-http/src/analytics/mod.rs

@@ -39,7 +39,7 @@ const ANALYTICS_HEADER: &str = "X-Meilisearch-Client";
/// Write the instance-uid in the `data.ms` and in `~/.config/MeiliSearch/path-to-db-instance-uid`. Ignore the errors.
fn write_user_id(db_path: &Path, user_id: &InstanceUid) {
-let _ = fs::write(db_path.join("instance-uid"), user_id.as_bytes());
+let _ = fs::write(db_path.join("instance-uid"), user_id.to_string());
if let Some((meilisearch_config_path, user_id_path)) =
MEILISEARCH_CONFIG_PATH.as_ref().zip(config_user_id_path(db_path))
{

meilisearch-http/src/lib.rs

@@ -108,75 +108,43 @@ pub fn create_app(
.wrap(middleware::NormalizePath::new(middleware::TrailingSlash::Trim))
}
-// TODO: TAMO: Finish setting up things
+enum OnFailure {
+RemoveDb,
+KeepDb,
+}
pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, AuthController)> {
-// we don't want to create anything in the data.ms yet, thus we
-// wrap our two builders in a closure that'll be executed later.
-let auth_controller_builder = || AuthController::new(&opt.db_path, &opt.master_key);
-let index_scheduler_builder = || {
-IndexScheduler::new(IndexSchedulerOptions {
-version_file_path: opt.db_path.join(VERSION_FILE_NAME),
-auth_path: opt.db_path.join("auth"),
-tasks_path: opt.db_path.join("tasks"),
-update_file_path: opt.db_path.join("update_files"),
-indexes_path: opt.db_path.join("indexes"),
-snapshots_path: opt.snapshot_dir.clone(),
-dumps_path: opt.dumps_dir.clone(),
-task_db_size: opt.max_task_db_size.get_bytes() as usize,
-index_size: opt.max_index_size.get_bytes() as usize,
-indexer_config: (&opt.indexer_options).try_into()?,
-autobatching_enabled: !opt.scheduler_options.disable_auto_batching,
-})
-};
-enum OnFailure {
-RemoveDb,
-KeepDb,
-}
-let meilisearch_builder = |on_failure: OnFailure| -> anyhow::Result<_> {
-// if anything wrong happens we delete the `data.ms` entirely.
-match (
-index_scheduler_builder().map_err(anyhow::Error::from),
-auth_controller_builder().map_err(anyhow::Error::from),
-create_version_file(&opt.db_path).map_err(anyhow::Error::from),
-) {
-(Ok(i), Ok(a), Ok(())) => Ok((i, a)),
-(Err(e), _, _) | (_, Err(e), _) | (_, _, Err(e)) => {
-if matches!(on_failure, OnFailure::RemoveDb) {
-std::fs::remove_dir_all(&opt.db_path)?;
-}
-Err(e)
-}
-}
-};
let empty_db = is_empty_db(&opt.db_path);
let (index_scheduler, auth_controller) = if let Some(ref snapshot_path) = opt.import_snapshot {
let snapshot_path_exists = snapshot_path.exists();
// the db is empty and the snapshot exists, import it
if empty_db && snapshot_path_exists {
match compression::from_tar_gz(snapshot_path, &opt.db_path) {
-Ok(()) => meilisearch_builder(OnFailure::RemoveDb)?,
+Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?,
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
return Err(e);
}
}
+// the db already exists and we should not ignore the snapshot => throw an error
} else if !empty_db && !opt.ignore_snapshot_if_db_exists {
bail!(
"database already exists at {:?}, try to delete it or rename it",
opt.db_path.canonicalize().unwrap_or_else(|_| opt.db_path.to_owned())
)
+// the snapshot doesn't exist and we can't ignore it => throw an error
} else if !snapshot_path_exists && !opt.ignore_missing_snapshot {
bail!("snapshot doesn't exist at {}", snapshot_path.display())
+// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
} else {
-meilisearch_builder(OnFailure::RemoveDb)?
+open_or_create_database(opt, empty_db)?
}
} else if let Some(ref path) = opt.import_dump {
let src_path_exists = path.exists();
// the db is empty and the dump exists, import it
if empty_db && src_path_exists {
let (mut index_scheduler, mut auth_controller) =
-meilisearch_builder(OnFailure::RemoveDb)?;
+open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?;
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
Ok(()) => (index_scheduler, auth_controller),
Err(e) => {
@@ -184,29 +152,22 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Auth
return Err(e);
}
}
+// the db already exists and we should not ignore the dump option => throw an error
} else if !empty_db && !opt.ignore_dump_if_db_exists {
bail!(
"database already exists at {:?}, try to delete it or rename it",
opt.db_path.canonicalize().unwrap_or_else(|_| opt.db_path.to_owned())
)
+// the dump doesn't exist and we can't ignore it => throw an error
} else if !src_path_exists && !opt.ignore_missing_dump {
bail!("dump doesn't exist at {:?}", path)
+// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
+// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
} else {
-let (mut index_scheduler, mut auth_controller) =
-meilisearch_builder(OnFailure::RemoveDb)?;
-match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
-Ok(()) => (index_scheduler, auth_controller),
-Err(e) => {
-std::fs::remove_dir_all(&opt.db_path)?;
-return Err(e);
-}
-}
+open_or_create_database(opt, empty_db)?
}
} else {
if !empty_db {
check_version_file(&opt.db_path)?;
}
-meilisearch_builder(OnFailure::KeepDb)?
+open_or_create_database(opt, empty_db)?
};
// We create a loop in a thread that registers snapshotCreation tasks
@@ -228,6 +189,57 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Auth
Ok((index_scheduler, auth_controller))
}
+/// Try to start the IndexScheduler and AuthController without checking the VERSION file or anything.
+fn open_or_create_database_unchecked(
+opt: &Opt,
+on_failure: OnFailure,
+) -> anyhow::Result<(IndexScheduler, AuthController)> {
+// we don't want to create anything in the data.ms yet, thus we
+// wrap our two builders in a closure that'll be executed later.
+let auth_controller = AuthController::new(&opt.db_path, &opt.master_key);
+let index_scheduler_builder = || -> anyhow::Result<_> {
+Ok(IndexScheduler::new(IndexSchedulerOptions {
+version_file_path: opt.db_path.join(VERSION_FILE_NAME),
+auth_path: opt.db_path.join("auth"),
+tasks_path: opt.db_path.join("tasks"),
+update_file_path: opt.db_path.join("update_files"),
+indexes_path: opt.db_path.join("indexes"),
+snapshots_path: opt.snapshot_dir.clone(),
+dumps_path: opt.dumps_dir.clone(),
+task_db_size: opt.max_task_db_size.get_bytes() as usize,
+index_size: opt.max_index_size.get_bytes() as usize,
+indexer_config: (&opt.indexer_options).try_into()?,
+autobatching_enabled: !opt.scheduler_options.disable_auto_batching,
+})?)
+};
+match (
+index_scheduler_builder(),
+auth_controller.map_err(anyhow::Error::from),
+create_version_file(&opt.db_path).map_err(anyhow::Error::from),
+) {
+(Ok(i), Ok(a), Ok(())) => Ok((i, a)),
+(Err(e), _, _) | (_, Err(e), _) | (_, _, Err(e)) => {
+if matches!(on_failure, OnFailure::RemoveDb) {
+std::fs::remove_dir_all(&opt.db_path)?;
+}
+Err(e)
+}
+}
+}
+/// Ensure you're in a valid state and open the IndexScheduler + AuthController for you.
+fn open_or_create_database(
+opt: &Opt,
+empty_db: bool,
+) -> anyhow::Result<(IndexScheduler, AuthController)> {
+if !empty_db {
+check_version_file(&opt.db_path)?;
+}
+open_or_create_database_unchecked(opt, OnFailure::KeepDb)
+}
fn import_dump(
db_path: &Path,
dump_path: &Path,

meilisearch-types/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "meilisearch-types"
-version = "0.30.0"
+version = "0.30.4"
authors = ["marin <postma.marin@protonmail.com>"]
edition = "2021"
@@ -12,7 +12,7 @@ either = { version = "1.6.1", features = ["serde"] }
enum-iterator = "1.1.3"
flate2 = "1.0.24"
fst = "0.4.7"
-milli = { git = "https://github.com/meilisearch/milli.git", tag = "v0.37.0", default-features = false }
+milli = { git = "https://github.com/meilisearch/milli.git", tag = "v0.37.4", default-features = false }
proptest = { version = "1.0.0", optional = true }
proptest-derive = { version = "0.3.0", optional = true }
roaring = { version = "0.10.0", features = ["serde"] }

permissive-json-pointer/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "permissive-json-pointer"
-version = "0.30.0"
+version = "0.30.4"
edition = "2021"
description = "A permissive json pointer"
readme = "README.md"