Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-11-22 20:56:04 +00:00)

Compare commits: delta-enco ... change-net (32 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 1bf6e5030a |  |
|  | b757db5cf6 |  |
|  | 47f87ba17d |  |
|  | ea70a7d1c9 |  |
|  | 9304f8e586 |  |
|  | 495db080ec |  |
|  | d71341fa48 |  |
|  | 5b3070d8c3 |  |
|  | 89006fd4b3 |  |
|  | 49f50a0a21 |  |
|  | 1104f00803 |  |
|  | 33fa564a9c |  |
|  | a097b254f8 |  |
|  | 54cb0ec437 |  |
|  | 38ed1f1dbb |  |
|  | 643dd33358 |  |
|  | 32f9fb6ab2 |  |
|  | b5966f82e8 |  |
|  | 5e54063aab |  |
|  | 40456795d0 |  |
|  | 40e60c6f52 |  |
|  | eeae6383d0 |  |
|  | 9f7172f6ab |  |
|  | d6eca83cfa |  |
|  | fc3508c8c8 |  |
|  | 747476a225 |  |
|  | 4b72e54ca7 |  |
|  | adef2cc132 |  |
|  | 533b9951b1 |  |
|  | 9103cbc9db |  |
|  | 083de2bfc1 |  |
|  | 08bc982748 |  |
Cargo.lock (generated, 230 changed lines)
@@ -310,6 +310,7 @@ dependencies = [
"const-random",
"getrandom 0.3.3",
"once_cell",
"serde",
"version_check",
"zerocopy",
]
@@ -344,12 +345,6 @@ version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"

[[package]]
name = "allocator-api2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c583acf993cf4245c4acb0a2cc2ab1f9cc097de73411bb6d3647ff6af2b1013d"

[[package]]
name = "anes"
version = "0.1.6"
@@ -492,7 +487,7 @@ dependencies = [
"backoff",
"base64 0.22.1",
"bytes",
"derive_builder 0.20.2",
"derive_builder",
"eventsource-stream",
"futures",
"rand 0.8.5",
@@ -589,7 +584,7 @@ source = "git+https://github.com/meilisearch/bbqueue#cbb87cc707b5af415ef203bdaf2

[[package]]
name = "benchmarks"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"anyhow",
"bumpalo",
@@ -799,7 +794,7 @@ dependencies = [

[[package]]
name = "build-info"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"anyhow",
"time",
@@ -812,7 +807,7 @@ version = "3.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
dependencies = [
"allocator-api2 0.2.21",
"allocator-api2",
"serde",
]

@@ -822,7 +817,7 @@ version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ce682bdc86c2e25ef5cd95881d9d6a1902214eddf74cf9ffea88fe1464377e8"
dependencies = [
"allocator-api2 0.2.21",
"allocator-api2",
"bitpacking",
"bumpalo",
"hashbrown 0.15.5",
@@ -945,7 +940,7 @@ dependencies = [
"rand 0.9.2",
"rand_distr",
"rayon",
"safetensors",
"safetensors 0.4.5",
"thiserror 1.0.69",
"ug",
"ug-cuda",
@@ -972,7 +967,7 @@ dependencies = [
"half",
"num-traits",
"rayon",
"safetensors",
"safetensors 0.4.5",
"serde",
"thiserror 1.0.69",
]
@@ -1052,6 +1047,15 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "castaway"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a"
dependencies = [
"rustversion",
]

[[package]]
name = "cc"
version = "1.2.37"
@@ -1214,7 +1218,7 @@ dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim 0.11.1",
"strsim",
]

[[package]]
@@ -1253,6 +1257,21 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"

[[package]]
name = "compact_str"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a"
dependencies = [
"castaway",
"cfg-if",
"itoa",
"rustversion",
"ryu",
"serde",
"static_assertions",
]

[[package]]
name = "concat-arrays"
version = "0.1.2"
@@ -1511,38 +1530,14 @@ dependencies = [
"libloading",
]

[[package]]
name = "darling"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
dependencies = [
"darling_core 0.14.4",
"darling_macro 0.14.4",
]

[[package]]
name = "darling"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core 0.20.11",
"darling_macro 0.20.11",
]

[[package]]
name = "darling_core"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.10.0",
"syn 1.0.109",
"darling_core",
"darling_macro",
]

[[package]]
@@ -1555,28 +1550,17 @@ dependencies = [
"ident_case",
"proc-macro2",
"quote",
"strsim 0.11.1",
"strsim",
"syn 2.0.106",
]

[[package]]
name = "darling_macro"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
dependencies = [
"darling_core 0.14.4",
"quote",
"syn 1.0.109",
]

[[package]]
name = "darling_macro"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core 0.20.11",
"darling_core",
"quote",
"syn 2.0.106",
]
@@ -1586,6 +1570,9 @@ name = "dary_heap"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728"
dependencies = [
"serde",
]

[[package]]
name = "deadpool"
@@ -1641,34 +1628,13 @@ dependencies = [
"syn 2.0.106",
]

[[package]]
name = "derive_builder"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8"
dependencies = [
"derive_builder_macro 0.12.0",
]

[[package]]
name = "derive_builder"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
dependencies = [
"derive_builder_macro 0.20.2",
]

[[package]]
name = "derive_builder_core"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f"
dependencies = [
"darling 0.14.4",
"proc-macro2",
"quote",
"syn 1.0.109",
"derive_builder_macro",
]

[[package]]
@@ -1677,29 +1643,19 @@ version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
dependencies = [
"darling 0.20.11",
"darling",
"proc-macro2",
"quote",
"syn 2.0.106",
]

[[package]]
name = "derive_builder_macro"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e"
dependencies = [
"derive_builder_core 0.12.0",
"syn 1.0.109",
]

[[package]]
name = "derive_builder_macro"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
dependencies = [
"derive_builder_core 0.20.2",
"derive_builder_core",
"syn 2.0.106",
]

@@ -1738,7 +1694,7 @@ dependencies = [
"serde-cs",
"serde_json",
"serde_urlencoded",
"strsim 0.11.1",
"strsim",
]

[[package]]
@@ -1828,7 +1784,7 @@ dependencies = [

[[package]]
name = "dump"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"anyhow",
"big_s",
@@ -2071,7 +2027,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"

[[package]]
name = "file-store"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"tempfile",
"thiserror 2.0.16",
@@ -2093,7 +2049,7 @@ dependencies = [

[[package]]
name = "filter-parser"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"insta",
"levenshtein_automata",
@@ -2121,7 +2077,7 @@ dependencies = [

[[package]]
name = "flatten-serde-json"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"criterion",
"serde_json",
@@ -2278,7 +2234,7 @@ dependencies = [

[[package]]
name = "fuzzers"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"arbitrary",
"bumpalo",
@@ -2804,7 +2760,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
dependencies = [
"ahash 0.8.12",
"allocator-api2 0.2.21",
"allocator-api2",
]

[[package]]
@@ -2813,7 +2769,7 @@ version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
dependencies = [
"allocator-api2 0.2.21",
"allocator-api2",
"equivalent",
"foldhash",
"serde",
@@ -3232,7 +3188,7 @@ dependencies = [

[[package]]
name = "index-scheduler"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"anyhow",
"backoff",
@@ -3245,7 +3201,7 @@ dependencies = [
"convert_case 0.8.0",
"crossbeam-channel",
"csv",
"derive_builder 0.20.2",
"derive_builder",
"dump",
"enum-iterator",
"file-store",
@@ -3412,15 +3368,6 @@ dependencies = [
"either",
]

[[package]]
name = "itertools"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
dependencies = [
"either",
]

[[package]]
name = "itertools"
version = "0.13.0"
@@ -3514,7 +3461,7 @@ dependencies = [

[[package]]
name = "json-depth-checker"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"criterion",
"serde_json",
@@ -3765,7 +3712,7 @@ dependencies = [
"bincode 2.0.1",
"byteorder",
"csv",
"derive_builder 0.20.2",
"derive_builder",
"encoding",
"encoding_rs",
"encoding_rs_io",
@@ -4033,7 +3980,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"

[[package]]
name = "meili-snap"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"insta",
"md5",
@@ -4044,7 +3991,7 @@ dependencies = [

[[package]]
name = "meilisearch"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"actix-cors",
"actix-http",
@@ -4141,7 +4088,7 @@ dependencies = [

[[package]]
name = "meilisearch-auth"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"base64 0.22.1",
"enum-iterator",
@@ -4160,7 +4107,7 @@ dependencies = [

[[package]]
name = "meilisearch-types"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"actix-web",
"anyhow",
@@ -4195,7 +4142,7 @@ dependencies = [

[[package]]
name = "meilitool"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"anyhow",
"clap",
@@ -4229,9 +4176,8 @@ dependencies = [

[[package]]
name = "milli"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"allocator-api2 0.3.1",
"arroy",
"bbqueue",
"big_s",
@@ -4289,6 +4235,7 @@ dependencies = [
"roaring 0.10.12",
"rstar",
"rustc-hash 2.1.1",
"safetensors 0.6.2",
"serde",
"serde_json",
"slice-group-by",
@@ -4810,7 +4757,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"

[[package]]
name = "permissive-json-pointer"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"big_s",
"serde_json",
@@ -5399,12 +5346,12 @@ dependencies = [

[[package]]
name = "rayon-cond"
version = "0.3.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "059f538b55efd2309c9794130bc149c6a553db90e9d99c2030785c82f0bd7df9"
checksum = "2964d0cf57a3e7a06e8183d14a8b527195c706b7983549cd5462d5aa3747438f"
dependencies = [
"either",
"itertools 0.11.0",
"itertools 0.14.0",
"rayon",
]

@@ -5825,6 +5772,16 @@ dependencies = [
"serde_json",
]

[[package]]
name = "safetensors"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "172dd94c5a87b5c79f945c863da53b2ebc7ccef4eca24ac63cca66a41aab2178"
dependencies = [
"serde",
"serde_json",
]

[[package]]
name = "same-file"
version = "1.0.6"
@@ -6306,12 +6263,6 @@ dependencies = [
"indexmap",
]

[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"

[[package]]
name = "strsim"
version = "0.11.1"
@@ -6637,21 +6588,24 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

[[package]]
name = "tokenizers"
version = "0.15.2"
source = "git+https://github.com/huggingface/tokenizers.git?tag=v0.15.2#701a73b869602b5639589d197e805349cdba3223"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6475a27088c98ea96d00b39a9ddfb63780d1ad4cceb6f48374349a96ab2b7842"
dependencies = [
"ahash 0.8.12",
"aho-corasick",
"derive_builder 0.12.0",
"compact_str",
"dary_heap",
"derive_builder",
"esaxx-rs",
"getrandom 0.2.16",
"itertools 0.12.1",
"lazy_static",
"getrandom 0.3.3",
"itertools 0.14.0",
"log",
"macro_rules_attribute",
"monostate",
"onig",
"paste",
"rand 0.8.5",
"rand 0.9.2",
"rayon",
"rayon-cond",
"regex",
@@ -6659,7 +6613,7 @@ dependencies = [
"serde",
"serde_json",
"spm_precompiled",
"thiserror 1.0.69",
"thiserror 2.0.16",
"unicode-normalization-alignments",
"unicode-segmentation",
"unicode_categories",
@@ -7021,7 +6975,7 @@ dependencies = [
"num-traits",
"num_cpus",
"rayon",
"safetensors",
"safetensors 0.4.5",
"serde",
"thiserror 1.0.69",
"tracing",
@@ -7251,7 +7205,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777"
dependencies = [
"anyhow",
"derive_builder 0.20.2",
"derive_builder",
"rustversion",
"vergen-lib",
]
@@ -7263,7 +7217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1"
dependencies = [
"anyhow",
"derive_builder 0.20.2",
"derive_builder",
"git2",
"rustversion",
"time",
@@ -7278,7 +7232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166"
dependencies = [
"anyhow",
"derive_builder 0.20.2",
"derive_builder",
"rustversion",
]

@@ -7925,7 +7879,7 @@ dependencies = [

[[package]]
name = "xtask"
version = "1.24.0"
version = "1.26.0"
dependencies = [
"anyhow",
"build-info",
@@ -23,7 +23,7 @@ members = [
]

[workspace.package]
version = "1.24.0"
version = "1.26.0"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",
@@ -9,8 +9,9 @@ use meilisearch_types::error::ResponseError;
use meilisearch_types::keys::Key;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::settings::Unchecked;
use meilisearch_types::tasks::enterprise_edition::network::{NetworkTopologyChange, DbTaskNetwork};
use meilisearch_types::tasks::{
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId, TaskNetwork,
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId,
};
use meilisearch_types::InstanceUid;
use roaring::RoaringBitmap;
@@ -95,7 +96,9 @@ pub struct TaskDump {
)]
pub finished_at: Option<OffsetDateTime>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<TaskNetwork>,
pub network: Option<DbTaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
}

// A `Kind` specific version made for the dump. If modified you may break the dump.
@@ -161,6 +164,7 @@ pub enum KindDump {
IndexCompaction {
index_uid: String,
},
NetworkTopologyChange(NetworkTopologyChange),
}

impl From<Task> for TaskDump {
@@ -178,6 +182,7 @@ impl From<Task> for TaskDump {
started_at: task.started_at,
finished_at: task.finished_at,
network: task.network,
custom_metadata: task.custom_metadata,
}
}
}
@@ -246,6 +251,9 @@ impl From<KindWithContent> for KindDump {
KindWithContent::IndexCompaction { index_uid } => {
KindDump::IndexCompaction { index_uid }
}
KindWithContent::NetworkTopologyChange(network_topology_change) => {
KindDump::NetworkTopologyChange(network_topology_change)
}
}
}
}
@@ -396,6 +404,7 @@ pub(crate) mod test {
started_at: Some(datetime!(2022-11-20 0:00 UTC)),
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
network: None,
custom_metadata: None,
},
None,
),
@@ -421,6 +430,7 @@ pub(crate) mod test {
started_at: None,
finished_at: None,
network: None,
custom_metadata: None,
},
Some(vec![
json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
@@ -441,6 +451,7 @@ pub(crate) mod test {
started_at: None,
finished_at: None,
network: None,
custom_metadata: None,
},
None,
),
@@ -554,7 +565,8 @@ pub(crate) mod test {
Network {
local: Some("myself".to_string()),
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
sharding: false,
leader: None,
version: Default::default(),
}
}

@@ -164,6 +164,7 @@ impl CompatV5ToV6 {
started_at: task_view.started_at,
finished_at: task_view.finished_at,
network: None,
custom_metadata: None,
};

(task, content_file)

@@ -150,6 +150,7 @@ impl<'a> Dump<'a> {
details: task.details,
status: task.status,
network: task.network,
custom_metadata: task.custom_metadata,
kind: match task.kind {
KindDump::DocumentImport {
primary_key,
@@ -237,6 +238,9 @@ impl<'a> Dump<'a> {
KindDump::IndexCompaction { index_uid } => {
KindWithContent::IndexCompaction { index_uid }
}
KindDump::NetworkTopologyChange(network_topology_change) => {
KindWithContent::NetworkTopologyChange(network_topology_change)
}
},
};
@@ -3,10 +3,13 @@ use std::fmt::Display;
use meilisearch_types::batches::BatchId;
use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::milli::index::RollbackOutcome;
use meilisearch_types::milli::DocumentId;
use meilisearch_types::tasks::enterprise_edition::network::ReceiveTaskError;
use meilisearch_types::tasks::{Kind, Status};
use meilisearch_types::{heed, milli};
use reqwest::StatusCode;
use thiserror::Error;
use uuid::Uuid;

use crate::TaskId;

@@ -191,6 +194,15 @@ pub enum Error {
#[error(transparent)]
HeedTransaction(heed::Error),

#[error("No network topology change task is currently enqueued or processing")]
ImportTaskWithoutNetworkTask,
#[error("The network task version (`{network_task}`) does not match the import task version (`{import_task}`)")]
NetworkVersionMismatch { network_task: Uuid, import_task: Uuid },
#[error("The import task emanates from an unknown remote `{0}`")]
ImportTaskUnknownRemote(String),
#[error("The import task with key `{0}` was already received")]
ImportTaskAlreadyReceived(DocumentId),

#[cfg(test)]
#[error("Planned failure for tests.")]
PlannedFailure,
@@ -248,6 +260,10 @@ impl Error {
| Error::Persist(_)
| Error::FeatureNotEnabled(_)
| Error::Export(_)
| Error::ImportTaskWithoutNetworkTask
| Error::NetworkVersionMismatch { .. }
| Error::ImportTaskAlreadyReceived(_)
| Error::ImportTaskUnknownRemote(_)
| Error::Anyhow(_) => true,
Error::CreateBatch(_)
| Error::CorruptedTaskQueue
@@ -307,6 +323,10 @@ impl ErrorCode for Error {
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
Error::ImportTaskWithoutNetworkTask => Code::ImportTaskWithoutNetworkTask,
Error::NetworkVersionMismatch { .. } => Code::NetworkVersionMismatch,
Error::ImportTaskAlreadyReceived(_) => Code::ImportTaskAlreadyReceived,
Error::ImportTaskUnknownRemote(_) => Code::ImportTaskUnknownRemote,
Error::S3Error { status, .. } if status.is_client_error() => {
Code::InvalidS3SnapshotRequest
}
@@ -345,3 +365,12 @@ impl ErrorCode for Error {
}
}
}

impl From<ReceiveTaskError> for Error {
fn from(value: ReceiveTaskError) -> Self {
match value {
ReceiveTaskError::UnknownRemote(unknown) => Error::ImportTaskUnknownRemote(unknown),
ReceiveTaskError::DuplicateTask(dup) => Error::ImportTaskAlreadyReceived(dup),
}
}
}

@@ -361,6 +361,11 @@ impl IndexMapper {
Ok(())
}

/// The number of indexes in the database
pub fn index_count(&self, rtxn: &RoTxn) -> Result<u64> {
Ok(self.index_mapping.len(rtxn)?)
}

/// Return an index, may open it if it wasn't already opened.
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
if let Some((current_name, current_index)) =

@@ -21,7 +21,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
let IndexScheduler {
cleanup_enabled: _,
experimental_no_edition_2024_for_dumps: _,
processing_tasks,
runtime_tasks,
env,
version,
queue,
@@ -50,7 +50,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
snap.push_str(&format!("index scheduler running on version {indx_sched_version:?}\n"));
}

let processing = processing_tasks.read().unwrap().clone();
let processing = runtime_tasks.read().unwrap().processing.clone();
snap.push_str(&format!("### Autobatching Enabled = {}\n", scheduler.autobatching_enabled));
snap.push_str(&format!(
"### Processing batch {:?}:\n",
@@ -232,6 +232,7 @@ pub fn snapshot_task(task: &Task) -> String {
status,
kind,
network,
custom_metadata,
} = task;
snap.push('{');
snap.push_str(&format!("uid: {uid}, "));
@@ -252,6 +253,9 @@ pub fn snapshot_task(task: &Task) -> String {
if let Some(network) = network {
snap.push_str(&format!("network: {network:?}, "))
}
if let Some(custom_metadata) = custom_metadata {
snap.push_str(&format!("custom_metadata: {custom_metadata:?}"))
}

snap.push('}');
snap
@@ -321,6 +325,9 @@ fn snapshot_details(d: &Details) -> String {
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
}
Details::NetworkTopologyChange { moved_documents, received_documents, message } => {
format!("{{ moved_documents: {moved_documents:?}, received_documents: {received_documents:?}, message: {message:?}")
}
}
}
@@ -68,10 +68,10 @@ use meilisearch_types::milli::vector::{
};
use meilisearch_types::milli::{self, Index};
use meilisearch_types::task_view::TaskView;
use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
use meilisearch_types::tasks::enterprise_edition::network::{DbTaskNetwork, TaskNetwork};
use meilisearch_types::tasks::{KindWithContent, Task};
use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
use milli::vector::db::IndexEmbeddingConfig;
use processing::ProcessingTasks;
pub use queue::Query;
use queue::Queue;
use roaring::RoaringBitmap;
@@ -82,6 +82,7 @@ use uuid::Uuid;
use versioning::Versioning;

use crate::index_mapper::IndexMapper;
use crate::processing::RuntimeTasks;
use crate::utils::clamp_to_page_size;

pub(crate) type BEI128 = I128<BE>;
@@ -163,7 +164,7 @@ pub struct IndexScheduler {
pub(crate) env: Env<WithoutTls>,

/// The list of tasks currently processing
pub(crate) processing_tasks: Arc<RwLock<ProcessingTasks>>,
pub(crate) runtime_tasks: Arc<RwLock<RuntimeTasks>>,

/// A database containing only the version of the index-scheduler
pub version: versioning::Versioning,
@@ -225,7 +226,7 @@ impl IndexScheduler {
fn private_clone(&self) -> IndexScheduler {
IndexScheduler {
env: self.env.clone(),
processing_tasks: self.processing_tasks.clone(),
runtime_tasks: self.runtime_tasks.clone(),
version: self.version.clone(),
queue: self.queue.private_clone(),
scheduler: self.scheduler.private_clone(),
@@ -331,7 +332,7 @@ impl IndexScheduler {
wtxn.commit()?;

Ok(Self {
processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
runtime_tasks: Arc::new(RwLock::new(RuntimeTasks::new())),
version,
queue,
scheduler: Scheduler::new(&options, auth_env),
@@ -639,19 +640,19 @@ impl IndexScheduler {
/// 3. The number of times the properties appeared.
pub fn get_stats(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
let rtxn = self.read_txn()?;
self.queue.get_stats(&rtxn, &self.processing_tasks.read().unwrap())
self.queue.get_stats(&rtxn, &self.runtime_tasks.read().unwrap().processing)
}

// Return true if there is at least one task that is processing.
pub fn is_task_processing(&self) -> Result<bool> {
Ok(!self.processing_tasks.read().unwrap().processing.is_empty())
Ok(!self.runtime_tasks.read().unwrap().processing.processing.is_empty())
}

/// Return true iff there is at least one task associated with this index
/// that is processing.
pub fn is_index_processing(&self, index: &str) -> Result<bool> {
let rtxn = self.env.read_txn()?;
let processing_tasks = self.processing_tasks.read().unwrap().processing.clone();
let processing_tasks = self.runtime_tasks.read().unwrap().processing.processing.clone();
let index_tasks = self.queue.tasks.index_tasks(&rtxn, index)?;
let nbr_index_processing_tasks = processing_tasks.intersection_len(&index_tasks);
Ok(nbr_index_processing_tasks > 0)
@@ -677,8 +678,8 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(Vec<Task>, u64)> {
let rtxn = self.read_txn()?;
let processing = self.processing_tasks.read().unwrap();
self.queue.get_tasks_from_authorized_indexes(&rtxn, query, filters, &processing)
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_tasks_from_authorized_indexes(&rtxn, query, filters, processing)
}

/// Return the task ids matching the query along with the total number of tasks
@@ -696,18 +697,18 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(RoaringBitmap, u64)> {
let rtxn = self.read_txn()?;
let processing = self.processing_tasks.read().unwrap();
self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, processing)
}

pub fn set_task_network(&self, task_id: TaskId, network: TaskNetwork) -> Result<()> {
pub fn set_task_network(&self, task_id: TaskId, network: DbTaskNetwork) -> Result<Task> {
let mut wtxn = self.env.write_txn()?;
let mut task =
self.queue.tasks.get_task(&wtxn, task_id)?.ok_or(Error::TaskNotFound(task_id))?;
task.network = Some(network);
self.queue.tasks.all_tasks.put(&mut wtxn, &task_id, &task)?;
wtxn.commit()?;
Ok(())
Ok(task)
}

/// Return the batches matching the query from the user's point of view along
@@ -725,8 +726,8 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(Vec<Batch>, u64)> {
let rtxn = self.read_txn()?;
let processing = self.processing_tasks.read().unwrap();
self.queue.get_batches_from_authorized_indexes(&rtxn, query, filters, &processing)
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_batches_from_authorized_indexes(&rtxn, query, filters, processing)
}

/// Return the batch ids matching the query along with the total number of batches
@@ -744,8 +745,8 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(RoaringBitmap, u64)> {
let rtxn = self.read_txn()?;
let processing = self.processing_tasks.read().unwrap();
self.queue.get_batch_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_batch_ids_from_authorized_indexes(&rtxn, query, filters, processing)
}

/// Register a new task in the scheduler.
@@ -756,6 +757,31 @@ impl IndexScheduler {
kind: KindWithContent,
task_id: Option<TaskId>,
dry_run: bool,
) -> Result<Task> {
self.register_with_custom_metadata(kind, task_id, None, dry_run, None)
}

/// Register a new task in the scheduler, with metadata.
///
/// If it fails and data was associated with the task, it tries to delete the associated data.
///
/// # Parameters
///
/// - task_network: network of the task to check.
///
/// If the task is an import task, only accept it if:
///
/// 1. There is an ongoing network topology change task
/// 2. The task to register matches the network version of the network topology change task
///
/// Always accept the task if it is not an import task.
pub fn register_with_custom_metadata(
&self,
kind: KindWithContent,
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
task_network: Option<TaskNetwork>,
) -> Result<Task> {
// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
@@ -766,13 +792,86 @@ impl IndexScheduler {
}

let mut wtxn = self.env.write_txn()?;
let task = self.queue.register(&mut wtxn, &kind, task_id, dry_run)?;

if let Some(TaskNetwork::Import { import_from, network_change, metadata }) = &task_network {
let mut network_tasks = self
.queue
.tasks
.get_kind(&wtxn, meilisearch_types::tasks::Kind::NetworkTopologyChange)?;
if network_tasks.is_empty() {
return Err(Error::ImportTaskWithoutNetworkTask);
}

let network_task = {
let processing = self.runtime_tasks.read().unwrap().processing.processing.clone();
if processing.is_disjoint(&network_tasks) {
let enqueued = self
.queue
.tasks
.get_status(&wtxn, meilisearch_types::tasks::Status::Enqueued)?;

network_tasks &= enqueued;
if let Some(network_task) = network_tasks.into_iter().next() {
network_task
} else {
return Err(Error::ImportTaskWithoutNetworkTask);
}
} else {
network_tasks &= &*processing;
network_tasks.into_iter().next().unwrap()
}
};

let mut network_task = self.queue.tasks.get_task(&wtxn, network_task)?.unwrap();
let network_task_version = network_task
.network
.as_ref()
.map(|network| network.network_version())
.unwrap_or_default();
if network_task_version != network_change.network_version {
return Err(Error::NetworkVersionMismatch {
network_task: network_task_version,
import_task: network_change.network_version,
});
}

let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut network_task.kind
else {
return Err(Error::CorruptedTaskQueue);
};

network_topology_change.receive_remote_task(
&import_from.remote_name,
&import_from.index_name,
metadata.task_key,
import_from.document_count,
metadata.index_count,
metadata.total_index_documents,
)?;

self.queue.tasks.update_task(&mut wtxn, &mut network_task)?;
}

let task = self.queue.register(
&mut wtxn,
&kind,
task_id,
custom_metadata,
dry_run,
task_network.map(DbTaskNetwork::from),
)?;

// If the registered task is a task cancelation
// we inform the processing tasks to stop (if necessary).
if let KindWithContent::TaskCancelation { tasks, .. } = kind {
let tasks_to_cancel = RoaringBitmap::from_iter(tasks);
if self.processing_tasks.read().unwrap().must_cancel_processing_tasks(&tasks_to_cancel)
if self
.runtime_tasks
.read()
.unwrap()
.processing
.must_cancel_processing_tasks(&tasks_to_cancel)
{
self.scheduler.must_stop_processing.must_stop();
}
@@ -6,6 +6,28 @@ use roaring::RoaringBitmap;

use crate::utils::ProcessingBatch;

pub struct RuntimeTasks {
pub processing: ProcessingTasks,
pub enqueued_network: EnqueuedNetworkTasks,
}
impl RuntimeTasks {
pub(crate) fn new() -> Self {
Self { processing: ProcessingTasks::new(), enqueued_network: Default::default() }
}
}

#[derive(Default)]
pub struct EnqueuedNetworkTasks {
tasks: RoaringBitmap,
}

impl EnqueuedNetworkTasks {
pub fn swap(&mut self, mut new: RoaringBitmap) -> RoaringBitmap {
std::mem::swap(&mut self.tasks, &mut new);
new
}
}

#[derive(Clone, Default)]
pub struct ProcessingTasks {
pub batch: Option<Arc<ProcessingBatch>>,

@@ -26,7 +26,7 @@ fn query_batches_from_and_limit() {
handle.advance_n_successful_batches(3);
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "processed_all_tasks");

let proc = index_scheduler.processing_tasks.read().unwrap().clone();
let proc = index_scheduler.runtime_tasks.read().unwrap().processing.clone();
let rtxn = index_scheduler.env.read_txn().unwrap();
let query = Query { limit: Some(0), ..Default::default() };
let (batches, _) = index_scheduler
@@ -359,7 +359,7 @@ fn query_batches_special_rules() {
handle.advance_till([Start, BatchCreated]);

let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap().clone();
let proc = index_scheduler.runtime_tasks.read().unwrap().processing.clone();

let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
let (batches, _) = index_scheduler

@@ -15,6 +15,7 @@ use file_store::FileStore;
use meilisearch_types::batches::BatchId;
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
use meilisearch_types::milli::{CboRoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::enterprise_edition::network::DbTaskNetwork;
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use time::format_description::well_known::Rfc3339;
@@ -257,7 +258,9 @@ impl Queue {
wtxn: &mut RwTxn,
kind: &KindWithContent,
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
network: Option<DbTaskNetwork>,
) -> Result<Task> {
let next_task_id = self.tasks.next_task_id(wtxn)?;

@@ -279,7 +282,8 @@ impl Queue {
details: kind.default_details(),
status: Status::Enqueued,
kind: kind.clone(),
network: None,
network,
custom_metadata,
};
// For deletion and cancelation tasks, we want to make extra sure that they
// don't attempt to delete/cancel tasks that are newer than themselves.
@@ -344,7 +348,9 @@ impl Queue {
tasks: to_delete,
},
None,
None,
false,
None,
)?;

Ok(())

@@ -27,7 +27,7 @@ fn query_tasks_from_and_limit() {
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "processed_all_tasks");

let rtxn = index_scheduler.env.read_txn().unwrap();
let processing = index_scheduler.processing_tasks.read().unwrap();
let processing = &index_scheduler.runtime_tasks.read().unwrap().processing;
let query = Query { limit: Some(0), ..Default::default() };
let (tasks, _) = index_scheduler
.queue
@@ -317,7 +317,7 @@ fn query_tasks_special_rules() {
handle.advance_till([Start, BatchCreated]);

let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;

let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
let (tasks, _) = index_scheduler
@@ -414,7 +414,7 @@ fn query_tasks_canceled_by() {
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "start");

let rtxn = index_scheduler.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let query = Query { canceled_by: Some(vec![task_cancelation.uid]), ..Query::default() };
let (tasks, _) = index_scheduler
.queue
@@ -203,26 +203,30 @@ fn test_disable_auto_deletion_of_tasks() {
)
.unwrap();

let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
drop(rtxn);
drop(proc);
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
}

// now we're above the max number of tasks
// and if we try to advance in the tick function no new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
drop(rtxn);
drop(proc);
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
}
}

#[test]
@@ -267,59 +271,69 @@ fn test_auto_deletion_of_tasks() {
)
.unwrap();

let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
drop(rtxn);
drop(proc);
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
}

// now we're above the max number of tasks
// and if we try to advance in the tick function a new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
drop(rtxn);
drop(proc);
{
// now we're above the max number of tasks
// and if we try to advance in the tick function a new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
}

handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
drop(rtxn);
drop(proc);
{
handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
}

handle.advance_one_failed_batch();
// a new task deletion has been enqueued
handle.advance_one_successful_batch();
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
drop(rtxn);
drop(proc);
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
}

handle.advance_one_failed_batch();
handle.advance_one_successful_batch();
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
drop(rtxn);
drop(proc);
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
}
}

#[test]
@@ -74,6 +74,7 @@ impl From<KindWithContent> for AutobatchKind {
|
||||
| KindWithContent::DumpCreation { .. }
|
||||
| KindWithContent::Export { .. }
|
||||
| KindWithContent::UpgradeDatabase { .. }
|
||||
| KindWithContent::NetworkTopologyChange(_)
|
||||
| KindWithContent::SnapshotCreation => {
|
||||
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ use std::io::ErrorKind;
|
||||
use meilisearch_types::heed::RoTxn;
|
||||
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
||||
use meilisearch_types::settings::{Settings, Unchecked};
use meilisearch_types::tasks::enterprise_edition::network::NetworkTopologyState;
use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use uuid::Uuid;
@@ -59,6 +60,13 @@ pub(crate) enum Batch {
index_uid: String,
task: Task,
},
NetworkIndexBatch {
network_task: Task,
inner_batch: Box<Batch>,
},
NetworkWait {
task: Task,
},
}

#[derive(Debug)]
@@ -140,9 +148,14 @@ impl Batch {
..
} => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
},
Batch::IndexSwap { task } => {
Batch::IndexSwap { task } | Batch::NetworkWait { task } => {
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
}
Batch::NetworkIndexBatch { network_task, inner_batch } => {
let mut tasks = inner_batch.ids();
tasks.insert(network_task.uid);
tasks
}
}
}

@@ -156,12 +169,14 @@ impl Batch {
| Dump(_)
| Export { .. }
| UpgradeDatabase { .. }
| NetworkWait { .. }
| IndexSwap { .. } => None,
IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid, .. }
| IndexCompaction { index_uid, .. } => Some(index_uid),
NetworkIndexBatch { network_task: _, inner_batch } => inner_batch.index_uid(),
}
}
}
@@ -184,6 +199,8 @@ impl fmt::Display for Batch {
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
Batch::Export { .. } => f.write_str("Export")?,
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
Batch::NetworkIndexBatch { .. } => f.write_str("NetworkTopologyChange")?,
Batch::NetworkWait { .. } => f.write_str("NetworkTopologyChange")?,
};
match index_uid {
Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
@@ -460,7 +477,6 @@ impl IndexScheduler {
let mut current_batch = ProcessingBatch::new(batch_id);

let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
let count_total_enqueued = enqueued.len();
let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;

// 0. we get the last task to cancel.
@@ -519,7 +535,15 @@ impl IndexScheduler {
return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
}

// 3. we get the next task to compact
// 3. Check for enqueued network topology changes
let network_changes =
self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)? & enqueued;
if let Some(task_id) = network_changes.iter().next() {
let task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
return self.start_processing_network(rtxn, task, enqueued, current_batch);
}

// 4. we get the next task to compact
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
if let Some(task_id) = to_compact.min() {
let mut task =
@@ -534,7 +558,7 @@ impl IndexScheduler {
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
}

// 4. we batch the export.
// 5. we batch the export.
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
if !to_export.is_empty() {
let task_id = to_export.iter().next().expect("There must be at least one export task");
@@ -545,7 +569,7 @@ impl IndexScheduler {
return Ok(Some((Batch::Export { task }, current_batch)));
}

// 5. we batch the snapshot.
// 6. we batch the snapshot.
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
if !to_snapshot.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
@@ -555,7 +579,7 @@ impl IndexScheduler {
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
}

// 6. we batch the dumps.
// 7. we batch the dumps.
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
if let Some(to_dump) = to_dump.min() {
let mut task =
@@ -568,8 +592,25 @@ impl IndexScheduler {
return Ok(Some((Batch::Dump(task), current_batch)));
}

// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
// 8. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let (batch, current_batch) =
self.create_next_batch_unprioritized(rtxn, &enqueued, current_batch)?;
Ok(batch.map(|batch| (batch, current_batch)))
}

fn create_next_batch_unprioritized(
&self,
rtxn: &RoTxn,
enqueued: &RoaringBitmap,
mut current_batch: ProcessingBatch,
) -> Result<(Option<Batch>, ProcessingBatch)> {
let count_total_enqueued = enqueued.len();

let task_id = if let Some(task_id) = enqueued.min() {
task_id
} else {
return Ok((None, current_batch));
};
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;

@@ -586,7 +627,7 @@ impl IndexScheduler {
kind: Kind::IndexSwap,
id: task.uid,
});
return Ok(Some((Batch::IndexSwap { task }, current_batch)));
return Ok((Some(Batch::IndexSwap { task }), current_batch));
};

let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
@@ -651,7 +692,7 @@ impl IndexScheduler {
autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
{
current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
return Ok(self
let batch = self
.create_next_batch_index(
rtxn,
index_name.to_string(),
@@ -659,11 +700,127 @@ impl IndexScheduler {
&mut current_batch,
create_index,
)?
.map(|batch| (batch, current_batch)));
.map(|batch| batch);
return Ok((batch, current_batch));
}

// If we found no tasks then we were notified for something that got autobatched
// somehow and there is nothing to do.
Ok(None)
Ok((None, current_batch))
}

fn start_processing_network(
&self,
rtxn: &RoTxn,
mut task: Task,
enqueued: &RoaringBitmap,
current_batch: ProcessingBatch,
) -> Result<Option<(Batch, ProcessingBatch)>> {
let change_version =
task.network.as_ref().map(|network| network.network_version()).unwrap_or_default();
let KindWithContent::NetworkTopologyChange(network_topology_change) = &task.kind else {
panic!("inconsistent kind with content")
};
match network_topology_change.state() {
NetworkTopologyState::WaitingForOlderTasks => {
let mut old_tasks = RoaringBitmap::new();
for task_id in enqueued {
let task = self
.queue
.tasks
.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;

let has_index = task.index_uid().is_some();

if !has_index {
continue;
}

let has_older_network_version = task
.network
.map(|network| network.network_version() <= change_version)
// if there is no version, we never retain the task
.unwrap_or_default();

if has_older_network_version {
old_tasks.push(task_id);
}
}

let res = self.create_next_batch_unprioritized(rtxn, &old_tasks, current_batch);
self.runtime_tasks.write().unwrap().enqueued_network.swap(old_tasks);

let (batch, mut current_batch) = res?;

current_batch.processing(Some(&mut task));

let batch = match batch {
Some(batch) => {
let inner_batch = Box::new(batch);

Batch::NetworkIndexBatch { network_task: task, inner_batch }
}
None => Batch::NetworkWait { task },
};

Ok(Some((batch, current_batch)))
}
NetworkTopologyState::ImportingDocuments => {
let mut import_tasks = RoaringBitmap::new();
for task_id in enqueued {
let task = self
.queue
.tasks
.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;

let has_index = task.index_uid().is_some();

if !has_index {
continue;
}

let is_import_task = task
.network
.map(|network| {
network.network_version() == change_version
&& network.import_data().is_some()
})
// if there is no version, we never retain the task
.unwrap_or_default();

if is_import_task {
import_tasks.push(task_id);
}
}

let res = self.create_next_batch_unprioritized(rtxn, &import_tasks, current_batch);
self.runtime_tasks.write().unwrap().enqueued_network.swap(import_tasks);

let (batch, mut current_batch) = res?;

current_batch.processing(Some(&mut task));

let batch = match batch {
Some(batch) => {
let inner_batch = Box::new(batch);

Batch::NetworkIndexBatch { network_task: task, inner_batch }
}
None => Batch::NetworkWait { task },
};

Ok(Some((batch, current_batch)))
}
NetworkTopologyState::ExportingDocuments | NetworkTopologyState::Finished => {
Ok(Some((Batch::NetworkWait { task }, current_batch)))
}
}
}
}

pub enum BatchOutcome {
NoTaskToProcess,
Batch { batch: Batch, processing: ProcessingBatch },
}

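For readers skimming the diff: the `WaitingForOlderTasks` branch above amounts to filtering the enqueued task ids by the network version attached to each task. Below is a minimal, self-contained sketch of that filtering step; `TaskMeta`, `Version`, and `select_older_tasks` are hypothetical stand-ins for illustration only, not types from the index-scheduler.

// Sketch only: models the "wait for older tasks" selection with plain std types.
use std::collections::BTreeMap;

type TaskId = u32;
type Version = u128;

struct TaskMeta {
    has_index: bool,
    network_version: Option<Version>,
}

/// Keep only tasks that target an index and carry a network version older than or
/// equal to the topology change; tasks without a version are never retained.
fn select_older_tasks(enqueued: &BTreeMap<TaskId, TaskMeta>, change_version: Version) -> Vec<TaskId> {
    enqueued
        .iter()
        .filter(|(_, meta)| meta.has_index)
        .filter(|(_, meta)| meta.network_version.map_or(false, |v| v <= change_version))
        .map(|(id, _)| *id)
        .collect()
}

fn main() {
    let mut enqueued = BTreeMap::new();
    enqueued.insert(1, TaskMeta { has_index: true, network_version: Some(1) });
    enqueued.insert(2, TaskMeta { has_index: true, network_version: Some(5) });
    enqueued.insert(3, TaskMeta { has_index: false, network_version: Some(1) });
    // Only task 1 targets an index and is older than the change (version 3).
    assert_eq!(select_older_tasks(&enqueued, 3), vec![1]);
}
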
@@ -200,9 +200,10 @@ impl IndexScheduler {
// We reset the must_stop flag to be sure that we don't stop processing tasks
self.scheduler.must_stop_processing.reset();
let progress = self
.processing_tasks
.runtime_tasks
.write()
.unwrap()
.processing
// We can clone the processing batch here because we don't want its modification to affect the view of the processing batches
.start_processing(processing_batch.clone(), ids.clone());

@@ -453,7 +454,7 @@ impl IndexScheduler {

// We should stop processing AFTER everything is processed and written to disk otherwise, a batch (which only lives in RAM) may appear in the processing task
// and then become « not found » for some time until the commit everything is written and the final commit is made.
self.processing_tasks.write().unwrap().stop_processing();
self.runtime_tasks.write().unwrap().processing.stop_processing();

// Once the tasks are committed, we should delete all the update files associated ASAP to avoid leaking files in case of a restart
tracing::debug!("Deleting the update files");

@@ -1,15 +1,22 @@
use std::collections::{BTreeSet, HashMap, HashSet};
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::fs::{remove_file, File};
use std::io::{ErrorKind, Seek, SeekFrom};
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::Ordering;
use std::time::Duration;

use bumpalo::Bump;
use byte_unit::Byte;
use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
use meilisearch_types::enterprise_edition::network::Remote;
use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::progress::{EmbedderStats, Progress, VariableNameStep};
use meilisearch_types::milli::update::new::indexer;
use meilisearch_types::milli::update::new::indexer::enterprise_edition::sharding::Shards;
use meilisearch_types::milli::{self, ChannelCongestion};
use meilisearch_types::tasks::enterprise_edition::network::{NetworkTopologyState, Origin};
use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use milli::update::Settings as MilliSettings;
@@ -23,6 +30,7 @@ use crate::processing::{
IndexCompaction, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress,
TaskDeletionProgress, UpdateIndexProgress,
};
use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
use crate::utils::{
self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
ProcessingBatch,
@@ -539,9 +547,209 @@ impl IndexScheduler {

Ok((tasks, ProcessBatchInfo::default()))
}
Batch::NetworkIndexBatch { mut network_task, inner_batch } => {
let (mut tasks, info) =
self.process_batch(*inner_batch, current_batch, progress)?;
let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut network_task.kind
else {
return Err(Error::CorruptedTaskQueue);
};
for task in &tasks {
let Some(network) = task.network.as_ref() else {
continue;
};
let Some(import) = network.import_data() else {
continue;
};
network_topology_change.process_remote_tasks(
&import.remote_name,
&import.index_name,
import.document_count,
);
}
tasks.push(network_task);
Ok((tasks, info))
}
Batch::NetworkWait { mut task } => {
let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut task.kind
else {
tracing::error!("network topology change task has the wrong kind with content");
return Err(Error::CorruptedTaskQueue);
};

let Some(task_network) = &task.network else {
tracing::error!("network topology change task has no network");
return Err(Error::CorruptedTaskQueue);
};

let origin;
let origin = match task_network.origin() {
Some(origin) => origin,
None => {
let myself =
network_topology_change.in_name().expect("origin is not the leader");
origin = Origin {
remote_name: myself.to_string(),
task_uid: task.uid,
network_version: task_network.network_version(),
};
&origin
}
};

if let Some((remotes, out_name)) = network_topology_change.export_to_process() {
self.balance_documents(
remotes,
out_name,
network_topology_change.in_name(),
origin,
&progress,
&self.scheduler.must_stop_processing,
)?;
}
network_topology_change.update_state();
if network_topology_change.state() == NetworkTopologyState::Finished {
task.status = Status::Succeeded;
}
Ok((vec![task], Default::default()))
}
}
}

fn balance_documents(
&self,
remotes: &BTreeMap<String, Remote>,
out_name: &str,
in_name: Option<&str>,
network_change_origin: &Origin,
progress: &Progress,
must_stop_processing: &crate::scheduler::MustStopProcessing,
) -> crate::Result<()> {
let new_shards = Shards::from_remotes_local(remotes.keys().map(String::as_str), in_name);

// TECHDEBT: this spawns a `ureq` agent additionally to `reqwest`. We probably want to harmonize all of this.
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();

let mut indexer_alloc = Bump::new();

let scheduler_rtxn = self.env.read_txn()?;

let index_count = self.index_mapper.index_count(&scheduler_rtxn)?;

// process by batches of 20MiB. Allow for compression? Don't forget about embeddings
let _: Vec<()> = self.index_mapper.try_for_each_index(
&scheduler_rtxn,
|index_uid, index| -> crate::Result<()> {
indexer_alloc.reset();
let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
let index_rtxn = index.read_txn()?;
let all_docids = index.external_documents_ids();
let mut documents_to_move_to: HashMap<String, RoaringBitmap> = HashMap::new();
let mut documents_to_delete = RoaringBitmap::new();

for res in all_docids.iter(&index_rtxn)? {
let (external_docid, docid) = res?;
match new_shards.processing_shard(external_docid) {
Some(shard) if shard.is_own => continue,
Some(shard) => {
documents_to_move_to
.entry(shard.name.clone())
.or_default()
.insert(docid);
}
None => {
documents_to_delete.insert(docid);
}
}
}

let fields_ids_map = index.fields_ids_map(&index_rtxn)?;

for (remote, documents_to_move) in documents_to_move_to {
/// TODO: justify the unwrap
let remote = remotes.get(&remote).unwrap();

let target = TargetInstance {
base_url: &remote.url,
api_key: remote.write_api_key.as_deref(),
};
let options = ExportOptions {
index_uid,
payload_size: None,
override_settings: false,
export_mode: super::process_export::ExportMode::NetworkBalancing {
index_count,
export_old_remote_name: out_name,
network_change_origin,
},
};
let ctx = ExportContext {
index,
index_rtxn: &index_rtxn,
universe: &documents_to_move,
progress,
agent: &agent,
must_stop_processing,
};

self.export_one_index(target, options, ctx)?;

documents_to_delete |= documents_to_move;
}

if documents_to_delete.is_empty() {
return Ok(());
}

let mut new_fields_ids_map = fields_ids_map.clone();

// candidates not empty => index not empty => a primary key is set
let primary_key = index.primary_key(&index_rtxn)?.unwrap();

let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
.map_err(milli::Error::from)
.map_err(err)?;

let mut index_wtxn = index.write_txn()?;

let mut indexer = indexer::DocumentDeletion::new();
indexer.delete_documents_by_docids(documents_to_delete);
let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
let embedders = index
.embedding_configs()
.embedding_configs(&index_wtxn)
.map_err(milli::Error::from)
.map_err(err)?;
let embedders = self.embedders(index_uid.to_string(), embedders)?;
let indexer_config = self.index_mapper.indexer_config();
let pool = &indexer_config.thread_pool;

indexer::index(
&mut index_wtxn,
index,
pool,
indexer_config.grenad_parameters(),
&fields_ids_map,
new_fields_ids_map,
None, // document deletion never changes primary key
&document_changes,
embedders,
&|| must_stop_processing.get(),
&progress,
&EmbedderStats::default(),
)
.map_err(err)?;

index_wtxn.commit()?;

Ok(())
},
)?;
Ok(())
}

fn apply_compaction(
&self,
rtxn: &RoTxn,
@@ -711,7 +919,7 @@ impl IndexScheduler {

// 1. Remove from this list the tasks that we are not allowed to delete
let enqueued_tasks = self.queue.tasks.get_status(wtxn, Status::Enqueued)?;
let processing_tasks = &self.processing_tasks.read().unwrap().processing.clone();
let processing_tasks = &self.runtime_tasks.read().unwrap().processing.processing.clone();

let all_task_ids = self.queue.tasks.all_task_ids(wtxn)?;
let mut to_delete_tasks = all_task_ids & matched_tasks;

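The `balance_documents` loop above decides, for every document, whether it stays on this node, moves to another remote, or is deleted, based on the shard that `processing_shard` assigns to the external document id. A rough, self-contained sketch of that routing decision follows; the `Shards` type and the hash-based assignment here are illustrative stand-ins, not the real sharding implementation.

// Sketch only: a toy shard assignment used to group documents per target remote.
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

struct Shards {
    own: String,
    others: Vec<String>,
}

impl Shards {
    /// Pick the shard responsible for an external document id (toy hash routing).
    fn processing_shard(&self, external_docid: &str) -> &str {
        let mut all: Vec<&str> = std::iter::once(self.own.as_str())
            .chain(self.others.iter().map(String::as_str))
            .collect();
        all.sort(); // stable assignment regardless of declaration order
        let mut hasher = DefaultHasher::new();
        external_docid.hash(&mut hasher);
        all[(hasher.finish() as usize) % all.len()]
    }
}

fn main() {
    let shards = Shards { own: "ms-0".into(), others: vec!["ms-1".into(), "ms-2".into()] };
    // Documents routed to our own shard stay; the rest are grouped per target remote.
    let mut to_move: HashMap<&str, Vec<&str>> = HashMap::new();
    for doc in ["a", "b", "c", "d", "e"] {
        let shard = shards.processing_shard(doc);
        if shard != shards.own {
            to_move.entry(shard).or_default().push(doc);
        }
    }
    println!("{to_move:?}");
}
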
@@ -7,6 +7,7 @@ use backoff::ExponentialBackoff;
use byte_unit::Byte;
use flate2::write::GzEncoder;
use flate2::Compression;
use meilisearch_types::error::Code;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
@@ -15,7 +16,11 @@ use meilisearch_types::milli::update::{request_threads, Setting};
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
use meilisearch_types::settings::{self, SecretPolicy};
use meilisearch_types::tasks::enterprise_edition::network::{
headers, ImportData, ImportMetadata, Origin,
};
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
use roaring::RoaringBitmap;
use serde::Deserialize;
use ureq::{json, Response};

@@ -50,6 +55,7 @@ impl IndexScheduler {
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let must_stop_processing = self.scheduler.must_stop_processing.clone();
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
let err = |err| Error::from_milli(err, Some(uid.to_string()));
if must_stop_processing.get() {
return Err(Error::AbortedTask);
}
@@ -61,104 +67,31 @@ impl IndexScheduler {
));

let ExportIndexSettings { filter, override_settings } = export_settings;

let index = self.index(uid)?;
let index_rtxn = index.read_txn()?;
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));

// First, check if the index already exists
let url = format!("{base_url}/indexes/{uid}");
let response = retry(&must_stop_processing, || {
let mut request = agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}

request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
false
}
Err(e) => return Err(e),
};

let primary_key = index
.primary_key(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;

// Create the index
if !index_exists {
let url = format!("{base_url}/indexes");
retry(&must_stop_processing, || {
let mut request = agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}

// Patch the index primary key
if index_exists && *override_settings {
let url = format!("{base_url}/indexes/{uid}");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}

// Send the index settings
if !index_exists || *override_settings {
let mut settings =
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!("{base_url}/indexes/{uid}/settings");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}

let filter = filter
.as_ref()
.map(Filter::from_json)
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
.flatten();

let filter_universe = filter
.map(|f| f.evaluate(&index_rtxn, &index))
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let whole_universe = index
.documents_ids(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
let filter_universe =
filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
let whole_universe =
index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
let universe = filter_universe.unwrap_or(whole_universe);

let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();

// We don't need to keep this one alive as we will
// spawn many threads to process the documents
drop(index_rtxn);

let total_documents = universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
progress.update_progress(progress_step);
let target = TargetInstance { base_url, api_key };
let ctx = ExportContext {
index: &index,
index_rtxn: &index_rtxn,
universe: &universe,
progress: &progress,
agent: &agent,
must_stop_processing: &must_stop_processing,
};
let options = ExportOptions {
index_uid: uid,
payload_size,
override_settings: *override_settings,
export_mode: ExportMode::ExportRoute,
};
let total_documents = self.export_one_index(target, options, ctx)?;

output.insert(
IndexUidPattern::new_unchecked(uid.clone()),
@@ -167,155 +100,307 @@ impl IndexScheduler {
matched_documents: Some(total_documents as u64),
},
);

let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
let documents_url = format!("{base_url}/indexes/{uid}/documents");

let results = request_threads()
.broadcast(|ctx| {
let index_rtxn = index
.read_txn()
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;

let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in universe.iter().enumerate() {
if i % ctx.num_threads() != ctx.index() {
continue;
}

let document = index
.document(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;

let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;

// TODO definitely factorize this code
'inject_vectors: {
let embeddings = index
.embeddings(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;

if embeddings.is_empty() {
break 'inject_vectors;
}

let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));

let serde_json::Value::Object(vectors) = vectors else {
return Err(Error::from_milli(
milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = index
.external_id_of(
&index_rtxn,
std::iter::once(docid),
)
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
),
Some(uid.to_string()),
));
};

for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(
VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
),
regenerate: regenerate &&
// Meilisearch does not handle well dumps with fragments, because as the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings has non regenerate in this case.
!has_fragments,
};
vectors.insert(
embedder_name,
serde_json::to_value(embeddings).unwrap(),
);
}
}

tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;

// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
// We compress the documents before sending them
let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder
.write_all(&buffer)
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
encoder
.finish()
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;

retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
})?;
buffer.clear();
compressed_buffer.clear();
}
buffer.extend_from_slice(&tmp_buffer);

if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}

retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;

Ok(())
})
.map_err(|e| {
Error::from_milli(
milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
Some(uid.to_string()),
)
})?;
for result in results {
result?;
}

step.store(total_documents, atomic::Ordering::Relaxed);
}

Ok(output)
}

pub(super) fn export_one_index(
&self,
target: TargetInstance<'_>,
options: ExportOptions<'_>,
ctx: ExportContext<'_>,
) -> Result<u64, Error> {
let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));

let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
let url = format!(
"{base_url}/indexes/{index_uid}",
base_url = target.base_url,
index_uid = options.index_uid
);
let response = retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}

request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. })
if code == Code::IndexNotFound.name() =>
{
false
}
Err(e) => return Err(e),
};
let primary_key =
ctx.index.primary_key(&ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
if !index_exists {
let url = format!("{base_url}/indexes", base_url = target.base_url);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if index_exists && options.override_settings {
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if !index_exists || options.override_settings {
let mut settings =
settings::settings(&ctx.index, &ctx.index_rtxn, SecretPolicy::RevealSecrets)
.map_err(err)?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!(
"{base_url}/indexes/{index_uid}/settings",
base_url = target.base_url,
index_uid = options.index_uid
);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}

let fields_ids_map = ctx.index.fields_ids_map(&ctx.index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
let total_documents = ctx.universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
ctx.progress.update_progress(progress_step);

let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
let documents_url = format!(
"{base_url}/indexes/{index_uid}/documents",
base_url = target.base_url,
index_uid = options.index_uid
);
let results = request_threads()
.broadcast(|broadcast| {
let mut task_network = if let ExportMode::NetworkBalancing {
index_count,
export_old_remote_name,
network_change_origin,
} = options.export_mode
{
Some((
ImportData {
remote_name: export_old_remote_name.to_string(),
index_name: options.index_uid.to_string(),
document_count: 0,
},
network_change_origin.clone(),
ImportMetadata {
index_count,
task_key: 0,
total_index_documents: ctx.universe.len(),
},
))
} else {
None
};

let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;

let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in ctx.universe.iter().enumerate() {
if i % broadcast.num_threads() != broadcast.index() {
continue;
}
if let Some((import_data, _, metadata)) = &mut task_network {
import_data.document_count += 1;
metadata.task_key = docid;
}

let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;

let mut document =
obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;

// TODO definitely factorize this code
'inject_vectors: {
let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;

if embeddings.is_empty() {
break 'inject_vectors;
}

let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));

let serde_json::Value::Object(vectors) = vectors else {
return Err(err(milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = ctx
.index
.external_id_of(&index_rtxn, std::iter::once(docid))
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
)));
};

for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
embeddings,
)),
regenerate: regenerate &&
// Meilisearch does not handle well dumps with fragments, because as the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings has non regenerate in this case.
!has_fragments,
};
vectors
.insert(embedder_name, serde_json::to_value(embeddings).unwrap());
}
}

tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(milli::Error::from)
.map_err(err)?;

// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
// We compress the documents before sending them
let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder.write_all(&buffer).map_err(milli::Error::from).map_err(err)?;
encoder.finish().map_err(milli::Error::from).map_err(err)?;

match retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
if let Some((import_data, origin, metadata)) = &task_network {
request = set_network_ureq_headers(
request,
import_data,
origin,
metadata,
);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
}) {
Ok(_response) => {}
Err(Error::FromRemoteWhenExporting { code, .. })
if code == Code::ImportTaskAlreadyReceived.name() =>
{
continue;
}
Err(Error::FromRemoteWhenExporting { code, message, .. })
if code == Code::ImportTaskUnknownRemote.name() =>
{
tracing::warn!("remote answered with: {message}");
break;
}
Err(Error::FromRemoteWhenExporting { code, message, .. })
if code == Code::ImportTaskWithoutNetworkTask.name() =>
{
tracing::warn!("remote answered with: {message}");
break;
}
Err(e) => return Err(e),
}
buffer.clear();
compressed_buffer.clear();
if let Some((import_data, _, metadata)) = &mut task_network {
import_data.document_count = 0;
metadata.task_key = 0;
}
}
buffer.extend_from_slice(&tmp_buffer);

if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}

retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some((import_data, origin, metadata)) = &task_network {
request = set_network_ureq_headers(request, import_data, origin, metadata);
}
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;

Ok(())
})
.map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
for result in results {
result?;
}
step.store(total_documents, atomic::Ordering::Relaxed);
Ok(total_documents as u64)
}
}

fn set_network_ureq_headers(
request: ureq::Request,
import_data: &ImportData,
origin: &Origin,
metadata: &ImportMetadata,
) -> ureq::Request {
request
.set(headers::PROXY_ORIGIN_REMOTE_HEADER, &origin.remote_name)
.set(headers::PROXY_ORIGIN_TASK_UID_HEADER, &origin.task_uid.to_string())
.set(
headers::PROXY_ORIGIN_NETWORK_VERSION_HEADER,
&origin.network_version.to_u128_le().to_string(),
)
.set(headers::PROXY_IMPORT_REMOTE_HEADER, &import_data.remote_name)
.set(headers::PROXY_IMPORT_INDEX_HEADER, &import_data.index_name)
.set(headers::PROXY_IMPORT_TASK_KEY_HEADER, &metadata.task_key.to_string())
.set(headers::PROXY_IMPORT_DOCS_HEADER, &import_data.document_count.to_string())
.set(headers::PROXY_IMPORT_INDEX_COUNT_HEADER, &metadata.index_count.to_string())
.set(
headers::PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
&metadata.total_index_documents.to_string(),
)
}

fn retry<F>(must_stop_processing: &MustStopProcessing, send_request: F) -> Result<ureq::Response>
@@ -374,4 +459,37 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
}
}

// export_one_index arguments
pub(super) struct TargetInstance<'a> {
pub(super) base_url: &'a str,
pub(super) api_key: Option<&'a str>,
}

pub(super) struct ExportOptions<'a> {
pub(super) index_uid: &'a str,
pub(super) payload_size: Option<&'a Byte>,
pub(super) override_settings: bool,
pub(super) export_mode: ExportMode<'a>,
}

pub(super) struct ExportContext<'a> {
pub(super) index: &'a meilisearch_types::milli::Index,
pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
pub(super) universe: &'a RoaringBitmap,
pub(super) progress: &'a Progress,
pub(super) agent: &'a ureq::Agent,
pub(super) must_stop_processing: &'a MustStopProcessing,
}

pub(super) enum ExportMode<'a> {
ExportRoute,
NetworkBalancing {
index_count: u64,

export_old_remote_name: &'a str,
network_change_origin: &'a Origin,
},
}

// progress related
enum ExportIndex {}

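Both export paths above accumulate NDJSON-serialized documents and flush whenever the next document would push the buffer past the payload limit (20 MiB by default), while always keeping at least one document per payload. The following is a minimal sketch of that batching rule only, with a plain callback standing in for the gzip-compressed, retried HTTP send used in the real code.

// Sketch only: payload-size batching of NDJSON lines, assuming a caller-provided `send`.
fn push_in_batches<F>(documents: &[String], limit: usize, mut send: F)
where
    F: FnMut(&[u8]),
{
    let mut buffer: Vec<u8> = Vec::new();
    for doc in documents {
        let mut line = doc.clone().into_bytes();
        line.push(b'\n'); // NDJSON: one document per line
        // Always keep at least one document per payload, even if it alone exceeds the limit.
        if !buffer.is_empty() && buffer.len() + line.len() > limit {
            send(&buffer);
            buffer.clear();
        }
        buffer.extend_from_slice(&line);
    }
    if !buffer.is_empty() {
        send(&buffer);
    }
}

fn main() {
    let docs: Vec<String> = (0..5).map(|i| format!("{{\"id\":{i}}}")).collect();
    let mut payloads = 0;
    push_in_batches(&docs, 20, |bytes| {
        payloads += 1;
        println!("payload {payloads}: {} bytes", bytes.len());
    });
}
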
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
|
||||
[timestamp] [4,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
|
||||
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
|
||||
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,]
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
@@ -37,7 +37,7 @@ catto [1,]
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
----------------------------------------------------------------------
|
||||
@@ -40,7 +40,7 @@ doggo [2,]
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
|
||||
@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
@@ -43,7 +43,7 @@ doggo [2,3,]
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
|
||||
@@ -48,6 +48,8 @@ pub fn upgrade_index_scheduler(
|
||||
(1, 22, _) => 0,
|
||||
(1, 23, _) => 0,
|
||||
(1, 24, _) => 0,
|
||||
(1, 25, _) => 0,
|
||||
(1, 26, _) => 0,
|
||||
(major, minor, patch) => {
|
||||
if major > current_major
|
||||
|| (major == current_major && minor > current_minor)
|
||||
@@ -98,6 +100,7 @@ pub fn upgrade_index_scheduler(
|
||||
status: Status::Enqueued,
|
||||
kind: KindWithContent::UpgradeDatabase { from },
|
||||
network: None,
|
||||
custom_metadata: None,
|
||||
},
|
||||
)?;
|
||||
wtxn.commit()?;
|
||||
|
||||
@@ -286,6 +286,7 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
|
||||
| K::DumpCreation { .. }
|
||||
| K::Export { .. }
|
||||
| K::UpgradeDatabase { .. }
|
||||
| K::NetworkTopologyChange(_)
|
||||
| K::SnapshotCreation => (),
|
||||
};
|
||||
if let Some(Details::IndexSwap { swaps }) = &mut task.details {
|
||||
@@ -379,6 +380,7 @@ impl crate::IndexScheduler {
|
||||
status,
|
||||
kind,
|
||||
network: _,
|
||||
custom_metadata: _,
|
||||
} = task;
|
||||
assert_eq!(uid, task.uid);
|
||||
if task.status != Status::Enqueued {
|
||||
@@ -626,6 +628,13 @@ impl crate::IndexScheduler {
|
||||
} => {
|
||||
assert_eq!(kind.as_kind(), Kind::IndexCompaction);
|
||||
}
|
||||
Details::NetworkTopologyChange {
|
||||
moved_documents: _,
|
||||
received_documents: _,
|
||||
message: _,
|
||||
} => {
|
||||
assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ use std::collections::BTreeMap;
|
||||
|
||||
use milli::update::new::indexer::enterprise_edition::sharding::Shards;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
@@ -16,20 +17,18 @@ pub struct Network {
|
||||
#[serde(default)]
|
||||
pub remotes: BTreeMap<String, Remote>,
|
||||
#[serde(default)]
|
||||
pub sharding: bool,
|
||||
pub leader: Option<String>,
|
||||
#[serde(default)]
|
||||
pub version: Uuid,
|
||||
}
|
||||
|
||||
impl Network {
|
||||
pub fn shards(&self) -> Option<Shards> {
|
||||
if self.sharding {
|
||||
let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
|
||||
let others = self
|
||||
.remotes
|
||||
.keys()
|
||||
.filter(|name| name.as_str() != this)
|
||||
.map(|name| name.to_owned())
|
||||
.collect();
|
||||
Some(Shards { own: vec![this.to_owned()], others })
|
||||
if self.leader.is_some() {
|
||||
Some(Shards::from_remotes_local(
|
||||
self.remotes.keys().map(String::as_str),
|
||||
self.local.as_deref(),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
||||
@@ -156,7 +156,7 @@ macro_rules! make_error_codes {
|
||||
}
|
||||
|
||||
/// return error name, used as error code
|
||||
fn name(&self) -> String {
|
||||
pub fn name(&self) -> String {
|
||||
match self {
|
||||
$(
|
||||
Code::$code_ident => stringify!($code_ident).to_case(convert_case::Case::Snake)
|
||||
@@ -214,6 +214,9 @@ ImmutableApiKeyUid , InvalidRequest , BAD_REQU
|
||||
ImmutableApiKeyUpdatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexCreatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexUpdatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImportTaskAlreadyReceived , InvalidRequest , PRECONDITION_FAILED;
|
||||
ImportTaskUnknownRemote , InvalidRequest , PRECONDITION_FAILED;
|
||||
ImportTaskWithoutNetworkTask , InvalidRequest , SERVICE_UNAVAILABLE;
|
||||
IndexAlreadyExists , InvalidRequest , CONFLICT ;
|
||||
IndexCreationFailed , Internal , INTERNAL_SERVER_ERROR;
|
||||
IndexNotFound , InvalidRequest , NOT_FOUND;
|
||||
@@ -254,6 +257,7 @@ InvalidSearchHybridQuery , InvalidRequest , BAD_REQU
|
||||
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexCustomMetadata , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchFacets , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchFacetsByIndex , InvalidRequest , BAD_REQUEST ;
|
||||
@@ -269,9 +273,9 @@ InvalidMultiSearchQueryRankingRules , InvalidRequest , BAD_REQU
|
||||
InvalidMultiSearchQueryPosition , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchRemote , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidMultiSearchWeight , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidNetworkLeader , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidNetworkRemotes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidNetworkSelf , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidNetworkSharding , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidNetworkSearchApiKey , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidNetworkWriteApiKey , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidNetworkUrl , InvalidRequest , BAD_REQUEST ;
|
||||
@@ -376,7 +380,9 @@ MissingPayload , InvalidRequest , BAD_REQU
|
||||
MissingSearchHybrid , InvalidRequest , BAD_REQUEST ;
|
||||
MissingSwapIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
MissingTaskFilters , InvalidRequest , BAD_REQUEST ;
|
||||
NetworkVersionMismatch , InvalidRequest , PRECONDITION_FAILED ;
|
||||
NoSpaceLeftOnDevice , System , UNPROCESSABLE_ENTITY;
|
||||
NotLeader , InvalidRequest , BAD_REQUEST ;
|
||||
PayloadTooLarge , InvalidRequest , PAYLOAD_TOO_LARGE ;
|
||||
RemoteBadResponse , System , BAD_GATEWAY ;
|
||||
RemoteBadRequest , InvalidRequest , BAD_REQUEST ;
|
||||
@@ -390,6 +396,7 @@ TaskFileNotFound , InvalidRequest , NOT_FOUN
|
||||
BatchNotFound , InvalidRequest , NOT_FOUND ;
|
||||
TooManyOpenFiles , System , UNPROCESSABLE_ENTITY ;
|
||||
TooManyVectors , InvalidRequest , BAD_REQUEST ;
|
||||
UnexpectedNetworkPreviousRemotes , InvalidRequest , BAD_REQUEST ;
|
||||
UnretrievableDocument , Internal , BAD_REQUEST ;
|
||||
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
|
||||
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
|
||||
|
||||
@@ -9,9 +9,9 @@ use utoipa::ToSchema;
|
||||
use crate::batches::BatchId;
|
||||
use crate::error::ResponseError;
|
||||
use crate::settings::{Settings, Unchecked};
|
||||
use crate::tasks::enterprise_edition::network::DbTaskNetwork;
|
||||
use crate::tasks::{
|
||||
serialize_duration, Details, DetailsExportIndexSettings, IndexSwap, Kind, Status, Task, TaskId,
|
||||
TaskNetwork,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, ToSchema)]
|
||||
@@ -54,7 +54,10 @@ pub struct TaskView {
|
||||
pub finished_at: Option<OffsetDateTime>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub network: Option<TaskNetwork>,
|
||||
pub network: Option<DbTaskNetwork>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub custom_metadata: Option<String>,
|
||||
}
|
||||
|
||||
impl TaskView {
|
||||
@@ -73,6 +76,7 @@ impl TaskView {
|
||||
started_at: task.started_at,
|
||||
finished_at: task.finished_at,
|
||||
network: task.network.clone(),
|
||||
custom_metadata: task.custom_metadata.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -147,6 +151,11 @@ pub struct DetailsView {
|
||||
pub pre_compaction_size: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub post_compaction_size: Option<String>,
|
||||
// network topology change
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub moved_documents: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub message: Option<String>,
|
||||
}
|
||||
|
||||
impl DetailsView {
|
||||
@@ -157,6 +166,17 @@ impl DetailsView {
|
||||
(None, Some(doc)) | (Some(doc), None) => Some(doc),
|
||||
(Some(left), Some(right)) => Some(left + right),
|
||||
},
|
||||
moved_documents: match (self.moved_documents, other.moved_documents) {
|
||||
(None, None) => None,
|
||||
(None, Some(doc)) | (Some(doc), None) => Some(doc),
|
||||
(Some(left), Some(right)) => Some(left + right),
|
||||
},
|
||||
message: match (&mut self.message, &other.message) {
|
||||
(None, None) => None,
|
||||
(None, Some(message)) => Some(message.clone()),
|
||||
(Some(message), None) => Some(std::mem::take(message)),
|
||||
(Some(message), Some(_)) => Some(std::mem::take(message)),
|
||||
},
|
||||
indexed_documents: match (self.indexed_documents, other.indexed_documents) {
|
||||
(None, None) => None,
|
||||
(None, Some(None)) | (Some(None), None) | (Some(None), Some(None)) => Some(None),
|
||||
@@ -447,6 +467,14 @@ impl From<Details> for DetailsView {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
Details::NetworkTopologyChange { moved_documents, received_documents, message } => {
|
||||
DetailsView {
|
||||
moved_documents: Some(moved_documents),
|
||||
received_documents: Some(received_documents),
|
||||
message: Some(message),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
// Copyright © 2025 Meilisearch Some Rights Reserved
|
||||
// This file is part of Meilisearch Enterprise Edition (EE).
|
||||
// Use of this source code is governed by the Business Source License 1.1,
|
||||
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
|
||||
|
||||
pub mod network;
|
||||
crates/meilisearch-types/src/tasks/enterprise_edition/network.rs (new file, 564 lines)
@@ -0,0 +1,564 @@
|
||||
// Copyright © 2025 Meilisearch Some Rights Reserved
|
||||
// This file is part of Meilisearch Enterprise Edition (EE).
|
||||
// Use of this source code is governed by the Business Source License 1.1,
|
||||
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use milli::DocumentId;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::enterprise_edition::network::{Network, Remote};
|
||||
use crate::error::ResponseError;
|
||||
use crate::tasks::{Details, TaskId};
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(untagged, rename_all = "camelCase")]
|
||||
// This type is used in the database, care should be taken when modifying it.
|
||||
pub enum DbTaskNetwork {
|
||||
/// Tasks that were duplicated from `origin`
|
||||
Origin { origin: Origin },
|
||||
/// Tasks that were duplicated as `remote_tasks`
|
||||
Remotes {
|
||||
remote_tasks: BTreeMap<String, RemoteTask>,
|
||||
#[serde(default)]
|
||||
network_version: Uuid,
|
||||
},
|
||||
/// Document import tasks sent in the context of `network_change`
|
||||
Import { import_from: ImportData, network_change: Origin },
|
||||
}
|
||||
|
||||
impl DbTaskNetwork {
|
||||
pub fn network_version(&self) -> Uuid {
|
||||
match self {
|
||||
DbTaskNetwork::Origin { origin } => origin.network_version,
|
||||
DbTaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
|
||||
DbTaskNetwork::Import { import_from: _, network_change } => {
|
||||
network_change.network_version
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn import_data(&self) -> Option<&ImportData> {
|
||||
match self {
|
||||
DbTaskNetwork::Origin { .. } | DbTaskNetwork::Remotes { .. } => None,
|
||||
DbTaskNetwork::Import { import_from, .. } => Some(import_from),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn origin(&self) -> Option<&Origin> {
|
||||
match self {
|
||||
DbTaskNetwork::Origin { origin } => Some(origin),
|
||||
DbTaskNetwork::Remotes { .. } => None,
|
||||
DbTaskNetwork::Import { network_change, .. } => Some(network_change),
|
||||
}
|
||||
}
|
||||
}
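Since `DbTaskNetwork` is `#[serde(untagged)]`, a stored value is recognized purely by which fields are present rather than by a variant tag. The following self-contained sketch mirrors that pattern with a simplified, hypothetical field subset (the renaming attributes are placed per variant here for illustration; the exact JSON produced by Meilisearch's real type may differ):

// Minimal sketch of an untagged enum like `DbTaskNetwork`: the variant is
// picked from the field names alone. Field set and remote names are made up.
use serde::Serialize;
use std::collections::BTreeMap;

#[derive(Serialize)]
#[serde(untagged)]
enum TaskNetworkSketch {
    Origin { origin: OriginSketch },
    #[serde(rename_all = "camelCase")]
    Remotes { remote_tasks: BTreeMap<String, u32> },
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct OriginSketch {
    remote_name: String,
    task_uid: u32,
}

fn main() {
    let origin = TaskNetworkSketch::Origin {
        origin: OriginSketch { remote_name: "ms-0".into(), task_uid: 42 },
    };
    // {"origin":{"remoteName":"ms-0","taskUid":42}}
    println!("{}", serde_json::to_string(&origin).unwrap());

    let remotes = TaskNetworkSketch::Remotes {
        remote_tasks: BTreeMap::from([("ms-1".to_string(), 7u32)]),
    };
    // {"remoteTasks":{"ms-1":7}}
    println!("{}", serde_json::to_string(&remotes).unwrap());
}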
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub enum TaskNetwork {
|
||||
/// Tasks that were duplicated from `origin`
|
||||
Origin { origin: Origin },
|
||||
/// Tasks that were duplicated as `remote_tasks`
|
||||
Remotes { remote_tasks: BTreeMap<String, RemoteTask>, network_version: Uuid },
|
||||
/// Document import tasks sent in the context of `network_change`
|
||||
Import { import_from: ImportData, network_change: Origin, metadata: ImportMetadata },
|
||||
}
|
||||
|
||||
impl From<TaskNetwork> for DbTaskNetwork {
|
||||
fn from(value: TaskNetwork) -> Self {
|
||||
match value {
|
||||
TaskNetwork::Origin { origin } => DbTaskNetwork::Origin { origin },
|
||||
TaskNetwork::Remotes { remote_tasks, network_version } => {
|
||||
DbTaskNetwork::Remotes { remote_tasks, network_version }
|
||||
}
|
||||
TaskNetwork::Import { import_from, network_change, metadata: _ } => {
|
||||
DbTaskNetwork::Import { import_from, network_change }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Origin {
|
||||
pub remote_name: String,
|
||||
pub task_uid: u32,
|
||||
pub network_version: Uuid,
|
||||
}
|
||||
|
||||
/// Import data stored in a task
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ImportData {
|
||||
pub remote_name: String,
|
||||
pub index_name: String,
|
||||
pub document_count: u64,
|
||||
}
|
||||
|
||||
/// Import metadata associated with a task but not stored in the task
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct ImportMetadata {
|
||||
/// Total number of indexes to import from this host
|
||||
pub index_count: u64,
|
||||
/// Key unique to this (network_change, index, host, key).
|
||||
///
|
||||
/// In practice, an internal document id of one of the documents to import.
|
||||
pub task_key: DocumentId,
|
||||
/// Total number of documents to import for this index from this host.
|
||||
pub total_index_documents: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RemoteTask {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
task_uid: Option<TaskId>,
|
||||
error: Option<ResponseError>,
|
||||
}
|
||||
|
||||
impl From<Result<TaskId, ResponseError>> for RemoteTask {
|
||||
fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
|
||||
match res {
|
||||
Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
|
||||
Err(err) => RemoteTask { task_uid: None, error: Some(err) },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Contains the full state of a network topology change.
|
||||
///
|
||||
/// A network topology change task is unique in that it can be processed in multiple different batches, as its resolution
|
||||
/// depends on various document addition tasks being processed.
|
||||
///
|
||||
/// A network topology task has 4 states:
|
||||
///
|
||||
/// 1. Processing any task that was meant for an earlier version of the network. This is necessary to know that we have the right version of
|
||||
/// documents.
|
||||
/// 2. Sending all documents that must be moved to other remotes.
|
||||
/// 3. Processing any task coming from the remotes.
|
||||
/// 4. Finished.
|
||||
///
|
||||
/// Furthermore, it maintains statistics about the number of moved and received documents.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NetworkTopologyChange {
|
||||
state: NetworkTopologyState,
|
||||
// in name, `None` if the node is no longer part of the network
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
in_name: Option<String>,
|
||||
// out name, `None` if the node is new to the network
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
out_name: Option<String>,
|
||||
out_remotes: BTreeMap<String, Remote>,
|
||||
in_remotes: BTreeMap<String, InRemote>,
|
||||
stats: NetworkTopologyStats,
|
||||
}
|
||||
|
||||
impl NetworkTopologyChange {
|
||||
pub fn new(old_network: Network, new_network: Network) -> Self {
|
||||
// we use our old name as export name
|
||||
let out_name = old_network.local;
|
||||
// we use our new name as import name
|
||||
let in_name = new_network.local;
|
||||
// we export to the new network
|
||||
let mut out_remotes = new_network.remotes;
|
||||
// don't export to ourselves
|
||||
if let Some(in_name) = &in_name {
|
||||
out_remotes.remove(in_name);
|
||||
}
|
||||
// FIXME: doesn't work if the old network is not the same on all old nodes
|
||||
let in_remotes = old_network
|
||||
.remotes
|
||||
.into_keys()
|
||||
// don't await imports from ourselves
|
||||
.filter(|name| Some(name.as_str()) != out_name.as_deref())
|
||||
.map(|name| (name, InRemote::new()))
|
||||
.collect();
|
||||
Self {
|
||||
state: NetworkTopologyState::WaitingForOlderTasks,
|
||||
in_name,
|
||||
out_name,
|
||||
out_remotes,
|
||||
in_remotes,
|
||||
stats: NetworkTopologyStats { received_documents: 0, moved_documents: 0 },
|
||||
}
|
||||
}
|
||||
|
||||
pub fn state(&self) -> NetworkTopologyState {
|
||||
self.state
|
||||
}
|
||||
|
||||
pub fn out_name(&self) -> Option<&str> {
|
||||
// invariant: at least one of `out_name` or `in_name` is defined
|
||||
self.out_name.as_deref()
|
||||
}
|
||||
|
||||
pub fn in_name(&self) -> Option<&str> {
|
||||
self.in_name.as_deref()
|
||||
}
|
||||
|
||||
pub fn export_to_process(&self) -> Option<(&BTreeMap<String, Remote>, &str)> {
|
||||
if self.state != NetworkTopologyState::ExportingDocuments {
|
||||
return None;
|
||||
}
|
||||
|
||||
if self.out_remotes.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let out_name = self.out_name()?;
|
||||
Some((&self.out_remotes, out_name))
|
||||
}
|
||||
|
||||
/// Compute the next state from the current state of the task.
|
||||
pub fn update_state(&mut self) {
|
||||
self.state = match self.state {
|
||||
NetworkTopologyState::WaitingForOlderTasks => {
|
||||
// no more older tasks, so finished waiting
|
||||
NetworkTopologyState::ExportingDocuments
|
||||
}
|
||||
NetworkTopologyState::ExportingDocuments => {
|
||||
// processed all exported documents
|
||||
NetworkTopologyState::ImportingDocuments
|
||||
}
|
||||
NetworkTopologyState::ImportingDocuments => {
|
||||
if self.is_import_finished() {
|
||||
NetworkTopologyState::Finished
|
||||
} else {
|
||||
NetworkTopologyState::ImportingDocuments
|
||||
}
|
||||
}
|
||||
NetworkTopologyState::Finished => NetworkTopologyState::Finished,
|
||||
};
|
||||
}
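A compact standalone mirror of the four-state progression implemented by `update_state` above; unlike the real method, which consults `is_import_finished()` on the task itself, this sketch takes that flag as a parameter:

// Simplified mirror of the network topology change state machine.
#[derive(Debug, Clone, Copy, PartialEq)]
enum State {
    WaitingForOlderTasks,
    ExportingDocuments,
    ImportingDocuments,
    Finished,
}

fn next(state: State, import_finished: bool) -> State {
    match state {
        // older tasks drained -> start exporting
        State::WaitingForOlderTasks => State::ExportingDocuments,
        // all outgoing documents sent -> wait for incoming ones
        State::ExportingDocuments => State::ImportingDocuments,
        // stay here until every in-remote reported all of its documents
        State::ImportingDocuments if import_finished => State::Finished,
        State::ImportingDocuments => State::ImportingDocuments,
        State::Finished => State::Finished,
    }
}

fn main() {
    let mut s = State::WaitingForOlderTasks;
    for import_finished in [false, false, false, true] {
        s = next(s, import_finished);
        println!("{s:?}");
    }
    // prints: ExportingDocuments, ImportingDocuments, ImportingDocuments, Finished
}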
|
||||
|
||||
pub fn receive_remote_task(
|
||||
&mut self,
|
||||
remote_name: &str,
|
||||
index_name: &str,
|
||||
task_key: DocumentId,
|
||||
document_count: u64,
|
||||
total_indexes: u64,
|
||||
total_index_documents: u64,
|
||||
) -> Result<(), ReceiveTaskError> {
|
||||
let remote = self
|
||||
.in_remotes
|
||||
.get_mut(remote_name)
|
||||
.ok_or_else(|| ReceiveTaskError::UnknownRemote(remote_name.to_string()))?;
|
||||
remote.import_state = match std::mem::take(&mut remote.import_state) {
|
||||
ImportState::WaitingForInitialTask => {
|
||||
let mut import_index_state = BTreeMap::new();
|
||||
import_index_state.insert(
|
||||
index_name.to_owned(),
|
||||
ImportIndexState::Ongoing {
|
||||
total_documents: total_index_documents,
|
||||
received_documents: document_count,
|
||||
task_keys: vec![task_key],
|
||||
processed_documents: 0,
|
||||
},
|
||||
);
|
||||
ImportState::Ongoing { import_index_state, total_indexes }
|
||||
}
|
||||
ImportState::Ongoing { mut import_index_state, total_indexes } => {
|
||||
if let Some((index_name, mut index_state)) =
|
||||
import_index_state.remove_entry(index_name)
|
||||
{
|
||||
index_state = match index_state {
|
||||
ImportIndexState::Ongoing {
|
||||
total_documents,
|
||||
received_documents: previously_received,
|
||||
processed_documents,
|
||||
mut task_keys,
|
||||
} => {
|
||||
if task_keys.contains(&task_key) {
|
||||
return Err(ReceiveTaskError::DuplicateTask(task_key));
|
||||
}
|
||||
task_keys.push(task_key);
|
||||
ImportIndexState::Ongoing {
|
||||
total_documents,
|
||||
received_documents: previously_received + document_count,
|
||||
processed_documents,
|
||||
task_keys,
|
||||
}
|
||||
}
|
||||
ImportIndexState::Finished { total_documents } => {
|
||||
ImportIndexState::Finished { total_documents }
|
||||
}
|
||||
};
|
||||
import_index_state.insert(index_name, index_state);
|
||||
} else {
|
||||
let state = ImportIndexState::Ongoing {
|
||||
total_documents: total_index_documents,
|
||||
received_documents: document_count,
|
||||
processed_documents: 0,
|
||||
task_keys: vec![task_key],
|
||||
};
|
||||
import_index_state.insert(index_name.to_string(), state);
|
||||
}
|
||||
ImportState::Ongoing { import_index_state, total_indexes: total_indexes }
|
||||
}
|
||||
ImportState::Finished { total_indexes, total_documents } => {
|
||||
ImportState::Finished { total_indexes, total_documents }
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn process_remote_tasks(
|
||||
&mut self,
|
||||
remote_name: &str,
|
||||
index_name: &str,
|
||||
document_count: u64,
|
||||
) {
|
||||
// FIXME: unwraps and panics
|
||||
let remote = self.in_remotes.get_mut(remote_name).unwrap();
|
||||
remote.import_state = match std::mem::take(&mut remote.import_state) {
|
||||
ImportState::WaitingForInitialTask => panic!("no task was received, yet one was processed"),
|
||||
ImportState::Ongoing { mut import_index_state, total_indexes } => {
|
||||
let (index_name, mut index_state) =
|
||||
import_index_state.remove_entry(index_name).unwrap();
|
||||
index_state = match index_state {
|
||||
ImportIndexState::Ongoing {
|
||||
total_documents,
|
||||
received_documents,
|
||||
processed_documents: previously_processed,
|
||||
task_keys,
|
||||
} => {
|
||||
let newly_processed_documents = previously_processed + document_count;
|
||||
if newly_processed_documents >= total_documents {
|
||||
ImportIndexState::Finished { total_documents }
|
||||
} else {
|
||||
ImportIndexState::Ongoing {
|
||||
total_documents,
|
||||
received_documents,
|
||||
processed_documents: newly_processed_documents,
|
||||
task_keys,
|
||||
}
|
||||
}
|
||||
}
|
||||
ImportIndexState::Finished { total_documents } => {
|
||||
ImportIndexState::Finished { total_documents }
|
||||
}
|
||||
};
|
||||
import_index_state.insert(index_name, index_state);
|
||||
if import_index_state.len() as u64 == total_indexes
|
||||
&& import_index_state.values().all(|index| index.is_finished())
|
||||
{
|
||||
let total_documents =
|
||||
import_index_state.values().map(|index| index.total_documents()).sum();
|
||||
ImportState::Finished { total_indexes, total_documents }
|
||||
} else {
|
||||
ImportState::Ongoing { import_index_state, total_indexes }
|
||||
}
|
||||
}
|
||||
ImportState::Finished { total_indexes, total_documents } => {
|
||||
ImportState::Finished { total_indexes, total_documents }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_details(&self) -> Details {
|
||||
let message = match self.state {
|
||||
NetworkTopologyState::WaitingForOlderTasks => {
|
||||
"Waiting for tasks enqueued before the network change to finish processing".into()
|
||||
}
|
||||
NetworkTopologyState::ExportingDocuments => "Exporting documents".into(),
|
||||
NetworkTopologyState::ImportingDocuments => {
|
||||
let mut finished_count = 0;
|
||||
let mut first_ongoing = None;
|
||||
let mut ongoing_total_indexes = 0;
|
||||
let mut ongoing_processed_documents = 0;
|
||||
let mut ongoing_missing_documents = 0;
|
||||
let mut ongoing_total_documents = 0;
|
||||
let mut other_ongoing_count = 0;
|
||||
let mut first_waiting = None;
|
||||
let mut other_waiting_count = 0;
|
||||
for (remote_name, in_remote) in &self.in_remotes {
|
||||
match &in_remote.import_state {
|
||||
ImportState::WaitingForInitialTask => {
|
||||
first_waiting = match first_waiting {
|
||||
None => Some(remote_name),
|
||||
first_waiting => {
|
||||
other_waiting_count += 1;
|
||||
first_waiting
|
||||
}
|
||||
};
|
||||
}
|
||||
ImportState::Ongoing { import_index_state, total_indexes } => {
|
||||
first_ongoing = match first_ongoing {
|
||||
None => {
|
||||
ongoing_total_indexes = *total_indexes;
|
||||
Some(remote_name)
|
||||
}
|
||||
first_ongoing => {
|
||||
other_ongoing_count += 1;
|
||||
first_ongoing
|
||||
}
|
||||
};
|
||||
for import_state in import_index_state.values() {
|
||||
match import_state {
|
||||
ImportIndexState::Ongoing {
|
||||
total_documents,
|
||||
processed_documents,
|
||||
received_documents,
|
||||
task_keys: _,
|
||||
} => {
|
||||
ongoing_total_documents += total_documents;
|
||||
ongoing_processed_documents += processed_documents;
|
||||
ongoing_missing_documents +=
|
||||
total_documents.saturating_sub(*received_documents);
|
||||
}
|
||||
ImportIndexState::Finished { total_documents } => {
|
||||
ongoing_total_documents += total_documents;
|
||||
ongoing_processed_documents += total_documents;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ImportState::Finished { total_indexes, total_documents } => {
|
||||
finished_count += 1;
|
||||
ongoing_total_indexes = *total_indexes;
|
||||
ongoing_total_documents += *total_documents;
|
||||
ongoing_processed_documents += *total_documents;
|
||||
}
|
||||
}
|
||||
}
|
||||
format!(
|
||||
"Importing documents from {total} remotes{waiting}{ongoing}{finished}",
|
||||
total = self.in_remotes.len(),
|
||||
waiting = if let Some(first_waiting) = first_waiting {
|
||||
&format!(
|
||||
", waiting on first task from `{}`{others}",
|
||||
first_waiting,
|
||||
others = if other_waiting_count > 0 {
|
||||
&format!(" and {other_waiting_count} other remotes")
|
||||
} else {
|
||||
""
|
||||
}
|
||||
)
|
||||
} else {
|
||||
""
|
||||
},
|
||||
ongoing = if let Some(first_ongoing) = first_ongoing {
|
||||
&format!(", awaiting {ongoing_missing_documents} and processed {ongoing_processed_documents} out of {ongoing_total_documents} documents in {ongoing_total_indexes} indexes from `{first_ongoing}`{others}",
|
||||
others=if other_ongoing_count > 0 {&format!(" and {other_ongoing_count} other remotes")} else {""})
|
||||
} else {
|
||||
""
|
||||
},
|
||||
finished = if finished_count > 0 {
|
||||
&format!(", {finished_count} remotes finished processing")
|
||||
} else {
|
||||
""
|
||||
}
|
||||
)
|
||||
}
|
||||
NetworkTopologyState::Finished => "Finished".into(),
|
||||
};
|
||||
Details::NetworkTopologyChange {
|
||||
moved_documents: self.stats.moved_documents,
|
||||
received_documents: self.stats.received_documents,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_import_finished(&self) -> bool {
|
||||
self.in_remotes.values().all(|remote| remote.is_finished())
|
||||
}
|
||||
}
|
||||
|
||||
pub enum ReceiveTaskError {
|
||||
UnknownRemote(String),
|
||||
DuplicateTask(DocumentId),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum NetworkTopologyState {
|
||||
WaitingForOlderTasks,
|
||||
ExportingDocuments,
|
||||
ImportingDocuments,
|
||||
Finished,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NetworkTopologyStats {
|
||||
pub received_documents: u64,
|
||||
pub moved_documents: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct InRemote {
|
||||
import_state: ImportState,
|
||||
}
|
||||
|
||||
impl InRemote {
|
||||
pub fn new() -> Self {
|
||||
Self { import_state: ImportState::WaitingForInitialTask }
|
||||
}
|
||||
|
||||
pub fn is_finished(&self) -> bool {
|
||||
matches!(self.import_state, ImportState::Finished { .. })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
enum ImportState {
|
||||
/// Initially Meilisearch doesn't know how many documents it should expect from a remote.
|
||||
/// The first task for each remote contains the information of how many indexes will be imported,
|
||||
/// and the first task for each index contains the number of documents to import for that index.
|
||||
#[default]
|
||||
WaitingForInitialTask,
|
||||
Ongoing {
|
||||
import_index_state: BTreeMap<String, ImportIndexState>,
|
||||
total_indexes: u64,
|
||||
},
|
||||
Finished {
|
||||
total_indexes: u64,
|
||||
total_documents: u64,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
enum ImportIndexState {
|
||||
Ongoing {
|
||||
total_documents: u64,
|
||||
received_documents: u64,
|
||||
processed_documents: u64,
|
||||
task_keys: Vec<DocumentId>,
|
||||
},
|
||||
Finished {
|
||||
total_documents: u64,
|
||||
},
|
||||
}
|
||||
|
||||
impl ImportIndexState {
|
||||
pub fn is_finished(&self) -> bool {
|
||||
matches!(self, ImportIndexState::Finished { .. })
|
||||
}
|
||||
|
||||
fn total_documents(&self) -> u64 {
|
||||
match *self {
|
||||
ImportIndexState::Ongoing { total_documents, .. }
|
||||
| ImportIndexState::Finished { total_documents } => total_documents,
|
||||
}
|
||||
}
|
||||
}
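The interplay between `receive_remote_task` and `process_remote_tasks` boils down to per-index bookkeeping: documents are first counted as received when the import task is registered, then counted as processed when it runs, and the index flips to finished once the processed count reaches the announced total. A simplified, self-contained mirror of that bookkeeping (field and type names are illustrative, not the real `ImportIndexState`):

// Per-index import bookkeeping, simplified: received vs processed documents,
// finishing once processed >= total.
#[derive(Debug, Clone, Copy)]
enum IndexImport {
    Ongoing { total: u64, received: u64, processed: u64 },
    Finished { total: u64 },
}

impl IndexImport {
    fn receive(&mut self, count: u64) {
        if let IndexImport::Ongoing { total, received, processed } = *self {
            *self = IndexImport::Ongoing { total, received: received + count, processed };
        }
    }

    fn process(&mut self, count: u64) {
        if let IndexImport::Ongoing { total, received, processed } = *self {
            let processed = processed + count;
            *self = if processed >= total {
                IndexImport::Finished { total }
            } else {
                IndexImport::Ongoing { total, received, processed }
            };
        }
    }
}

fn main() {
    let mut idx = IndexImport::Ongoing { total: 100, received: 0, processed: 0 };
    idx.receive(60);
    idx.process(60);
    idx.receive(40);
    idx.process(40);
    // Finished { total: 100 }
    println!("{idx:?}");
}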
|
||||
|
||||
pub mod headers {
|
||||
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
|
||||
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
|
||||
pub const PROXY_ORIGIN_NETWORK_VERSION_HEADER: &str = "Meili-Proxy-Origin-Network-Version";
|
||||
pub const PROXY_IMPORT_REMOTE_HEADER: &str = "Meili-Proxy-Import-Remote";
|
||||
pub const PROXY_IMPORT_INDEX_COUNT_HEADER: &str = "Meili-Proxy-Import-Index-Count";
|
||||
pub const PROXY_IMPORT_INDEX_HEADER: &str = "Meili-Proxy-Import-Index";
|
||||
pub const PROXY_IMPORT_TASK_KEY_HEADER: &str = "Meili-Proxy-Import-Task-Key";
|
||||
pub const PROXY_IMPORT_DOCS_HEADER: &str = "Meili-Proxy-Import-Docs";
|
||||
pub const PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER: &str = "Meili-Proxy-Import-Total-Index-Docs";
|
||||
}
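These constants name the headers a proxied task carries between nodes: the `Origin` group identifies where the task was first registered, and the `Import` group describes a document import performed during a network topology change. A hedged sketch of assembling the origin headers for an outgoing request (the values are made up; in this diff the proxy code fills them from the task and the `Network` object, and it URL-encodes the remote name with the `urlencoding` crate):

// Sketch: building the origin headers for a proxied request.
// Constants copied from the `headers` module above; values are illustrative.
const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
const PROXY_ORIGIN_NETWORK_VERSION_HEADER: &str = "Meili-Proxy-Origin-Network-Version";

fn origin_headers(this: &str, task_uid: u32, network_version: &str) -> Vec<(&'static str, String)> {
    vec![
        // which remote first registered the task
        (PROXY_ORIGIN_REMOTE_HEADER, urlencoding::encode(this).into_owned()),
        // its uid in that remote's task queue
        (PROXY_ORIGIN_TASK_UID_HEADER, task_uid.to_string()),
        // the network version the task was registered under
        (PROXY_ORIGIN_NETWORK_VERSION_HEADER, network_version.to_string()),
    ]
}

fn main() {
    for (name, value) in origin_headers("ms-0", 42, "00000000-0000-0000-0000-000000000000") {
        println!("{name}: {value}");
    }
}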
|
||||
@@ -23,6 +23,8 @@ use crate::{versioning, InstanceUid};
|
||||
|
||||
pub type TaskId = u32;
|
||||
|
||||
pub mod enterprise_edition;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Task {
|
||||
@@ -44,7 +46,10 @@ pub struct Task {
|
||||
pub kind: KindWithContent,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub network: Option<TaskNetwork>,
|
||||
pub network: Option<enterprise_edition::network::DbTaskNetwork>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub custom_metadata: Option<String>,
|
||||
}
|
||||
|
||||
impl Task {
|
||||
@@ -58,6 +63,7 @@ impl Task {
|
||||
| TaskDeletion { .. }
|
||||
| Export { .. }
|
||||
| UpgradeDatabase { .. }
|
||||
| NetworkTopologyChange { .. }
|
||||
| IndexSwap { .. } => None,
|
||||
DocumentAdditionOrUpdate { index_uid, .. }
|
||||
| DocumentEdition { index_uid, .. }
|
||||
@@ -96,6 +102,7 @@ impl Task {
|
||||
| KindWithContent::SnapshotCreation
|
||||
| KindWithContent::Export { .. }
|
||||
| KindWithContent::UpgradeDatabase { .. }
|
||||
| KindWithContent::NetworkTopologyChange { .. }
|
||||
| KindWithContent::IndexCompaction { .. } => None,
|
||||
}
|
||||
}
|
||||
@@ -175,6 +182,7 @@ pub enum KindWithContent {
|
||||
IndexCompaction {
|
||||
index_uid: String,
|
||||
},
|
||||
NetworkTopologyChange(enterprise_edition::network::NetworkTopologyChange),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
|
||||
@@ -212,6 +220,7 @@ impl KindWithContent {
|
||||
KindWithContent::Export { .. } => Kind::Export,
|
||||
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
|
||||
KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
|
||||
KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,6 +233,7 @@ impl KindWithContent {
|
||||
| TaskCancelation { .. }
|
||||
| TaskDeletion { .. }
|
||||
| Export { .. }
|
||||
| NetworkTopologyChange { .. }
|
||||
| UpgradeDatabase { .. } => vec![],
|
||||
DocumentAdditionOrUpdate { index_uid, .. }
|
||||
| DocumentEdition { index_uid, .. }
|
||||
@@ -337,6 +347,11 @@ impl KindWithContent {
|
||||
pre_compaction_size: None,
|
||||
post_compaction_size: None,
|
||||
}),
|
||||
KindWithContent::NetworkTopologyChange { .. } => Some(Details::NetworkTopologyChange {
|
||||
moved_documents: 0,
|
||||
received_documents: 0,
|
||||
message: "processing tasks for previous network versions".into(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -389,7 +404,7 @@ impl KindWithContent {
|
||||
})
|
||||
}
|
||||
KindWithContent::IndexSwap { .. } => {
|
||||
todo!()
|
||||
unimplemented!("do not call `default_finished_details` for `IndexSwap` tasks")
|
||||
}
|
||||
KindWithContent::TaskCancelation { query, tasks } => Some(Details::TaskCancelation {
|
||||
matched_tasks: tasks.len(),
|
||||
@@ -424,6 +439,9 @@ impl KindWithContent {
|
||||
pre_compaction_size: None,
|
||||
post_compaction_size: None,
|
||||
}),
|
||||
KindWithContent::NetworkTopologyChange(network_topology_change) => {
|
||||
Some(network_topology_change.to_details())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -491,6 +509,9 @@ impl From<&KindWithContent> for Option<Details> {
|
||||
pre_compaction_size: None,
|
||||
post_compaction_size: None,
|
||||
}),
|
||||
KindWithContent::NetworkTopologyChange(network_topology_change) => {
|
||||
Some(network_topology_change.to_details())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -602,6 +623,7 @@ pub enum Kind {
|
||||
Export,
|
||||
UpgradeDatabase,
|
||||
IndexCompaction,
|
||||
NetworkTopologyChange,
|
||||
}
|
||||
|
||||
impl Kind {
|
||||
@@ -621,6 +643,7 @@ impl Kind {
|
||||
| Kind::DumpCreation
|
||||
| Kind::Export
|
||||
| Kind::UpgradeDatabase
|
||||
| Kind::NetworkTopologyChange
|
||||
| Kind::SnapshotCreation => false,
|
||||
}
|
||||
}
|
||||
@@ -643,6 +666,7 @@ impl Display for Kind {
|
||||
Kind::Export => write!(f, "export"),
|
||||
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
|
||||
Kind::IndexCompaction => write!(f, "indexCompaction"),
|
||||
Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -680,6 +704,8 @@ impl FromStr for Kind {
|
||||
Ok(Kind::UpgradeDatabase)
|
||||
} else if kind.eq_ignore_ascii_case("indexCompaction") {
|
||||
Ok(Kind::IndexCompaction)
|
||||
} else if kind.eq_ignore_ascii_case("networkTopologyChange") {
|
||||
Ok(Kind::NetworkTopologyChange)
|
||||
} else {
|
||||
Err(ParseTaskKindError(kind.to_owned()))
|
||||
}
|
||||
@@ -770,36 +796,11 @@ pub enum Details {
|
||||
pre_compaction_size: Option<Byte>,
|
||||
post_compaction_size: Option<Byte>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(untagged, rename_all = "camelCase")]
|
||||
pub enum TaskNetwork {
|
||||
Origin { origin: Origin },
|
||||
Remotes { remote_tasks: BTreeMap<String, RemoteTask> },
|
||||
}
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Origin {
|
||||
pub remote_name: String,
|
||||
pub task_uid: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RemoteTask {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
task_uid: Option<TaskId>,
|
||||
error: Option<ResponseError>,
|
||||
}
|
||||
|
||||
impl From<Result<TaskId, ResponseError>> for RemoteTask {
|
||||
fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
|
||||
match res {
|
||||
Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
|
||||
Err(err) => RemoteTask { task_uid: None, error: Some(err) },
|
||||
}
|
||||
}
|
||||
NetworkTopologyChange {
|
||||
moved_documents: u64,
|
||||
received_documents: u64,
|
||||
message: String,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
|
||||
@@ -842,6 +843,9 @@ impl Details {
|
||||
| Self::Export { .. }
|
||||
| Self::UpgradeDatabase { .. }
|
||||
| Self::IndexSwap { .. } => (),
|
||||
Self::NetworkTopologyChange { moved_documents: _, received_documents: _, message } => {
|
||||
*message = format!("Failed. Previous status: {}", message);
|
||||
}
|
||||
}
|
||||
|
||||
details
|
||||
@@ -6,6 +6,10 @@ use meilisearch_types::error::{Code, ErrorCode, ResponseError};
|
||||
use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
|
||||
use meilisearch_types::milli;
|
||||
use meilisearch_types::milli::OrderBy;
|
||||
use meilisearch_types::tasks::enterprise_edition::network::headers::{
|
||||
PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
|
||||
PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
||||
};
|
||||
use serde_json::Value;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
@@ -93,8 +97,49 @@ pub enum MeilisearchHttpError {
|
||||
} else { PROXY_ORIGIN_TASK_UID_HEADER }
|
||||
)]
|
||||
InconsistentOriginHeaders { is_remote_missing: bool },
|
||||
#[error("Inconsistent `Import` headers: {remote}: {remote_status}, {index}: {index_status}, {docs}: {docs_status}.\n - Hint: either all three headers should be provided, or none of them",
|
||||
remote = PROXY_IMPORT_REMOTE_HEADER,
|
||||
remote_status = if *is_remote_missing { "missing" } else{ "provided" },
|
||||
index = PROXY_IMPORT_INDEX_HEADER,
|
||||
index_status = if *is_index_missing { "missing" } else { "provided" },
|
||||
docs = PROXY_IMPORT_DOCS_HEADER,
|
||||
docs_status = if *is_docs_missing { "missing" } else { "provided" }
|
||||
)]
|
||||
InconsistentImportHeaders {
|
||||
is_remote_missing: bool,
|
||||
is_index_missing: bool,
|
||||
is_docs_missing: bool,
|
||||
},
|
||||
#[error("Inconsistent `Import-Metadata` headers: {index_count}: {index_count_status}, {task_key}: {task_key_status}, {total_index_documents}: {total_index_documents_status}.\n - Hint: either all three headers should be provided, or none of them",
|
||||
index_count = PROXY_IMPORT_INDEX_COUNT_HEADER,
|
||||
index_count_status = if *is_index_count_missing { "missing" } else { "provided"},
|
||||
task_key = PROXY_IMPORT_TASK_KEY_HEADER,
|
||||
task_key_status = if *is_task_key_missing { "missing" } else { "provided"},
|
||||
total_index_documents = PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
||||
total_index_documents_status = if *is_total_index_documents_missing { "missing" } else { "provided"},
|
||||
)]
|
||||
InconsistentImportMetadataHeaders {
|
||||
is_index_count_missing: bool,
|
||||
is_task_key_missing: bool,
|
||||
is_total_index_documents_missing: bool,
|
||||
},
|
||||
|
||||
#[error(
|
||||
"Inconsistent task network headers: origin headers: {origin_status}, import headers: {import_status}, import metadata: {import_metadata_status}",
|
||||
origin_status = if *is_missing_origin { "missing"} else { "present" },
|
||||
import_status = if *is_missing_import { "missing"} else { "present" },
|
||||
import_metadata_status = if *is_missing_import_metadata { "missing"} else { "present" })]
|
||||
InconsistentTaskNetworkHeaders {
|
||||
is_missing_origin: bool,
|
||||
is_missing_import: bool,
|
||||
is_missing_import_metadata: bool,
|
||||
},
|
||||
#[error("Invalid value for header {header_name}: {msg}")]
|
||||
InvalidHeaderValue { header_name: &'static str, msg: String },
|
||||
#[error("This remote is not the leader of the network.\n - Note: only the leader `{leader}` can receive new tasks.")]
|
||||
NotLeader { leader: String },
|
||||
#[error("Unexpected `previousRemotes` in network call.\n - Note: `previousRemote` is reserved for internal use.")]
|
||||
UnexpectedNetworkPreviousRemotes,
|
||||
}
|
||||
|
||||
impl MeilisearchHttpError {
|
||||
@@ -142,10 +187,17 @@ impl ErrorCode for MeilisearchHttpError {
|
||||
MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
|
||||
Code::InvalidMultiSearchQueryPersonalization
|
||||
}
|
||||
MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
|
||||
MeilisearchHttpError::InconsistentOriginHeaders { .. }
|
||||
| MeilisearchHttpError::InconsistentImportHeaders { .. }
|
||||
| MeilisearchHttpError::InconsistentImportMetadataHeaders { .. }
|
||||
| MeilisearchHttpError::InconsistentTaskNetworkHeaders { .. } => {
|
||||
Code::InconsistentDocumentChangeHeaders
|
||||
}
|
||||
MeilisearchHttpError::InvalidHeaderValue { .. } => Code::InvalidHeaderValue,
|
||||
MeilisearchHttpError::NotLeader { .. } => Code::NotLeader,
|
||||
MeilisearchHttpError::UnexpectedNetworkPreviousRemotes => {
|
||||
Code::UnexpectedNetworkPreviousRemotes
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,7 +45,9 @@ use crate::extractors::authentication::policies::*;
|
||||
use crate::extractors::authentication::GuardedData;
|
||||
use crate::extractors::payload::Payload;
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
|
||||
use crate::routes::indexes::enterprise_edition::proxy::{
|
||||
proxy, task_network_and_check_leader, Body,
|
||||
};
|
||||
use crate::routes::indexes::search::fix_sort_query_parameters;
|
||||
use crate::routes::{
|
||||
get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
|
||||
@@ -333,13 +335,16 @@ impl Aggregate for DocumentsDeletionAggregator {
|
||||
pub async fn delete_document(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
path: web::Path<DocumentParam>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
let DocumentParam { index_uid, document_id } = path.into_inner();
|
||||
let index_uid = IndexUid::try_from(index_uid)?;
|
||||
let network = index_scheduler.network();
|
||||
let task_network = task_network_and_check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -357,13 +362,23 @@ pub async fn delete_document(
|
||||
};
|
||||
let uid = get_task_id(&req, &opt)?;
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let mut task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(
|
||||
task,
|
||||
uid,
|
||||
custom_metadata,
|
||||
dry_run,
|
||||
task_network,
|
||||
)
|
||||
})
|
||||
.await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
|
||||
if let Some(task_network) = task.network.take() {
|
||||
proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -678,6 +693,19 @@ pub struct UpdateDocumentsQuery {
|
||||
#[param(value_type = char, default = ",", example = ";")]
|
||||
#[deserr(default, try_from(char) = from_char_csv_delimiter -> DeserrQueryParamError<InvalidDocumentCsvDelimiter>, error = DeserrQueryParamError<InvalidDocumentCsvDelimiter>)]
|
||||
pub csv_delimiter: Option<u8>,
|
||||
|
||||
#[param(example = "custom")]
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
|
||||
pub custom_metadata: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Deserr, IntoParams)]
|
||||
#[deserr(error = DeserrQueryParamError, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[into_params(parameter_in = Query, rename_all = "camelCase")]
|
||||
pub struct CustomMetadataQuery {
|
||||
#[param(example = "custom")]
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
|
||||
pub custom_metadata: Option<String>,
|
||||
}
|
||||
|
||||
fn from_char_csv_delimiter(
|
||||
@@ -819,6 +847,7 @@ pub async fn replace_documents(
|
||||
body,
|
||||
IndexDocumentsMethod::ReplaceDocuments,
|
||||
uid,
|
||||
params.custom_metadata,
|
||||
dry_run,
|
||||
allow_index_creation,
|
||||
&req,
|
||||
@@ -921,6 +950,7 @@ pub async fn update_documents(
|
||||
body,
|
||||
IndexDocumentsMethod::UpdateDocuments,
|
||||
uid,
|
||||
params.custom_metadata,
|
||||
dry_run,
|
||||
allow_index_creation,
|
||||
&req,
|
||||
@@ -940,12 +970,14 @@ async fn document_addition(
|
||||
body: Payload,
|
||||
method: IndexDocumentsMethod,
|
||||
task_id: Option<TaskId>,
|
||||
custom_metadata: Option<String>,
|
||||
dry_run: bool,
|
||||
allow_index_creation: bool,
|
||||
req: &HttpRequest,
|
||||
) -> Result<SummarizedTaskView, MeilisearchHttpError> {
|
||||
let mime_type = extract_mime_type(req)?;
|
||||
let network = index_scheduler.network();
|
||||
let task_network = task_network_and_check_leader(&req, &network)?;
|
||||
|
||||
let format = match (
|
||||
mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),
|
||||
@@ -1064,9 +1096,18 @@ async fn document_addition(
|
||||
index_uid: index_uid.to_string(),
|
||||
};
|
||||
|
||||
// FIXME: not new to this PR, but _any_ error here will cause the payload to unduly persist
|
||||
let scheduler = index_scheduler.clone();
|
||||
let task = match tokio::task::spawn_blocking(move || scheduler.register(task, task_id, dry_run))
|
||||
.await?
|
||||
let mut task = match tokio::task::spawn_blocking(move || {
|
||||
scheduler.register_with_custom_metadata(
|
||||
task,
|
||||
task_id,
|
||||
custom_metadata,
|
||||
dry_run,
|
||||
task_network,
|
||||
)
|
||||
})
|
||||
.await?
|
||||
{
|
||||
Ok(task) => task,
|
||||
Err(e) => {
|
||||
@@ -1075,12 +1116,13 @@ async fn document_addition(
|
||||
}
|
||||
};
|
||||
|
||||
if network.sharding {
|
||||
if let Some(task_network) = task.network.take() {
|
||||
if let Some(file) = file {
|
||||
proxy(
|
||||
&index_scheduler,
|
||||
&index_uid,
|
||||
Some(&index_uid),
|
||||
req,
|
||||
task_network,
|
||||
network,
|
||||
Body::with_ndjson_payload(file),
|
||||
&task,
|
||||
@@ -1130,7 +1172,7 @@ async fn copy_body_to_file(
|
||||
/// Delete a set of documents based on an array of document ids.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "{indexUid}/delete-batch",
|
||||
path = "{indexUid}/documents/delete-batch",
|
||||
tag = "Documents",
|
||||
security(("Bearer" = ["documents.delete", "documents.*", "*"])),
|
||||
params(
|
||||
@@ -1161,13 +1203,17 @@ pub async fn delete_documents_batch(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
body: web::Json<Vec<Value>>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
debug!(parameters = ?body, "Delete documents by batch");
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let network = index_scheduler.network();
|
||||
let task_network = task_network_and_check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -1188,13 +1234,31 @@ pub async fn delete_documents_batch(
|
||||
KindWithContent::DocumentDeletion { index_uid: index_uid.to_string(), documents_ids: ids };
|
||||
let uid = get_task_id(&req, &opt)?;
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let mut task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(
|
||||
task,
|
||||
uid,
|
||||
custom_metadata,
|
||||
dry_run,
|
||||
task_network,
|
||||
)
|
||||
})
|
||||
.await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
|
||||
if let Some(task_network) = task.network.take() {
|
||||
proxy(
|
||||
&index_scheduler,
|
||||
Some(&index_uid),
|
||||
&req,
|
||||
task_network,
|
||||
network,
|
||||
Body::inline(body),
|
||||
&task,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -1244,16 +1308,20 @@ pub struct DocumentDeletionByFilter {
|
||||
pub async fn delete_documents_by_filter(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
body: AwebJson<DocumentDeletionByFilter, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
debug!(parameters = ?body, "Delete documents by filter");
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let index_uid = index_uid.into_inner();
|
||||
let filter = body.into_inner();
|
||||
let network = index_scheduler.network();
|
||||
let task_network = task_network_and_check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -1280,13 +1348,31 @@ pub async fn delete_documents_by_filter(
|
||||
|
||||
let uid = get_task_id(&req, &opt)?;
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let mut task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(
|
||||
task,
|
||||
uid,
|
||||
custom_metadata,
|
||||
dry_run,
|
||||
task_network,
|
||||
)
|
||||
})
|
||||
.await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
|
||||
if let Some(task_network) = task.network.take() {
|
||||
proxy(
|
||||
&index_scheduler,
|
||||
Some(&index_uid),
|
||||
&req,
|
||||
task_network,
|
||||
network,
|
||||
Body::inline(filter),
|
||||
&task,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -1372,38 +1458,41 @@ impl Aggregate for EditDocumentsByFunctionAggregator {
|
||||
pub async fn edit_documents_by_function(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ALL }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
body: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
debug!(parameters = ?params, "Edit documents by function");
|
||||
debug!(parameters = ?body, "Edit documents by function");
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
|
||||
index_scheduler
|
||||
.features()
|
||||
.check_edit_documents_by_function("Using the documents edit route")?;
|
||||
|
||||
let network = index_scheduler.network();
|
||||
let task_network = task_network_and_check_leader(&req, &network)?;
|
||||
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let index_uid = index_uid.into_inner();
|
||||
let params = params.into_inner();
|
||||
let body = body.into_inner();
|
||||
|
||||
analytics.publish(
|
||||
EditDocumentsByFunctionAggregator {
|
||||
filtered: params.filter.is_some(),
|
||||
with_context: params.context.is_some(),
|
||||
filtered: body.filter.is_some(),
|
||||
with_context: body.context.is_some(),
|
||||
index_creation: index_scheduler.index(&index_uid).is_err(),
|
||||
},
|
||||
&req,
|
||||
);
|
||||
|
||||
let engine = milli::rhai::Engine::new();
|
||||
if let Err(e) = engine.compile(¶ms.function) {
|
||||
if let Err(e) = engine.compile(&body.function) {
|
||||
return Err(ResponseError::from_msg(e.to_string(), Code::BadRequest));
|
||||
}
|
||||
|
||||
if let Some(ref filter) = params.filter {
|
||||
if let Some(ref filter) = body.filter {
|
||||
// we ensure the filter is well formed before enqueuing it
|
||||
crate::search::parse_filter(
|
||||
filter,
|
||||
@@ -1414,8 +1503,8 @@ pub async fn edit_documents_by_function(
|
||||
}
|
||||
let task = KindWithContent::DocumentEdition {
|
||||
index_uid: index_uid.clone(),
|
||||
filter_expr: params.filter.clone(),
|
||||
context: match params.context.clone() {
|
||||
filter_expr: body.filter.clone(),
|
||||
context: match body.context.clone() {
|
||||
Some(Value::Object(m)) => Some(m),
|
||||
None => None,
|
||||
_ => {
|
||||
@@ -1425,18 +1514,36 @@ pub async fn edit_documents_by_function(
|
||||
))
|
||||
}
|
||||
},
|
||||
function: params.function.clone(),
|
||||
function: body.function.clone(),
|
||||
};
|
||||
|
||||
let uid = get_task_id(&req, &opt)?;
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
let task = {
|
||||
let mut task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(
|
||||
task,
|
||||
uid,
|
||||
custom_metadata,
|
||||
dry_run,
|
||||
task_network,
|
||||
)
|
||||
})
|
||||
.await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(params), &task).await?;
|
||||
if let Some(task_network) = task.network.take() {
|
||||
proxy(
|
||||
&index_scheduler,
|
||||
Some(&index_uid),
|
||||
&req,
|
||||
task_network,
|
||||
network,
|
||||
Body::inline(body),
|
||||
&task,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
@@ -1477,12 +1584,15 @@ pub async fn edit_documents_by_function(
|
||||
pub async fn clear_all_documents(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
|
||||
index_uid: web::Path<String>,
|
||||
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
|
||||
req: HttpRequest,
|
||||
opt: web::Data<Opt>,
|
||||
analytics: web::Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let network = index_scheduler.network();
|
||||
let CustomMetadataQuery { custom_metadata } = params.into_inner();
|
||||
let task_network = task_network_and_check_leader(&req, &network)?;
|
||||
|
||||
analytics.publish(
|
||||
DocumentsDeletionAggregator {
|
||||
@@ -1498,14 +1608,24 @@ pub async fn clear_all_documents(
|
||||
let uid = get_task_id(&req, &opt)?;
|
||||
let dry_run = is_dry_run(&req, &opt)?;
|
||||
|
||||
let task = {
|
||||
let mut task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
|
||||
tokio::task::spawn_blocking(move || {
|
||||
index_scheduler.register_with_custom_metadata(
|
||||
task,
|
||||
uid,
|
||||
custom_metadata,
|
||||
dry_run,
|
||||
task_network,
|
||||
)
|
||||
})
|
||||
.await??
|
||||
};
|
||||
|
||||
if network.sharding && !dry_run {
|
||||
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
|
||||
if let Some(task_network) = task.network.take() {
|
||||
proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
// Use of this source code is governed by the Business Source License 1.1,
|
||||
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fs::File;
|
||||
|
||||
@@ -10,8 +11,18 @@ use actix_web::http::header::CONTENT_TYPE;
|
||||
use actix_web::HttpRequest;
|
||||
use bytes::Bytes;
|
||||
use index_scheduler::IndexScheduler;
|
||||
use meilisearch_types::enterprise_edition::network::Remote;
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::tasks::{Origin, RemoteTask, TaskNetwork};
|
||||
use meilisearch_types::milli::DocumentId;
|
||||
use meilisearch_types::tasks::enterprise_edition::network::headers::{
|
||||
PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
|
||||
PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
||||
PROXY_ORIGIN_NETWORK_VERSION_HEADER, PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
|
||||
};
|
||||
use meilisearch_types::tasks::enterprise_edition::network::{
|
||||
DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
|
||||
};
|
||||
use meilisearch_types::tasks::Task;
|
||||
use reqwest::StatusCode;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::Value;
|
||||
@@ -22,13 +33,18 @@ use crate::routes::indexes::enterprise_edition::proxy::error::{
|
||||
};
|
||||
use crate::routes::SummarizedTaskView;
|
||||
|
||||
pub enum Body<T: serde::Serialize> {
|
||||
pub enum Body<T, F>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
F: FnMut(&str, &Remote, &mut T),
|
||||
{
|
||||
NdJsonPayload(File),
|
||||
Inline(T),
|
||||
Generated(T, F),
|
||||
None,
|
||||
}
|
||||
|
||||
impl Body<()> {
|
||||
impl Body<(), fn(&str, &Remote, &mut ())> {
|
||||
pub fn with_ndjson_payload(file: File) -> Self {
|
||||
Self::NdJsonPayload(file)
|
||||
}
|
||||
@@ -38,7 +54,115 @@ impl Body<()> {
|
||||
}
|
||||
}
|
||||
|
||||
/// If necessary, proxies the passed request to the network and updates the task description.
|
||||
impl<T> Body<T, fn(&str, &Remote, &mut T)>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
pub fn inline(payload: T) -> Self {
|
||||
Self::Inline(payload)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, F> Body<T, F>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
F: FnMut(&str, &Remote, &mut T),
|
||||
{
|
||||
pub fn generated(initial: T, f: F) -> Self {
|
||||
Self::Generated(initial, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, F> Body<T, F>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
F: FnMut(&str, &Remote, &mut T),
|
||||
{
|
||||
pub fn into_bytes_iter(
|
||||
self,
|
||||
remotes: impl IntoIterator<Item = (String, Remote)>,
|
||||
) -> Result<
|
||||
impl Iterator<Item = (Option<Bytes>, (String, Remote))>,
|
||||
meilisearch_types::milli::Error,
|
||||
> {
|
||||
let bytes = match self {
|
||||
Body::NdJsonPayload(file) => {
|
||||
Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
|
||||
}
|
||||
|
||||
Body::Inline(payload) => {
|
||||
Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
|
||||
}
|
||||
|
||||
Body::None => None,
|
||||
|
||||
Body::Generated(mut initial, mut f) => {
|
||||
return Ok(either::Right(remotes.into_iter().map(move |(name, remote)| {
|
||||
f(&name, &remote, &mut initial);
|
||||
let bytes =
|
||||
Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()));
|
||||
(bytes, (name, remote))
|
||||
})));
|
||||
}
|
||||
};
|
||||
Ok(either::Left(std::iter::repeat(bytes).zip(remotes)))
|
||||
}
|
||||
}
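The new `Generated` variant lets the proxied payload differ per remote: the closure rewrites the payload before it is serialized for each `(name, Remote)` pair, whereas `Inline` and `NdJsonPayload` bodies are serialized once and repeated. A simplified standalone mirror of `into_bytes_iter` showing only that distinction (the real version also handles `None`, memory-maps ndjson files, and yields `bytes::Bytes`):

// Simplified mirror of `Body::into_bytes_iter`: either one payload repeated
// for every remote, or a payload regenerated per remote by a closure.
use serde::Serialize;

enum Body<T, F> {
    Inline(T),
    Generated(T, F),
}

fn payloads<T, F>(body: Body<T, F>, remotes: Vec<String>) -> Vec<(String, Vec<u8>)>
where
    T: Serialize,
    F: FnMut(&str, &mut T),
{
    match body {
        Body::Inline(payload) => {
            let bytes = serde_json::to_vec(&payload).unwrap();
            remotes.into_iter().map(|name| (name, bytes.clone())).collect()
        }
        Body::Generated(mut initial, mut f) => remotes
            .into_iter()
            .map(|name| {
                // let the caller customize the payload for this remote
                f(&name, &mut initial);
                (name.clone(), serde_json::to_vec(&initial).unwrap())
            })
            .collect(),
    }
}

fn main() {
    let body = Body::Generated(
        serde_json::json!({ "target": "" }),
        |name: &str, v: &mut serde_json::Value| {
            v["target"] = serde_json::Value::String(name.to_string());
        },
    );
    for (name, bytes) in payloads(body, vec!["ms-1".into(), "ms-2".into()]) {
        println!("{name}: {}", String::from_utf8(bytes).unwrap());
    }
    // ms-1: {"target":"ms-1"}
    // ms-2: {"target":"ms-2"}
}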
|
||||
|
||||
/// Parses the headers to determine whether this task is a duplicate that originates with a remote.
|
||||
///
|
||||
/// If not, checks whether this remote is the leader and returns `MeilisearchHttpError::NotLeader` if it is not.
|
||||
///
|
||||
/// If there is no leader, returns `Ok(None)`
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// - `MeilisearchHttpError::NotLeader`: if the following are true simultaneously:
|
||||
/// 1. The task originates with the current node
|
||||
/// 2. There's a declared `leader`
|
||||
/// 3. The declared leader is **not** the current node
|
||||
/// - `MeilisearchHttpError::InvalidHeaderValue`: if only parts of the headers are present, or if they cannot be parsed as a task network.
|
||||
/// - `MeilisearchHttpError::InconsistentTaskNetworkHeaders`: if only a subset of the task network headers is present.
|
||||
pub fn task_network_and_check_leader(
|
||||
req: &HttpRequest,
|
||||
network: &meilisearch_types::enterprise_edition::network::Network,
|
||||
) -> Result<Option<TaskNetwork>, MeilisearchHttpError> {
|
||||
match (origin_from_req(req)?, import_data_from_req(req)?, import_metadata_from_req(req)?) {
|
||||
(Some(network_change), Some(import_from), Some(metadata)) => {
|
||||
Ok(Some(TaskNetwork::Import { import_from, network_change, metadata }))
|
||||
}
|
||||
(Some(origin), None, None) => Ok(Some(TaskNetwork::Origin { origin })),
|
||||
(None, None, None) => {
|
||||
match (network.leader.as_deref(), network.local.as_deref()) {
|
||||
// 1. Always allowed if there is no leader
|
||||
(None, _) => return Ok(None),
|
||||
// 2. Allowed if the leader is self
|
||||
(Some(leader), Some(this)) if leader == this => (),
|
||||
// 3. Any other change is disallowed
|
||||
(Some(leader), _) => {
|
||||
return Err(
|
||||
MeilisearchHttpError::NotLeader { leader: leader.to_string() }.into()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(TaskNetwork::Remotes {
|
||||
remote_tasks: Default::default(),
|
||||
network_version: network.version,
|
||||
}))
|
||||
}
|
||||
// all good cases were matched, so this is always an error
|
||||
(origin, import_from, metadata) => {
|
||||
Err(MeilisearchHttpError::InconsistentTaskNetworkHeaders {
|
||||
is_missing_origin: origin.is_none(),
|
||||
is_missing_import: import_from.is_none(),
|
||||
is_missing_import_metadata: metadata.is_none(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
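The `(None, None, None)` arm above encodes the leadership rule for brand-new tasks. A condensed standalone sketch of just that decision (names and the error type are illustrative):

// Sketch of the leadership rule: with no task network headers on the request,
// a node may only register the task locally if there is no leader, or if it
// is itself the leader.
fn may_accept_new_task(leader: Option<&str>, this: Option<&str>) -> Result<(), String> {
    match (leader, this) {
        // 1. no declared leader: every node accepts new tasks
        (None, _) => Ok(()),
        // 2. this node is the leader
        (Some(leader), Some(this)) if leader == this => Ok(()),
        // 3. otherwise the request must go through the leader
        (Some(leader), _) => Err(format!("not the leader; send the task to `{leader}`")),
    }
}

fn main() {
    assert!(may_accept_new_task(None, Some("ms-0")).is_ok());
    assert!(may_accept_new_task(Some("ms-0"), Some("ms-0")).is_ok());
    assert!(may_accept_new_task(Some("ms-0"), Some("ms-1")).is_err());
    println!("leader rule behaves as described");
}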
|
||||
|
||||
/// Updates the task description and, if necessary, proxies the passed request to the network.
|
||||
///
|
||||
/// This function reads the custom headers from the request to determine whether it must proxy the request or whether the request
|
||||
/// has already been proxied.
|
||||
@@ -48,152 +172,139 @@ impl Body<()> {
|
||||
/// with the task ids from the task queues of the remotes.
|
||||
/// - when the request has already been proxied, the custom headers contain information about the remote that created the initial task.
|
||||
/// This information is copied to the passed task.
|
||||
pub async fn proxy<T: serde::Serialize>(
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The updated task. The task is read back from the database to avoid erasing concurrent changes.
|
||||
pub async fn proxy<T, F>(
|
||||
index_scheduler: &IndexScheduler,
|
||||
index_uid: &str,
|
||||
index_uid: Option<&str>,
|
||||
req: &HttpRequest,
|
||||
mut task_network: DbTaskNetwork,
|
||||
network: meilisearch_types::enterprise_edition::network::Network,
|
||||
body: Body<T>,
|
||||
body: Body<T, F>,
|
||||
task: &meilisearch_types::tasks::Task,
|
||||
) -> Result<(), MeilisearchHttpError> {
|
||||
match origin_from_req(req)? {
|
||||
Some(origin) => {
|
||||
index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
|
||||
) -> Result<Task, MeilisearchHttpError>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
F: FnMut(&str, &Remote, &mut T),
|
||||
{
|
||||
if let DbTaskNetwork::Remotes { remote_tasks, network_version: _ } = &mut task_network {
|
||||
let this = network
|
||||
.local
|
||||
.as_deref()
|
||||
.expect("inconsistent `network.sharding` and `network.self`")
|
||||
.to_owned();
|
||||
|
||||
let content_type = match &body {
|
||||
// for file bodies, force x-ndjson
|
||||
Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
|
||||
// otherwise get content type from request
|
||||
_ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
|
||||
};
|
||||
|
||||
let mut in_flight_remote_queries = BTreeMap::new();
|
||||
let client = reqwest::ClientBuilder::new()
|
||||
.connect_timeout(std::time::Duration::from_secs(3))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let method = from_old_http_method(req.method());
|
||||
|
||||
// send payload to all remotes
|
||||
for (body, (node_name, node)) in body
|
||||
.into_bytes_iter(network.remotes.into_iter().filter(|(name, _)| name.as_str() != this))
|
||||
.map_err(|err| {
|
||||
MeilisearchHttpError::from_milli(err, index_uid.map(ToOwned::to_owned))
|
||||
})?
|
||||
{
|
||||
let client = client.clone();
|
||||
let api_key = node.write_api_key;
|
||||
let this = this.clone();
|
||||
let method = method.clone();
|
||||
let path_and_query = req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");
|
||||
|
||||
in_flight_remote_queries.insert(
|
||||
node_name,
|
||||
tokio::spawn({
|
||||
let url = format!("{}{}", node.url, path_and_query);
|
||||
|
||||
let url_encoded_this = urlencoding::encode(&this).into_owned();
|
||||
let url_encoded_task_uid = task.uid.to_string(); // a task UID is ASCII digits only, so it is already URL-safe
|
||||
|
||||
let content_type = content_type.map(|b| b.to_owned());
|
||||
|
||||
let backoff = backoff::ExponentialBackoffBuilder::new()
|
||||
.with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
|
||||
.build();
|
||||
|
||||
backoff::future::retry(backoff, move || {
|
||||
let url = url.clone();
|
||||
let client = client.clone();
|
||||
let url_encoded_this = url_encoded_this.clone();
|
||||
let url_encoded_task_uid = url_encoded_task_uid.clone();
|
||||
let content_type = content_type.clone();
|
||||
|
||||
let body = body.clone();
|
||||
let api_key = api_key.clone();
|
||||
let method = method.clone();
|
||||
|
||||
async move {
|
||||
try_proxy(
|
||||
method,
|
||||
&url,
|
||||
content_type.as_deref(),
|
||||
api_key.as_deref(),
|
||||
&client,
|
||||
&url_encoded_this,
|
||||
&url_encoded_task_uid,
|
||||
body,
|
||||
)
|
||||
.await
|
||||
}
|
||||
})
|
||||
}),
|
||||
);
|
||||
}
|
||||
None => {
|
||||
let this = network
|
||||
.local
|
||||
.as_deref()
|
||||
.expect("inconsistent `network.sharding` and `network.self`")
|
||||
.to_owned();
|
||||
|
||||
let content_type = match &body {
|
||||
// for file bodies, force x-ndjson
|
||||
Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
|
||||
// otherwise get content type from request
|
||||
_ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
|
||||
};
|
||||
// wait for all in-flight queries to finish and collect their results
|
||||
for (node_name, handle) in in_flight_remote_queries {
|
||||
match handle.await {
|
||||
Ok(Ok(res)) => {
|
||||
let task_uid = res.task_uid;
|
||||
|
||||
let body = match body {
|
||||
Body::NdJsonPayload(file) => Some(Bytes::from_owner(unsafe {
|
||||
memmap2::Mmap::map(&file).map_err(|err| {
|
||||
MeilisearchHttpError::from_milli(err.into(), Some(index_uid.to_owned()))
|
||||
})?
|
||||
})),
|
||||
|
||||
Body::Inline(payload) => {
|
||||
Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
|
||||
remote_tasks.insert(node_name, Ok(task_uid).into());
|
||||
}
|
||||
|
||||
Body::None => None,
|
||||
};
|
||||
|
||||
let mut in_flight_remote_queries = BTreeMap::new();
|
||||
let client = reqwest::ClientBuilder::new()
|
||||
.connect_timeout(std::time::Duration::from_secs(3))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let method = from_old_http_method(req.method());
|
||||
|
||||
// send payload to all remotes
|
||||
for (node_name, node) in
|
||||
network.remotes.into_iter().filter(|(name, _)| name.as_str() != this)
|
||||
{
|
||||
let body = body.clone();
|
||||
let client = client.clone();
|
||||
let api_key = node.write_api_key;
|
||||
let this = this.clone();
|
||||
let method = method.clone();
|
||||
let path_and_query =
|
||||
req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");
|
||||
|
||||
in_flight_remote_queries.insert(
|
||||
node_name,
|
||||
tokio::spawn({
|
||||
let url = format!("{}{}", node.url, path_and_query);
|
||||
|
||||
let url_encoded_this = urlencoding::encode(&this).into_owned();
|
||||
let url_encoded_task_uid = task.uid.to_string(); // a task UID is ASCII digits only, so it is already URL-safe
|
||||
|
||||
let content_type = content_type.map(|b| b.to_owned());
|
||||
|
||||
let backoff = backoff::ExponentialBackoffBuilder::new()
|
||||
.with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
|
||||
.build();
|
||||
|
||||
backoff::future::retry(backoff, move || {
|
||||
let url = url.clone();
|
||||
let client = client.clone();
|
||||
let url_encoded_this = url_encoded_this.clone();
|
||||
let url_encoded_task_uid = url_encoded_task_uid.clone();
|
||||
let content_type = content_type.clone();
|
||||
|
||||
let body = body.clone();
|
||||
let api_key = api_key.clone();
|
||||
let method = method.clone();
|
||||
|
||||
async move {
|
||||
try_proxy(
|
||||
method,
|
||||
&url,
|
||||
content_type.as_deref(),
|
||||
api_key.as_deref(),
|
||||
&client,
|
||||
&url_encoded_this,
|
||||
&url_encoded_task_uid,
|
||||
body,
|
||||
)
|
||||
.await
|
||||
}
|
||||
})
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
// wait for all in-flight queries to finish and collect their results
|
||||
let mut remote_tasks: BTreeMap<String, RemoteTask> = BTreeMap::new();
|
||||
for (node_name, handle) in in_flight_remote_queries {
|
||||
match handle.await {
|
||||
Ok(Ok(res)) => {
|
||||
let task_uid = res.task_uid;
|
||||
|
||||
remote_tasks.insert(node_name, Ok(task_uid).into());
|
||||
}
|
||||
Ok(Err(error)) => {
|
||||
remote_tasks.insert(node_name, Err(error.as_response_error()).into());
|
||||
}
|
||||
Err(panic) => match panic.try_into_panic() {
|
||||
Ok(panic) => {
|
||||
let msg = match panic.downcast_ref::<&'static str>() {
|
||||
Some(s) => *s,
|
||||
None => match panic.downcast_ref::<String>() {
|
||||
Some(s) => &s[..],
|
||||
None => "Box<dyn Any>",
|
||||
},
|
||||
};
|
||||
remote_tasks.insert(
|
||||
node_name,
|
||||
Err(ResponseError::from_msg(
|
||||
msg.to_string(),
|
||||
meilisearch_types::error::Code::Internal,
|
||||
))
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
Err(_) => {
|
||||
tracing::error!("proxy task was unexpectedly cancelled")
|
||||
}
|
||||
},
|
||||
Ok(Err(error)) => {
|
||||
remote_tasks.insert(node_name, Err(error.as_response_error()).into());
|
||||
}
|
||||
Err(panic) => match panic.try_into_panic() {
|
||||
Ok(panic) => {
|
||||
let msg = match panic.downcast_ref::<&'static str>() {
|
||||
Some(s) => *s,
|
||||
None => match panic.downcast_ref::<String>() {
|
||||
Some(s) => &s[..],
|
||||
None => "Box<dyn Any>",
|
||||
},
|
||||
};
|
||||
remote_tasks.insert(
|
||||
node_name,
|
||||
Err(ResponseError::from_msg(
|
||||
msg.to_string(),
|
||||
meilisearch_types::error::Code::Internal,
|
||||
))
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
Err(_) => {
|
||||
tracing::error!("proxy task was unexpectedly cancelled")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// edit details to contain the return values from the remotes
|
||||
index_scheduler.set_task_network(task.uid, TaskNetwork::Remotes { remote_tasks })?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(index_scheduler.set_task_network(task.uid, task_network)?)
|
||||
}
|
||||
|
||||
fn from_old_http_method(method: &actix_http::Method) -> reqwest::Method {
|
||||
@@ -375,25 +486,23 @@ mod error {
|
||||
}
|
||||
}
|
||||
|
||||
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
|
||||
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
|
||||
|
||||
pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchHttpError> {
|
||||
let (remote_name, task_uid) = match (
|
||||
let (remote_name, task_uid, network_version) = match (
|
||||
req.headers().get(PROXY_ORIGIN_REMOTE_HEADER),
|
||||
req.headers().get(PROXY_ORIGIN_TASK_UID_HEADER),
|
||||
req.headers().get(PROXY_ORIGIN_NETWORK_VERSION_HEADER),
|
||||
) {
|
||||
(None, None) => return Ok(None),
|
||||
(None, Some(_)) => {
|
||||
(None, None, _) => return Ok(None),
|
||||
(None, Some(_), _) => {
|
||||
return Err(MeilisearchHttpError::InconsistentOriginHeaders { is_remote_missing: true })
|
||||
}
|
||||
(Some(_), None) => {
|
||||
(Some(_), None, _) => {
|
||||
return Err(MeilisearchHttpError::InconsistentOriginHeaders {
|
||||
is_remote_missing: false,
|
||||
})
|
||||
}
|
||||
(Some(remote_name), Some(task_uid)) => (
|
||||
urlencoding::decode(remote_name.to_str().map_err(|err| {
|
||||
(Some(remote_name), Some(task_uid), network_version) => {
|
||||
let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_REMOTE_HEADER,
|
||||
msg: format!("while parsing remote name as UTF-8: {err}"),
|
||||
@@ -402,8 +511,8 @@ pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchH
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_REMOTE_HEADER,
|
||||
msg: format!("while URL-decoding remote name: {err}"),
|
||||
})?,
|
||||
urlencoding::decode(task_uid.to_str().map_err(|err| {
|
||||
})?;
|
||||
let task_uid = urlencoding::decode(task_uid.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_TASK_UID_HEADER,
|
||||
msg: format!("while parsing task UID as UTF-8: {err}"),
|
||||
@@ -412,15 +521,182 @@ pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchH
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_TASK_UID_HEADER,
|
||||
msg: format!("while URL-decoding task UID: {err}"),
|
||||
})?,
|
||||
),
|
||||
})?;
|
||||
let network_version = match network_version {
|
||||
Some(network_version) => {
|
||||
urlencoding::decode(network_version.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
|
||||
msg: format!("while parsing network version as UTF-8: {err}"),
|
||||
}
|
||||
})?)
|
||||
.map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
|
||||
msg: format!("while URL-decoding network version: {err}"),
|
||||
}
|
||||
})?
|
||||
}
|
||||
None => Cow::Borrowed("0"),
|
||||
};
|
||||
(remote_name, task_uid, network_version)
|
||||
}
|
||||
};
|
||||
|
||||
let task_uid: usize =
|
||||
let task_uid: u32 =
|
||||
task_uid.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_TASK_UID_HEADER,
|
||||
msg: format!("while parsing the task UID as an integer: {err}"),
|
||||
})?;
|
||||
|
||||
Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid }))
|
||||
let network_version: u128 =
|
||||
network_version.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
|
||||
msg: format!("while parsing the network version as a u128: {err}"),
|
||||
})?;
|
||||
|
||||
let network_version = uuid::Uuid::from_u128(network_version);
|
||||
|
||||
Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid, network_version }))
|
||||
}
|
||||
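// Illustrative sketch, not part of this changeset: the sender-side counterpart of
// `origin_from_req`. The actual header population happens in `try_proxy`, which is not shown
// in this diff, so the value formats below (URL-encoded remote name, decimal task UID,
// decimal u128 of the network version UUID) are assumptions mirroring the parsing above.
fn with_origin_headers(
    request: reqwest::RequestBuilder,
    this: &str,
    task_uid: u32,
    network_version: uuid::Uuid,
) -> reqwest::RequestBuilder {
    request
        .header(PROXY_ORIGIN_REMOTE_HEADER, urlencoding::encode(this).into_owned())
        .header(PROXY_ORIGIN_TASK_UID_HEADER, task_uid.to_string())
        .header(PROXY_ORIGIN_NETWORK_VERSION_HEADER, network_version.as_u128().to_string())
}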
|
||||
pub fn import_data_from_req(req: &HttpRequest) -> Result<Option<ImportData>, MeilisearchHttpError> {
|
||||
let (remote_name, index_name, documents) = match (
|
||||
req.headers().get(PROXY_IMPORT_REMOTE_HEADER),
|
||||
req.headers().get(PROXY_IMPORT_INDEX_HEADER),
|
||||
req.headers().get(PROXY_IMPORT_DOCS_HEADER),
|
||||
) {
|
||||
(None, None, None) => return Ok(None),
|
||||
(Some(remote_name), Some(index_name), Some(documents)) => {
|
||||
let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_REMOTE_HEADER,
|
||||
msg: format!("while parsing import remote name as UTF-8: {err}"),
|
||||
}
|
||||
})?)
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_REMOTE_HEADER,
|
||||
msg: format!("while URL-decoding import remote name: {err}"),
|
||||
})?;
|
||||
|
||||
let index_name = urlencoding::decode(index_name.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_INDEX_HEADER,
|
||||
msg: format!("while parsing import index name as UTF-8: {err}"),
|
||||
}
|
||||
})?)
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_INDEX_HEADER,
|
||||
msg: format!("while URL-decoding import index name: {err}"),
|
||||
})?;
|
||||
|
||||
let documents = urlencoding::decode(documents.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_DOCS_HEADER,
|
||||
msg: format!("while parsing documents as UTF-8: {err}"),
|
||||
}
|
||||
})?)
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_DOCS_HEADER,
|
||||
msg: format!("while URL-decoding documents: {err}"),
|
||||
})?;
|
||||
(remote_name, index_name, documents)
|
||||
}
|
||||
// catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some, Some, Some)
|
||||
(remote_name, index_name, documents) => {
|
||||
return Err(MeilisearchHttpError::InconsistentImportHeaders {
|
||||
is_remote_missing: remote_name.is_none(),
|
||||
is_index_missing: index_name.is_none(),
|
||||
is_docs_missing: documents.is_none(),
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
let document_count: u64 =
|
||||
documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_DOCS_HEADER,
|
||||
msg: format!("while parsing the documents as an integer: {err}"),
|
||||
})?;
|
||||
|
||||
Ok(Some(ImportData {
|
||||
remote_name: remote_name.to_string(),
|
||||
index_name: index_name.to_string(),
|
||||
document_count,
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn import_metadata_from_req(
|
||||
req: &HttpRequest,
|
||||
) -> Result<Option<ImportMetadata>, MeilisearchHttpError> {
|
||||
let (index_count, task_key, total_index_documents) = match (
|
||||
req.headers().get(PROXY_IMPORT_INDEX_COUNT_HEADER),
|
||||
req.headers().get(PROXY_IMPORT_TASK_KEY_HEADER),
|
||||
req.headers().get(PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER),
|
||||
) {
|
||||
(None, None, None) => return Ok(None),
|
||||
(Some(index_count), Some(task_key), Some(total_index_documents)) => {
|
||||
let index_count = urlencoding::decode(index_count.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
|
||||
msg: format!("while parsing import index count as UTF-8: {err}"),
|
||||
}
|
||||
})?)
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
|
||||
msg: format!("while URL-decoding import index count: {err}"),
|
||||
})?;
|
||||
|
||||
let task_key = urlencoding::decode(task_key.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
|
||||
msg: format!("while parsing import task key as UTF-8: {err}"),
|
||||
}
|
||||
})?)
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
|
||||
msg: format!("while URL-decoding import task key: {err}"),
|
||||
})?;
|
||||
|
||||
let total_index_documents =
|
||||
urlencoding::decode(total_index_documents.to_str().map_err(|err| {
|
||||
MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
||||
msg: format!("while parsing total index documents as UTF-8: {err}"),
|
||||
}
|
||||
})?)
|
||||
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
||||
msg: format!("while URL-decoding total index documents: {err}"),
|
||||
})?;
|
||||
(index_count, task_key, total_index_documents)
|
||||
}
|
||||
// catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some, Some, Some)
|
||||
(index_count, task_key, total_index_documents) => {
|
||||
return Err(MeilisearchHttpError::InconsistentImportMetadataHeaders {
|
||||
is_index_count_missing: index_count.is_none(),
|
||||
is_task_key_missing: task_key.is_none(),
|
||||
is_total_index_documents_missing: total_index_documents.is_none(),
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
let index_count: u64 =
|
||||
index_count.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
|
||||
msg: format!("while parsing the index count as an integer: {err}"),
|
||||
})?;
|
||||
|
||||
let task_key: DocumentId =
|
||||
task_key.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
|
||||
msg: format!("while parsing import task key as an integer: {err}"),
|
||||
})?;
|
||||
|
||||
let total_index_documents: u64 =
|
||||
total_index_documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
|
||||
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
|
||||
msg: format!("while parsing the total index documents as an integer: {err}"),
|
||||
})?;
|
||||
|
||||
Ok(Some(ImportMetadata { index_count, task_key, total_index_documents }))
|
||||
}
|
||||
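// Illustrative sketch, not part of this changeset: the sender-side counterpart of
// `import_data_from_req` and `import_metadata_from_req`. The real population of these headers
// is done by the proxying code, which is not shown here; the formats below simply mirror the
// parsing above (URL-encoded strings and decimal integers), and the struct fields are assumed
// to be accessible from this module.
fn with_import_headers(
    request: reqwest::RequestBuilder,
    import: &ImportData,
    metadata: &ImportMetadata,
) -> reqwest::RequestBuilder {
    request
        .header(PROXY_IMPORT_REMOTE_HEADER, urlencoding::encode(&import.remote_name).into_owned())
        .header(PROXY_IMPORT_INDEX_HEADER, urlencoding::encode(&import.index_name).into_owned())
        .header(PROXY_IMPORT_DOCS_HEADER, import.document_count.to_string())
        .header(PROXY_IMPORT_INDEX_COUNT_HEADER, metadata.index_count.to_string())
        .header(PROXY_IMPORT_TASK_KEY_HEADER, metadata.task_key.to_string())
        .header(PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER, metadata.total_index_documents.to_string())
}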
|
||||
@@ -30,7 +30,7 @@ use crate::Opt;
|
||||
|
||||
pub mod compact;
|
||||
pub mod documents;
|
||||
mod enterprise_edition;
|
||||
pub mod enterprise_edition;
|
||||
pub mod facet_search;
|
||||
pub mod search;
|
||||
mod search_analytics;
|
||||
@@ -41,7 +41,9 @@ mod settings_analytics;
|
||||
pub mod similar;
|
||||
mod similar_analytics;
|
||||
|
||||
pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
|
||||
pub use meilisearch_types::tasks::enterprise_edition::network::headers::{
|
||||
PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
|
||||
};
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
|
||||
@@ -218,6 +218,8 @@ pub struct SummarizedTaskView {
|
||||
deserialize_with = "time::serde::rfc3339::deserialize"
|
||||
)]
|
||||
enqueued_at: OffsetDateTime,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
custom_metadata: Option<String>,
|
||||
}
|
||||
|
||||
impl From<Task> for SummarizedTaskView {
|
||||
@@ -228,6 +230,7 @@ impl From<Task> for SummarizedTaskView {
|
||||
status: task.status,
|
||||
kind: task.kind.as_kind(),
|
||||
enqueued_at: task.enqueued_at,
|
||||
custom_metadata: task.custom_metadata,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,20 +9,27 @@ use itertools::{EitherOrBoth, Itertools};
|
||||
use meilisearch_types::deserr::DeserrJsonError;
|
||||
use meilisearch_types::enterprise_edition::network::{Network as DbNetwork, Remote as DbRemote};
|
||||
use meilisearch_types::error::deserr_codes::{
|
||||
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
|
||||
InvalidNetworkLeader, InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf,
|
||||
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
|
||||
};
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::keys::actions;
|
||||
use meilisearch_types::milli::update::Setting;
|
||||
use meilisearch_types::tasks::enterprise_edition::network::{
|
||||
DbTaskNetwork, NetworkTopologyChange, Origin,
|
||||
};
|
||||
use meilisearch_types::tasks::KindWithContent;
|
||||
use serde::Serialize;
|
||||
use tracing::debug;
|
||||
use utoipa::{OpenApi, ToSchema};
|
||||
|
||||
use crate::analytics::{Aggregate, Analytics};
|
||||
use crate::error::MeilisearchHttpError;
|
||||
use crate::extractors::authentication::policies::ActionPolicy;
|
||||
use crate::extractors::authentication::GuardedData;
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
|
||||
use crate::routes::SummarizedTaskView;
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
@@ -83,7 +90,7 @@ async fn get_network(
|
||||
Ok(HttpResponse::Ok().json(network))
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserr, ToSchema, Serialize)]
|
||||
#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
|
||||
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
@@ -106,12 +113,19 @@ pub struct Remote {
|
||||
pub write_api_key: Setting<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserr, ToSchema, Serialize)]
|
||||
#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
|
||||
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
pub struct Network {
|
||||
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
|
||||
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
|
||||
"ms-00": {
|
||||
"url": "http://localhost:7700"
|
||||
},
|
||||
"ms-01": {
|
||||
"url": "http://localhost:7701"
|
||||
}
|
||||
}))]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
|
||||
#[serde(default)]
|
||||
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
|
||||
@@ -119,10 +133,21 @@ pub struct Network {
|
||||
#[serde(default, rename = "self")]
|
||||
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
|
||||
pub local: Setting<String>,
|
||||
#[schema(value_type = Option<bool>, example = json!(true))]
|
||||
#[schema(value_type = Option<String>, example = json!("ms-00"))]
|
||||
#[serde(default)]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
|
||||
pub sharding: Setting<bool>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkLeader>)]
|
||||
pub leader: Setting<String>,
|
||||
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
|
||||
"ms-00": {
|
||||
"url": "http://localhost:7700"
|
||||
},
|
||||
"ms-01": {
|
||||
"url": "http://localhost:7701"
|
||||
}
|
||||
}))]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
|
||||
#[serde(default)]
|
||||
pub previous_remotes: Setting<BTreeMap<String, Option<Remote>>>,
|
||||
}
|
||||
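// Illustrative example, not part of this changeset: a `PATCH /network` body using the new
// `leader` field in place of the removed `sharding` boolean, written with the same `json!`
// convention as the tests below. Remote names and URLs are placeholders.
//
// let body = json!({
//     "self": "ms-00",
//     "leader": "ms-00",
//     "remotes": {
//         "ms-00": { "url": "http://localhost:7700" },
//         "ms-01": { "url": "http://localhost:7701" }
//     }
// });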
|
||||
impl Remote {
|
||||
@@ -207,29 +232,203 @@ async fn patch_network(
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
index_scheduler.features().check_network("Using the /network route")?;
|
||||
|
||||
match crate::routes::indexes::enterprise_edition::proxy::origin_from_req(&req)? {
|
||||
Some(origin) => {
|
||||
patch_network_with_origin(index_scheduler, new_network, req, origin, analytics).await
|
||||
}
|
||||
None => patch_network_without_origin(index_scheduler, new_network, req, analytics).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn patch_network_without_origin(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
|
||||
new_network: AwebJson<Network, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
analytics: Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let new_network = new_network.0;
|
||||
let old_network = index_scheduler.network();
|
||||
debug!(parameters = ?new_network, "Patch network");
|
||||
|
||||
if !matches!(new_network.previous_remotes, Setting::NotSet) {
|
||||
return Err(MeilisearchHttpError::UnexpectedNetworkPreviousRemotes.into());
|
||||
}
|
||||
|
||||
let merged_network = merge_networks(old_network.clone(), new_network)?;
|
||||
index_scheduler.put_network(merged_network.clone())?;
|
||||
|
||||
analytics.publish(
|
||||
PatchNetworkAnalytics {
|
||||
network_size: merged_network.remotes.len(),
|
||||
network_has_self: merged_network.local.is_some(),
|
||||
},
|
||||
&req,
|
||||
);
|
||||
|
||||
// TODO: spawn task only if necessary
|
||||
let network_topology_change =
|
||||
NetworkTopologyChange::new(old_network.clone(), merged_network.clone());
|
||||
let task = KindWithContent::NetworkTopologyChange(network_topology_change);
|
||||
let task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false)).await??
|
||||
};
|
||||
|
||||
let mut proxied_network = Network {
|
||||
remotes: Setting::Set(to_settings_remotes(&merged_network.remotes)),
|
||||
local: Setting::NotSet,
|
||||
leader: Setting::some_or_not_set(merged_network.leader.clone()),
|
||||
previous_remotes: Setting::Set(to_settings_remotes(&old_network.remotes)),
|
||||
};
|
||||
let mut deleted_network = old_network;
|
||||
|
||||
let deleted_remotes = &mut deleted_network.remotes;
|
||||
deleted_remotes.retain(|node, _| !merged_network.remotes.contains_key(node));
|
||||
|
||||
// proxy network change to the remaining remotes.
|
||||
let updated_task = proxy(
|
||||
&index_scheduler,
|
||||
None,
|
||||
&req,
|
||||
DbTaskNetwork::Remotes {
|
||||
remote_tasks: Default::default(),
|
||||
network_version: merged_network.version,
|
||||
},
|
||||
merged_network,
|
||||
Body::generated(proxied_network.clone(), |name, _remote, network| {
|
||||
network.local = Setting::Set(name.to_string());
|
||||
}),
|
||||
&task,
|
||||
)
|
||||
.await?;
|
||||
// unwrap: network was set by `proxy`
|
||||
let task_network = updated_task.network.unwrap();
|
||||
|
||||
proxied_network.previous_remotes = Setting::NotSet;
|
||||
|
||||
// proxy network change to the deleted remotes
|
||||
proxy(
|
||||
&index_scheduler,
|
||||
None,
|
||||
&req,
|
||||
task_network,
|
||||
deleted_network,
|
||||
Body::generated(proxied_network.clone(), |_name, _remote, network| {
|
||||
network.local = Setting::Reset;
|
||||
}),
|
||||
&task,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
debug!("returns: {:?}", task);
|
||||
Ok(HttpResponse::Accepted().json(task))
|
||||
}
|
||||
|
||||
async fn patch_network_with_origin(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
|
||||
merged_network: AwebJson<Network, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
origin: Origin,
|
||||
analytics: Data<Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let merged_network = merged_network.into_inner();
|
||||
debug!(parameters = ?merged_network, ?origin, "Patch network");
|
||||
let mut remotes = BTreeMap::new();
|
||||
let mut old_network = index_scheduler.network();
|
||||
|
||||
for (name, remote) in merged_network.remotes.set().into_iter().flat_map(|x| x.into_iter()) {
|
||||
let Some(remote) = remote else { continue };
|
||||
let remote = remote.try_into_db_node(&name)?;
|
||||
remotes.insert(name, remote);
|
||||
}
|
||||
let mut previous_remotes = BTreeMap::new();
|
||||
for (name, remote) in
|
||||
merged_network.previous_remotes.set().into_iter().flat_map(|x| x.into_iter())
|
||||
{
|
||||
let Some(remote) = remote else {
|
||||
continue;
|
||||
};
|
||||
let remote = remote.try_into_db_node(&name)?;
|
||||
previous_remotes.insert(name, remote);
|
||||
}
|
||||
|
||||
old_network.remotes = previous_remotes;
|
||||
|
||||
let new_network = DbNetwork {
|
||||
local: merged_network.local.set(),
|
||||
remotes,
|
||||
leader: merged_network.leader.set(),
|
||||
version: origin.network_version,
|
||||
};
|
||||
index_scheduler.put_network(new_network.clone())?;
|
||||
|
||||
analytics.publish(
|
||||
PatchNetworkAnalytics {
|
||||
network_size: new_network.remotes.len(),
|
||||
network_has_self: new_network.local.is_some(),
|
||||
},
|
||||
&req,
|
||||
);
|
||||
|
||||
// TODO: spawn task only if necessary
|
||||
let network_topology_change = NetworkTopologyChange::new(old_network, new_network);
|
||||
let task = KindWithContent::NetworkTopologyChange(network_topology_change);
|
||||
let task = {
|
||||
let index_scheduler = index_scheduler.clone();
|
||||
tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false)).await??
|
||||
};
|
||||
|
||||
index_scheduler.set_task_network(task.uid, DbTaskNetwork::Origin { origin })?;
|
||||
|
||||
let task: SummarizedTaskView = task.into();
|
||||
debug!("returns: {:?}", task);
|
||||
Ok(HttpResponse::Accepted().json(task))
|
||||
}
|
||||
|
||||
fn to_settings_remotes(
|
||||
db_remotes: &BTreeMap<String, DbRemote>,
|
||||
) -> BTreeMap<String, Option<Remote>> {
|
||||
db_remotes
|
||||
.iter()
|
||||
.map(|(name, remote)| {
|
||||
(
|
||||
name.clone(),
|
||||
Some(Remote {
|
||||
url: Setting::Set(remote.url.clone()),
|
||||
search_api_key: Setting::some_or_not_set(remote.search_api_key.clone()),
|
||||
write_api_key: Setting::some_or_not_set(remote.write_api_key.clone()),
|
||||
}),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn merge_networks(
|
||||
old_network: DbNetwork,
|
||||
new_network: Network,
|
||||
) -> Result<DbNetwork, ResponseError> {
|
||||
let merged_self = match new_network.local {
|
||||
Setting::Set(new_self) => Some(new_self),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_network.local,
|
||||
};
|
||||
|
||||
let merged_sharding = match new_network.sharding {
|
||||
Setting::Set(new_sharding) => new_sharding,
|
||||
Setting::Reset => false,
|
||||
Setting::NotSet => old_network.sharding,
|
||||
let merged_leader = match new_network.leader {
|
||||
Setting::Set(new_leader) => Some(new_leader),
|
||||
Setting::Reset => None,
|
||||
Setting::NotSet => old_network.leader,
|
||||
};
|
||||
|
||||
if merged_sharding && merged_self.is_none() {
|
||||
return Err(ResponseError::from_msg(
|
||||
"`.sharding`: enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.".into(),
|
||||
meilisearch_types::error::Code::InvalidNetworkSharding,
|
||||
));
|
||||
match (merged_leader.as_deref(), merged_self.as_deref()) {
|
||||
// 1. Always allowed if there is no leader
|
||||
(None, _) => (),
|
||||
// 2. Allowed if the leader is self
|
||||
(Some(leader), Some(this)) if leader == this => (),
|
||||
// 3. Any other change is disallowed
|
||||
(Some(leader), _) => {
|
||||
return Err(MeilisearchHttpError::NotLeader { leader: leader.to_string() }.into())
|
||||
}
|
||||
}
|
||||
|
||||
let new_version = uuid::Uuid::now_v7();
|
||||
let merged_remotes = match new_network.remotes {
|
||||
Setting::Set(new_remotes) => {
|
||||
let mut merged_remotes = BTreeMap::new();
|
||||
@@ -301,18 +500,11 @@ async fn patch_network(
|
||||
Setting::Reset => BTreeMap::new(),
|
||||
Setting::NotSet => old_network.remotes,
|
||||
};
|
||||
|
||||
analytics.publish(
|
||||
PatchNetworkAnalytics {
|
||||
network_size: merged_remotes.len(),
|
||||
network_has_self: merged_self.is_some(),
|
||||
},
|
||||
&req,
|
||||
);
|
||||
|
||||
let merged_network =
|
||||
DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
|
||||
index_scheduler.put_network(merged_network.clone())?;
|
||||
debug!(returns = ?merged_network, "Patch network");
|
||||
Ok(HttpResponse::Ok().json(merged_network))
|
||||
let merged_network = DbNetwork {
|
||||
local: merged_self,
|
||||
remotes: merged_remotes,
|
||||
leader: merged_leader,
|
||||
version: new_version,
|
||||
};
|
||||
Ok(merged_network)
|
||||
}
|
||||
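// Illustrative sketch, not part of this changeset: the three-state merge rule that
// `merge_networks` applies to scalar fields such as `self` and `leader` (remotes use a
// per-entry variant of the same idea). `Setting::Set` overrides the stored value,
// `Setting::Reset` clears it, and `Setting::NotSet` keeps the previous value.
fn merge_setting<T>(new: Setting<T>, old: Option<T>) -> Option<T> {
    match new {
        Setting::Set(value) => Some(value),
        Setting::Reset => None,
        Setting::NotSet => old,
    }
}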
|
||||
@@ -91,7 +91,16 @@ impl<'a> Index<'a, Owned> {
|
||||
documents: Value,
|
||||
primary_key: Option<&str>,
|
||||
) -> (Value, StatusCode) {
|
||||
self._add_documents(documents, primary_key).await
|
||||
self._add_documents(documents, primary_key, None).await
|
||||
}
|
||||
|
||||
pub async fn add_documents_with_custom_metadata(
|
||||
&self,
|
||||
documents: Value,
|
||||
primary_key: Option<&str>,
|
||||
custom_metadata: Option<&str>,
|
||||
) -> (Value, StatusCode) {
|
||||
self._add_documents(documents, primary_key, custom_metadata).await
|
||||
}
|
||||
|
||||
pub async fn raw_add_documents(
|
||||
@@ -352,12 +361,25 @@ impl<State> Index<'_, State> {
|
||||
&self,
|
||||
documents: Value,
|
||||
primary_key: Option<&str>,
|
||||
custom_metadata: Option<&str>,
|
||||
) -> (Value, StatusCode) {
|
||||
let url = match primary_key {
|
||||
Some(key) => {
|
||||
format!("/indexes/{}/documents?primaryKey={}", urlencode(self.uid.as_ref()), key)
|
||||
let url = match (primary_key, custom_metadata) {
|
||||
(Some(key), Some(meta)) => {
|
||||
format!(
|
||||
"/indexes/{}/documents?primaryKey={key}&customMetadata={meta}",
|
||||
urlencode(self.uid.as_ref()),
|
||||
)
|
||||
}
|
||||
None => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
|
||||
(None, Some(meta)) => {
|
||||
format!(
|
||||
"/indexes/{}/documents?&customMetadata={meta}",
|
||||
urlencode(self.uid.as_ref()),
|
||||
)
|
||||
}
|
||||
(Some(key), None) => {
|
||||
format!("/indexes/{}/documents?&primaryKey={key}", urlencode(self.uid.as_ref()),)
|
||||
}
|
||||
(None, None) => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
|
||||
};
|
||||
self.service.post_encoded(url, documents, self.encoder).await
|
||||
}
|
||||
|
||||
@@ -241,7 +241,7 @@ pub async fn shared_index_with_documents() -> &'static Index<'static, Shared> {
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_DOCUMENTS").to_shared();
|
||||
let documents = DOCUMENTS.clone();
|
||||
let (response, _code) = index._add_documents(documents, None).await;
|
||||
let (response, _code) = index._add_documents(documents, None, None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
let (response, _code) = index
|
||||
._update_settings(
|
||||
@@ -284,7 +284,7 @@ pub async fn shared_index_with_score_documents() -> &'static Index<'static, Shar
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_SCORE_DOCUMENTS").to_shared();
|
||||
let documents = SCORE_DOCUMENTS.clone();
|
||||
let (response, _code) = index._add_documents(documents, None).await;
|
||||
let (response, _code) = index._add_documents(documents, None, None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
let (response, _code) = index
|
||||
._update_settings(
|
||||
@@ -361,7 +361,7 @@ pub async fn shared_index_with_nested_documents() -> &'static Index<'static, Sha
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_NESTED_DOCUMENTS").to_shared();
|
||||
let documents = NESTED_DOCUMENTS.clone();
|
||||
let (response, _code) = index._add_documents(documents, None).await;
|
||||
let (response, _code) = index._add_documents(documents, None, None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
let (response, _code) = index
|
||||
._update_settings(
|
||||
@@ -508,7 +508,7 @@ pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared
|
||||
.get_or_init(|| async {
|
||||
let server = Server::new_shared();
|
||||
let index = server._index("SHARED_GEO_DOCUMENTS").to_shared();
|
||||
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await;
|
||||
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None, None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (response, _code) = index
|
||||
@@ -531,7 +531,7 @@ pub async fn shared_index_geojson_documents() -> &'static Index<'static, Shared>
|
||||
let index = server._index("SHARED_GEOJSON_DOCUMENTS").to_shared();
|
||||
let countries = include_str!("../documents/geojson/assets/countries.json");
|
||||
let lille = serde_json::from_str::<serde_json::Value>(countries).unwrap();
|
||||
let (response, _code) = index._add_documents(Value(lille), Some("name")).await;
|
||||
let (response, _code) = index._add_documents(Value(lille), Some("name"), None).await;
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (response, _code) =
|
||||
|
||||
@@ -3141,3 +3141,513 @@ fn fail(override_response_body: Option<&str>) -> ResponseTemplate {
|
||||
response.set_body_json(json!({"error": "provoked error", "code": "test_error", "link": "https://docs.meilisearch.com/errors#test_error"}))
|
||||
}
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn remote_auto_sharding() {
|
||||
let ms0 = Server::new().await;
|
||||
let ms1 = Server::new().await;
|
||||
let ms2 = Server::new().await;
|
||||
|
||||
// enable feature
|
||||
|
||||
let (response, code) = ms0.set_features(json!({"network": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["network"]), @"true");
|
||||
let (response, code) = ms1.set_features(json!({"network": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["network"]), @"true");
|
||||
let (response, code) = ms2.set_features(json!({"network": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["network"]), @"true");
|
||||
|
||||
// set self & sharding
|
||||
|
||||
let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"self": "ms0",
|
||||
"remotes": {},
|
||||
"sharding": true
|
||||
}
|
||||
"###);
|
||||
let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"self": "ms1",
|
||||
"remotes": {},
|
||||
"sharding": true
|
||||
}
|
||||
"###);
|
||||
let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"self": "ms2",
|
||||
"remotes": {},
|
||||
"sharding": true
|
||||
}
|
||||
"###);
|
||||
|
||||
// wrap servers
|
||||
let ms0 = Arc::new(ms0);
|
||||
let ms1 = Arc::new(ms1);
|
||||
let ms2 = Arc::new(ms2);
|
||||
|
||||
let rms0 = LocalMeili::new(ms0.clone()).await;
|
||||
let rms1 = LocalMeili::new(ms1.clone()).await;
|
||||
let rms2 = LocalMeili::new(ms2.clone()).await;
|
||||
|
||||
// set network
|
||||
let network = json!({"remotes": {
|
||||
"ms0": {
|
||||
"url": rms0.url()
|
||||
},
|
||||
"ms1": {
|
||||
"url": rms1.url()
|
||||
},
|
||||
"ms2": {
|
||||
"url": rms2.url()
|
||||
}
|
||||
}});
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&network).unwrap());
|
||||
|
||||
let (_response, status_code) = ms0.set_network(network.clone()).await;
|
||||
snapshot!(status_code, @"200 OK");
|
||||
let (_response, status_code) = ms1.set_network(network.clone()).await;
|
||||
snapshot!(status_code, @"200 OK");
|
||||
let (_response, status_code) = ms2.set_network(network.clone()).await;
|
||||
snapshot!(status_code, @"200 OK");
|
||||
|
||||
// add documents
|
||||
let documents = SCORE_DOCUMENTS.clone();
|
||||
let documents = documents.as_array().unwrap();
|
||||
let index0 = ms0.index("test");
|
||||
let _index1 = ms1.index("test");
|
||||
let _index2 = ms2.index("test");
|
||||
|
||||
let (task, _status_code) = index0.add_documents(json!(documents), None).await;
|
||||
|
||||
let t0 = task.uid();
|
||||
let (t, _) = ms0.get_task(task.uid()).await;
|
||||
let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
|
||||
let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
|
||||
|
||||
ms0.wait_task(t0).await.succeeded();
|
||||
ms1.wait_task(t1).await.succeeded();
|
||||
ms2.wait_task(t2).await.succeeded();
|
||||
|
||||
// perform multi-search
|
||||
let query = "badman returns";
|
||||
let request = json!({
|
||||
"federation": {},
|
||||
"queries": [
|
||||
{
|
||||
"q": query,
|
||||
"indexUid": "test",
|
||||
"federationOptions": {
|
||||
"remote": "ms0"
|
||||
}
|
||||
},
|
||||
{
|
||||
"q": query,
|
||||
"indexUid": "test",
|
||||
"federationOptions": {
|
||||
"remote": "ms1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"q": query,
|
||||
"indexUid": "test",
|
||||
"federationOptions": {
|
||||
"remote": "ms2"
|
||||
}
|
||||
},
|
||||
]
|
||||
});
|
||||
|
||||
let (response, code) = ms0.multi_search(request.clone()).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"title": "Batman Returns",
|
||||
"id": "C",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 2,
|
||||
"weightedRankingScore": 0.8317901234567902,
|
||||
"remote": "ms2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman the dark knight returns: Part 1",
|
||||
"id": "A",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
"weightedRankingScore": 0.7028218694885362,
|
||||
"remote": "ms1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman the dark knight returns: Part 2",
|
||||
"id": "B",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
"weightedRankingScore": 0.7028218694885362,
|
||||
"remote": "ms1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Badman",
|
||||
"id": "E",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 2,
|
||||
"weightedRankingScore": 0.5,
|
||||
"remote": "ms2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman",
|
||||
"id": "D",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 0,
|
||||
"weightedRankingScore": 0.23106060606060605,
|
||||
"remote": "ms0"
|
||||
}
|
||||
}
|
||||
],
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 5,
|
||||
"requestUid": "[uuid]",
|
||||
"remoteErrors": {}
|
||||
}
|
||||
"###);
|
||||
let (response, code) = ms1.multi_search(request.clone()).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"title": "Batman Returns",
|
||||
"id": "C",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 2,
|
||||
"weightedRankingScore": 0.8317901234567902,
|
||||
"remote": "ms2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman the dark knight returns: Part 1",
|
||||
"id": "A",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
"weightedRankingScore": 0.7028218694885362,
|
||||
"remote": "ms1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman the dark knight returns: Part 2",
|
||||
"id": "B",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
"weightedRankingScore": 0.7028218694885362,
|
||||
"remote": "ms1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Badman",
|
||||
"id": "E",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 2,
|
||||
"weightedRankingScore": 0.5,
|
||||
"remote": "ms2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman",
|
||||
"id": "D",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 0,
|
||||
"weightedRankingScore": 0.23106060606060605,
|
||||
"remote": "ms0"
|
||||
}
|
||||
}
|
||||
],
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 5,
|
||||
"requestUid": "[uuid]",
|
||||
"remoteErrors": {}
|
||||
}
|
||||
"###);
|
||||
let (response, code) = ms2.multi_search(request.clone()).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"title": "Batman Returns",
|
||||
"id": "C",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 2,
|
||||
"weightedRankingScore": 0.8317901234567902,
|
||||
"remote": "ms2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman the dark knight returns: Part 1",
|
||||
"id": "A",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
"weightedRankingScore": 0.7028218694885362,
|
||||
"remote": "ms1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman the dark knight returns: Part 2",
|
||||
"id": "B",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
"weightedRankingScore": 0.7028218694885362,
|
||||
"remote": "ms1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Badman",
|
||||
"id": "E",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 2,
|
||||
"weightedRankingScore": 0.5,
|
||||
"remote": "ms2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Batman",
|
||||
"id": "D",
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 0,
|
||||
"weightedRankingScore": 0.23106060606060605,
|
||||
"remote": "ms0"
|
||||
}
|
||||
}
|
||||
],
|
||||
"processingTimeMs": "[time]",
|
||||
"limit": 20,
|
||||
"offset": 0,
|
||||
"estimatedTotalHits": 5,
|
||||
"requestUid": "[uuid]",
|
||||
"remoteErrors": {}
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn remote_auto_sharding_with_custom_metadata() {
|
||||
let ms0 = Server::new().await;
|
||||
let ms1 = Server::new().await;
|
||||
let ms2 = Server::new().await;
|
||||
|
||||
// enable feature
|
||||
|
||||
let (response, code) = ms0.set_features(json!({"network": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["network"]), @"true");
|
||||
let (response, code) = ms1.set_features(json!({"network": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["network"]), @"true");
|
||||
let (response, code) = ms2.set_features(json!({"network": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response["network"]), @"true");
|
||||
|
||||
// set self & sharding
|
||||
|
||||
let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"self": "ms0",
|
||||
"remotes": {},
|
||||
"sharding": true
|
||||
}
|
||||
"###);
|
||||
let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"self": "ms1",
|
||||
"remotes": {},
|
||||
"sharding": true
|
||||
}
|
||||
"###);
|
||||
let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"self": "ms2",
|
||||
"remotes": {},
|
||||
"sharding": true
|
||||
}
|
||||
"###);
|
||||
|
||||
// wrap servers
|
||||
let ms0 = Arc::new(ms0);
|
||||
let ms1 = Arc::new(ms1);
|
||||
let ms2 = Arc::new(ms2);
|
||||
|
||||
let rms0 = LocalMeili::new(ms0.clone()).await;
|
||||
let rms1 = LocalMeili::new(ms1.clone()).await;
|
||||
let rms2 = LocalMeili::new(ms2.clone()).await;
|
||||
|
||||
// set network
|
||||
let network = json!({"remotes": {
|
||||
"ms0": {
|
||||
"url": rms0.url()
|
||||
},
|
||||
"ms1": {
|
||||
"url": rms1.url()
|
||||
},
|
||||
"ms2": {
|
||||
"url": rms2.url()
|
||||
}
|
||||
}});
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&network).unwrap());
|
||||
|
||||
let (_response, status_code) = ms0.set_network(network.clone()).await;
|
||||
snapshot!(status_code, @"200 OK");
|
||||
let (_response, status_code) = ms1.set_network(network.clone()).await;
|
||||
snapshot!(status_code, @"200 OK");
|
||||
let (_response, status_code) = ms2.set_network(network.clone()).await;
|
||||
snapshot!(status_code, @"200 OK");
|
||||
|
||||
// add documents
|
||||
let documents = SCORE_DOCUMENTS.clone();
|
||||
let documents = documents.as_array().unwrap();
|
||||
let index0 = ms0.index("test");
|
||||
let _index1 = ms1.index("test");
|
||||
let _index2 = ms2.index("test");
|
||||
|
||||
let (task, _status_code) = index0
|
||||
.add_documents_with_custom_metadata(
|
||||
json!(documents),
|
||||
None,
|
||||
Some("remote_auto_sharding_with_custom_metadata"),
|
||||
)
|
||||
.await;
|
||||
|
||||
let t0 = task.uid();
|
||||
let (t, _) = ms0.get_task(task.uid()).await;
|
||||
let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
|
||||
let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
|
||||
|
||||
let t = ms0.wait_task(t0).await.succeeded();
|
||||
snapshot!(t, @r###"
|
||||
{
|
||||
"uid": "[uid]",
|
||||
"batchUid": "[batch_uid]",
|
||||
"indexUid": "test",
|
||||
"status": "succeeded",
|
||||
"type": "documentAdditionOrUpdate",
|
||||
"canceledBy": null,
|
||||
"details": {
|
||||
"receivedDocuments": 5,
|
||||
"indexedDocuments": 1
|
||||
},
|
||||
"error": null,
|
||||
"duration": "[duration]",
|
||||
"enqueuedAt": "[date]",
|
||||
"startedAt": "[date]",
|
||||
"finishedAt": "[date]",
|
||||
"network": {
|
||||
"remote_tasks": {
|
||||
"ms1": {
|
||||
"taskUid": 0,
|
||||
"error": null
|
||||
},
|
||||
"ms2": {
|
||||
"taskUid": 0,
|
||||
"error": null
|
||||
}
|
||||
}
|
||||
},
|
||||
"customMetadata": "remote_auto_sharding_with_custom_metadata"
|
||||
}
|
||||
"###);
|
||||
|
||||
let t = ms1.wait_task(t1).await.succeeded();
|
||||
snapshot!(t, @r###"
|
||||
{
|
||||
"uid": "[uid]",
|
||||
"batchUid": "[batch_uid]",
|
||||
"indexUid": "test",
|
||||
"status": "succeeded",
|
||||
"type": "documentAdditionOrUpdate",
|
||||
"canceledBy": null,
|
||||
"details": {
|
||||
"receivedDocuments": 5,
|
||||
"indexedDocuments": 2
|
||||
},
|
||||
"error": null,
|
||||
"duration": "[duration]",
|
||||
"enqueuedAt": "[date]",
|
||||
"startedAt": "[date]",
|
||||
"finishedAt": "[date]",
|
||||
"network": {
|
||||
"origin": {
|
||||
"remoteName": "ms0",
|
||||
"taskUid": 0
|
||||
}
|
||||
},
|
||||
"customMetadata": "remote_auto_sharding_with_custom_metadata"
|
||||
}
|
||||
"###);
|
||||
|
||||
let t = ms2.wait_task(t2).await.succeeded();
|
||||
snapshot!(t, @r###"
|
||||
{
|
||||
"uid": "[uid]",
|
||||
"batchUid": "[batch_uid]",
|
||||
"indexUid": "test",
|
||||
"status": "succeeded",
|
||||
"type": "documentAdditionOrUpdate",
|
||||
"canceledBy": null,
|
||||
"details": {
|
||||
"receivedDocuments": 5,
|
||||
"indexedDocuments": 2
|
||||
},
|
||||
"error": null,
|
||||
"duration": "[duration]",
|
||||
"enqueuedAt": "[date]",
|
||||
"startedAt": "[date]",
|
||||
"finishedAt": "[date]",
|
||||
"network": {
|
||||
"origin": {
|
||||
"remoteName": "ms0",
|
||||
"taskUid": 0
|
||||
}
|
||||
},
|
||||
"customMetadata": "remote_auto_sharding_with_custom_metadata"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
@@ -656,3 +656,119 @@ async fn forbidden_fields() {
|
||||
}
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_web::test]
|
||||
async fn receive_custom_metadata() {
|
||||
let WebhookHandle { server_handle: handle1, url: url1, receiver: mut receiver1 } =
|
||||
create_webhook_server().await;
|
||||
let WebhookHandle { server_handle: handle2, url: url2, receiver: mut receiver2 } =
|
||||
create_webhook_server().await;
|
||||
let WebhookHandle { server_handle: handle3, url: url3, receiver: mut receiver3 } =
|
||||
create_webhook_server().await;
|
||||
|
||||
let db_path = tempfile::tempdir().unwrap();
|
||||
let server = Server::new_with_options(Opt {
|
||||
task_webhook_url: Some(Url::parse(&url3).unwrap()),
|
||||
..default_settings(db_path.path())
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
for url in [url1, url2] {
|
||||
let (value, code) = server.create_webhook(json!({ "url": url })).await;
|
||||
snapshot!(code, @"201 Created");
|
||||
snapshot!(json_string!(value, { ".uuid" => "[uuid]", ".url" => "[ignored]" }), @r#"
|
||||
{
|
||||
"uuid": "[uuid]",
|
||||
"isEditable": true,
|
||||
"url": "[ignored]",
|
||||
"headers": {}
|
||||
}
|
||||
"#);
|
||||
}
|
||||
let index = server.index("tamo");
|
||||
let (response, code) = index
|
||||
.add_documents_with_custom_metadata(
|
||||
json!({ "id": 1, "doggo": "bone" }),
|
||||
None,
|
||||
Some("test_meta"),
|
||||
)
|
||||
.await;
|
||||
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"taskUid": 0,
|
||||
"indexUid": "tamo",
|
||||
"status": "enqueued",
|
||||
"type": "documentAdditionOrUpdate",
|
||||
"enqueuedAt": "[date]",
|
||||
"customMetadata": "test_meta"
|
||||
}
|
||||
"###);
|
||||
snapshot!(code, @"202 Accepted");
|
||||
|
||||
let mut count1 = 0;
|
||||
let mut count2 = 0;
|
||||
let mut count3 = 0;
|
||||
while count1 == 0 || count2 == 0 || count3 == 0 {
|
||||
tokio::select! {
|
||||
msg = receiver1.recv() => {
|
||||
if let Some(msg) = msg {
|
||||
count1 += 1;
|
||||
check_metadata(msg);
|
||||
}
|
||||
},
|
||||
msg = receiver2.recv() => {
|
||||
if let Some(msg) = msg {
|
||||
count2 += 1;
|
||||
check_metadata(msg);
|
||||
}
|
||||
},
|
||||
msg = receiver3.recv() => {
|
||||
if let Some(msg) = msg {
|
||||
count3 += 1;
|
||||
check_metadata(msg);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(count1, 1);
|
||||
assert_eq!(count2, 1);
|
||||
assert_eq!(count3, 1);
|
||||
|
||||
handle1.abort();
|
||||
handle2.abort();
|
||||
handle3.abort();
|
||||
}
|
||||
|
||||
fn check_metadata(msg: Vec<u8>) {
|
||||
let msg = String::from_utf8(msg).unwrap();
|
||||
let tasks = msg.split('\n');
|
||||
for task in tasks {
|
||||
if task.is_empty() {
|
||||
continue;
|
||||
}
|
||||
let task: serde_json::Value = serde_json::from_str(task).unwrap();
|
||||
snapshot!(common::Value(task), @r###"
|
||||
{
|
||||
"uid": "[uid]",
|
||||
"batchUid": "[batch_uid]",
|
||||
"indexUid": "tamo",
|
||||
"status": "succeeded",
|
||||
"type": "documentAdditionOrUpdate",
|
||||
"canceledBy": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 1
|
||||
},
|
||||
"error": null,
|
||||
"duration": "[duration]",
|
||||
"enqueuedAt": "[date]",
|
||||
"startedAt": "[date]",
|
||||
"finishedAt": "[date]",
|
||||
"customMetadata": "test_meta"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ async fn version_too_old() {
std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.24.0");
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.26.0");
}

#[actix_rt::test]
@@ -58,7 +58,7 @@ async fn version_requires_downgrade() {
std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.24.1 is higher than the Meilisearch version 1.24.0. Downgrade is not supported");
snapshot!(err, @"Database version 1.26.1 is higher than the Meilisearch version 1.26.0. Downgrade is not supported");
}

#[actix_rt::test]
@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"stats": {
"totalNbTasks": 1,

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"stats": {
"totalNbTasks": 1,

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"stats": {
"totalNbTasks": 1,

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"error": null,
"duration": "[duration]",

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"error": null,
"duration": "[duration]",

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"error": null,
"duration": "[duration]",

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"stats": {
"totalNbTasks": 1,

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.24.0"
"upgradeTo": "v1.26.0"
},
"error": null,
"duration": "[duration]",
@@ -74,12 +74,13 @@ csv = "1.3.1"
|
||||
candle-core = { version = "0.9.1" }
|
||||
candle-transformers = { version = "0.9.1" }
|
||||
candle-nn = { version = "0.9.1" }
|
||||
tokenizers = { git = "https://github.com/huggingface/tokenizers.git", tag = "v0.15.2", version = "0.15.2", default-features = false, features = [
|
||||
tokenizers = { version = "0.22.1", default-features = false, features = [
|
||||
"onig",
|
||||
] }
|
||||
hf-hub = { git = "https://github.com/dureuill/hf-hub.git", branch = "rust_tls", default-features = false, features = [
|
||||
"online",
|
||||
] }
|
||||
safetensors = "0.6.2"
|
||||
tiktoken-rs = "0.7.0"
|
||||
liquid = "0.26.11"
|
||||
rhai = { version = "1.22.2", features = [
|
||||
@@ -100,7 +101,6 @@ bumpalo = "3.18.1"
|
||||
bumparaw-collections = "0.1.4"
|
||||
steppe = { version = "0.4", default-features = false }
|
||||
thread_local = "1.1.9"
|
||||
allocator-api2 = "0.3.0"
|
||||
rustc-hash = "2.1.1"
|
||||
enum-iterator = "2.1.0"
|
||||
bbqueue = { git = "https://github.com/meilisearch/bbqueue" }
|
||||
|
||||
@@ -1173,6 +1173,7 @@ pub fn extract_embeddings_from_fragments<R: io::Read + io::Seek>(
request_threads,
&doc_alloc,
embedder_stats,
false,
on_embed,
);
@@ -35,6 +35,7 @@ pub struct EmbeddingExtractor<'a, 'b> {
possible_embedding_mistakes: PossibleEmbeddingMistakes,
embedder_stats: &'a EmbedderStats,
threads: &'a ThreadPoolNoAbort,
failure_modes: EmbedderFailureModes,
}

impl<'a, 'b> EmbeddingExtractor<'a, 'b> {
@@ -46,7 +47,15 @@ impl<'a, 'b> EmbeddingExtractor<'a, 'b> {
threads: &'a ThreadPoolNoAbort,
) -> Self {
let possible_embedding_mistakes = PossibleEmbeddingMistakes::new(field_distribution);
Self { embedders, sender, threads, possible_embedding_mistakes, embedder_stats }
let failure_modes = EmbedderFailureModes::from_env();
Self {
embedders,
sender,
threads,
possible_embedding_mistakes,
embedder_stats,
failure_modes,
}
}
}

@@ -91,6 +100,7 @@ impl<'extractor> Extractor<'extractor> for EmbeddingExtractor<'_, '_> {
self.threads,
self.sender,
&context.doc_alloc,
self.failure_modes,
))
}

@@ -267,6 +277,7 @@ pub struct SettingsChangeEmbeddingExtractor<'a, 'b, SD> {
sender: EmbeddingSender<'a, 'b>,
possible_embedding_mistakes: PossibleEmbeddingMistakes,
threads: &'a ThreadPoolNoAbort,
failure_modes: EmbedderFailureModes,
}

impl<'a, 'b, SD: SettingsDelta> SettingsChangeEmbeddingExtractor<'a, 'b, SD> {
@@ -279,7 +290,16 @@ impl<'a, 'b, SD: SettingsDelta> SettingsChangeEmbeddingExtractor<'a, 'b, SD> {
threads: &'a ThreadPoolNoAbort,
) -> Self {
let possible_embedding_mistakes = PossibleEmbeddingMistakes::new(field_distribution);
Self { settings_delta, embedder_stats, sender, threads, possible_embedding_mistakes }
let failure_modes = EmbedderFailureModes::from_env();

Self {
settings_delta,
embedder_stats,
sender,
threads,
possible_embedding_mistakes,
failure_modes,
}
}
}

@@ -336,6 +356,7 @@ impl<'extractor, SD: SettingsDelta + Sync> SettingsChangeExtractor<'extractor>
self.threads,
self.sender,
&context.doc_alloc,
self.failure_modes,
),
reindex_action,
));
@@ -539,6 +560,7 @@ struct Chunks<'a, 'b, 'extractor> {
enum ChunkType<'a, 'b> {
DocumentTemplate {
document_template: &'a Prompt,
ignore_document_template_failures: bool,
session: EmbedSession<'a, OnEmbeddingDocumentUpdates<'a, 'b>, &'a str>,
},
Fragments {
@@ -559,6 +581,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
threads: &'a ThreadPoolNoAbort,
sender: EmbeddingSender<'a, 'b>,
doc_alloc: &'a Bump,
failure_modes: EmbedderFailureModes,
) -> Self {
let embedder = &runtime.embedder;
let dimensions = embedder.dimensions();
@@ -567,12 +590,14 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
let kind = if fragments.is_empty() {
ChunkType::DocumentTemplate {
document_template: &runtime.document_template,
ignore_document_template_failures: failure_modes.ignore_document_template_failures,
session: EmbedSession::new(
&runtime.embedder,
embedder_name,
threads,
doc_alloc,
embedder_stats,
failure_modes.ignore_embedder_failures,
OnEmbeddingDocumentUpdates {
embedder_id: embedder_info.embedder_id,
sender,
@@ -589,6 +614,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
threads,
doc_alloc,
embedder_stats,
failure_modes.ignore_embedder_failures,
OnEmbeddingDocumentUpdates {
embedder_id: embedder_info.embedder_id,
sender,
@@ -693,7 +719,11 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
},
)?;
}
ChunkType::DocumentTemplate { document_template, session } => {
ChunkType::DocumentTemplate {
document_template,
ignore_document_template_failures,
session,
} => {
let doc_alloc = session.doc_alloc();

let old_embedder = settings_delta.old_embedders().get(session.embedder_name());
@@ -702,6 +732,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
} else {
old_embedder.as_ref().map(|old_embedder| &old_embedder.document_template)
};

let extractor =
DocumentTemplateExtractor::new(document_template, doc_alloc, fields_ids_map);
let old_extractor = old_document_template.map(|old_document_template| {
@@ -710,7 +741,15 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
let metadata =
Metadata { docid, external_docid, extractor_id: extractor.extractor_id() };

match extractor.diff_settings(document, &external_docid, old_extractor.as_ref())? {
let extractor_diff = if *ignore_document_template_failures {
let extractor = extractor.ignore_errors();
let old_extractor = old_extractor.map(DocumentTemplateExtractor::ignore_errors);
extractor.diff_settings(document, &external_docid, old_extractor.as_ref())?
} else {
extractor.diff_settings(document, &external_docid, old_extractor.as_ref())?
};

match extractor_diff {
ExtractorDiff::Removed => {
if old_is_user_provided || full_reindex {
session.on_embed_mut().clear_vectors(docid);
@@ -758,7 +797,11 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
new_must_regenerate,
);
match &mut self.kind {
ChunkType::DocumentTemplate { document_template, session } => {
ChunkType::DocumentTemplate {
document_template,
ignore_document_template_failures,
session,
} => {
let doc_alloc = session.doc_alloc();
let ex = DocumentTemplateExtractor::new(
document_template,
@@ -766,18 +809,33 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
new_fields_ids_map,
);

update_autogenerated(
docid,
external_docid,
[ex],
old_document,
new_document,
&external_docid,
old_must_regenerate,
old_is_user_provided,
session,
unused_vectors_distribution,
)?
if *ignore_document_template_failures {
update_autogenerated(
docid,
external_docid,
[ex.ignore_errors()],
old_document,
new_document,
&external_docid,
old_must_regenerate,
old_is_user_provided,
session,
unused_vectors_distribution,
)
} else {
update_autogenerated(
docid,
external_docid,
[ex],
old_document,
new_document,
&external_docid,
old_must_regenerate,
old_is_user_provided,
session,
unused_vectors_distribution,
)
}?
}
ChunkType::Fragments { fragments, session } => {
let doc_alloc = session.doc_alloc();
@@ -844,23 +902,38 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
);

match &mut self.kind {
ChunkType::DocumentTemplate { document_template, session } => {
ChunkType::DocumentTemplate {
document_template,
ignore_document_template_failures,
session,
} => {
let doc_alloc = session.doc_alloc();
let ex = DocumentTemplateExtractor::new(
document_template,
doc_alloc,
new_fields_ids_map,
);

insert_autogenerated(
docid,
external_docid,
[ex],
new_document,
&external_docid,
session,
unused_vectors_distribution,
)?;
if *ignore_document_template_failures {
insert_autogenerated(
docid,
external_docid,
[ex.ignore_errors()],
new_document,
&external_docid,
session,
unused_vectors_distribution,
)?;
} else {
insert_autogenerated(
docid,
external_docid,
[ex],
new_document,
&external_docid,
session,
unused_vectors_distribution,
)?;
}
}
ChunkType::Fragments { fragments, session } => {
let doc_alloc = session.doc_alloc();
@@ -884,7 +957,11 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {

pub fn drain(self, unused_vectors_distribution: &UnusedVectorsDistributionBump) -> Result<()> {
match self.kind {
ChunkType::DocumentTemplate { document_template: _, session } => {
ChunkType::DocumentTemplate {
document_template: _,
ignore_document_template_failures: _,
session,
} => {
session.drain(unused_vectors_distribution)?;
}
ChunkType::Fragments { fragments: _, session } => {
@@ -896,9 +973,11 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {

pub fn embedder_name(&self) -> &'a str {
match &self.kind {
ChunkType::DocumentTemplate { document_template: _, session } => {
session.embedder_name()
}
ChunkType::DocumentTemplate {
document_template: _,
ignore_document_template_failures: _,
session,
} => session.embedder_name(),
ChunkType::Fragments { fragments: _, session } => session.embedder_name(),
}
}
@@ -967,7 +1046,11 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
}
}
match &mut self.kind {
ChunkType::DocumentTemplate { document_template: _, session } => {
ChunkType::DocumentTemplate {
document_template: _,
ignore_document_template_failures: _,
session,
} => {
session.on_embed_mut().process_embeddings(
Metadata { docid, external_docid, extractor_id: 0 },
embeddings,
@@ -1078,3 +1161,41 @@ where

Ok(())
}
#[derive(Clone, Copy, PartialEq, Eq, Default)]
struct EmbedderFailureModes {
pub ignore_document_template_failures: bool,
pub ignore_embedder_failures: bool,
}

impl EmbedderFailureModes {
fn from_env() -> Self {
match std::env::var("MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES") {
Ok(failure_modes) => Self::parse_from_str(
&failure_modes,
"`MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES`",
),
Err(std::env::VarError::NotPresent) => Self::default(),
Err(std::env::VarError::NotUnicode(_)) => panic!(
"`MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES` contains a non-unicode value"
),
}
}

fn parse_from_str(failure_modes: &str, provenance: &'static str) -> Self {
let Self { mut ignore_document_template_failures, mut ignore_embedder_failures } =
Default::default();
for segment in failure_modes.split(',') {
let segment = segment.trim();
match segment {
"ignore_document_template_failures" => {
ignore_document_template_failures = true;
}
"ignore_embedder_failures" => ignore_embedder_failures = true,
"" => continue,
segment => panic!("Unrecognized segment value for {provenance}: {segment}"),
}
}
Self { ignore_document_template_failures, ignore_embedder_failures }
}
}
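For reference, a minimal standalone sketch of the comma-separated parsing introduced above; it mirrors EmbedderFailureModes::parse_from_str but is not part of the change set, and the environment variable name is the one read by from_env in the diff.

// Illustrative sketch only: same segment names and semantics as the diff above.
// The real code reads MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES when the extractor is built.
fn parse_failure_modes(value: &str) -> (bool, bool) {
    let (mut ignore_templates, mut ignore_embedder) = (false, false);
    for segment in value.split(',') {
        match segment.trim() {
            "ignore_document_template_failures" => ignore_templates = true,
            "ignore_embedder_failures" => ignore_embedder = true,
            "" => continue,
            other => panic!("unrecognized segment: {other}"),
        }
    }
    (ignore_templates, ignore_embedder)
}

fn main() {
    // e.g. MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES="ignore_embedder_failures"
    assert_eq!(parse_failure_modes("ignore_embedder_failures"), (false, true));
    assert_eq!(
        parse_failure_modes(" ignore_document_template_failures , ignore_embedder_failures "),
        (true, true),
    );
}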
@@ -5,18 +5,36 @@

use std::hash::{BuildHasher as _, BuildHasherDefault};

pub struct Shards {
pub own: Vec<String>,
pub others: Vec<String>,
pub struct Shards(pub Vec<Shard>);

pub struct Shard {
pub is_own: bool,
pub name: String,
}

impl Shards {
pub fn from_remotes_local<'a>(
remotes: impl IntoIterator<Item = &'a str>,
local: Option<&str>,
) -> Self {
Shards(
remotes
.into_iter()
.map(|name| Shard { is_own: Some(name) == local, name: name.to_owned() })
.collect(),
)
}

pub fn must_process(&self, docid: &str) -> bool {
self.processing_shard(docid).map(|shard| shard.is_own).unwrap_or_default()
}

pub fn processing_shard<'a>(&'a self, docid: &str) -> Option<&'a Shard> {
let hasher = BuildHasherDefault::<twox_hash::XxHash3_64>::new();
let to_hash = |shard: &String| hasher.hash_one((shard, docid));
let to_hash = |shard: &'a Shard| (shard, hasher.hash_one((&shard.name, docid)));

let max_hash = self.others.iter().map(to_hash).max().unwrap_or_default();

self.own.iter().map(to_hash).any(|hash| hash > max_hash)
let shard =
self.0.iter().map(to_hash).max_by_key(|(_, hash)| *hash).map(|(shard, _)| shard);
shard
}
}
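The reworked Shards type replaces the own/others split with a single shard list and, for each document id, selects the shard whose hash of (shard name, docid) is highest; a node indexes the document only when that winning shard is its own (rendezvous-style hashing). A minimal sketch of the selection rule follows; it uses std's DefaultHasher instead of twox_hash::XxHash3_64 for brevity and is not part of the change set.

// Sketch of the shard-selection rule above, with a stand-in hasher.
use std::collections::hash_map::DefaultHasher;
use std::hash::{BuildHasher, BuildHasherDefault};

struct Shard {
    is_own: bool,
    name: String,
}

fn processing_shard<'a>(shards: &'a [Shard], docid: &str) -> Option<&'a Shard> {
    let hasher = BuildHasherDefault::<DefaultHasher>::default();
    shards
        .iter()
        // Hash the (shard name, docid) pair and keep the shard with the highest hash.
        .map(|shard| (shard, hasher.hash_one((&shard.name, docid))))
        .max_by_key(|(_, hash)| *hash)
        .map(|(shard, _)| shard)
}

fn main() {
    let shards = vec![
        Shard { is_own: true, name: "ms-0".to_string() },
        Shard { is_own: false, name: "ms-1".to_string() },
    ];
    // Every node evaluates the same rule; only the node owning the winning shard processes the document.
    let winner = processing_shard(&shards, "doc-42").expect("at least one shard");
    println!("doc-42 -> {} (local: {})", winner.name, winner.is_own);
}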
@@ -1631,8 +1631,11 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {

// Update index settings
let embedding_config_updates = self.update_embedding_configs()?;
self.update_user_defined_searchable_attributes()?;

let new_inner_settings = InnerIndexSettings::from_index(self.index, self.wtxn, None)?;
let mut new_inner_settings =
InnerIndexSettings::from_index(self.index, self.wtxn, None)?;
new_inner_settings.recompute_searchables(self.wtxn, self.index)?;

let primary_key_id = self
.index
@@ -42,6 +42,8 @@ const UPGRADE_FUNCTIONS: &[&dyn UpgradeIndex] = &[
&ToTargetNoOp { target: (1, 22, 0) },
&ToTargetNoOp { target: (1, 23, 0) },
&ToTargetNoOp { target: (1, 24, 0) },
&ToTargetNoOp { target: (1, 25, 0) },
&ToTargetNoOp { target: (1, 26, 0) },
// This is the last upgrade function, it will be called when the index is up to date.
// any other upgrade function should be added before this one.
&ToCurrentNoOp {},
@@ -77,6 +79,8 @@ const fn start(from: (u32, u32, u32)) -> Option<usize> {
(1, 22, _) => function_index!(12),
(1, 23, _) => function_index!(13),
(1, 24, _) => function_index!(14),
(1, 25, _) => function_index!(15),
(1, 26, _) => function_index!(16),
// We deliberately don't add a placeholder with (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) here to force manually
// considering dumpless upgrade.
(_major, _minor, _patch) => return None,
@@ -1,9 +1,11 @@
use candle_core::Tensor;
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config, DTYPE};
use candle_transformers::models::bert::{BertModel, Config as BertConfig, DTYPE};
use candle_transformers::models::modernbert::{Config as ModernConfig, ModernBert};
// FIXME: currently we'll be using the hub to retrieve model, in the future we might want to embed it into Meilisearch itself
use hf_hub::api::sync::Api;
use hf_hub::{Repo, RepoType};
use safetensors::SafeTensors;
use tokenizers::{PaddingParams, Tokenizer};

use super::EmbeddingCache;
@@ -84,14 +86,21 @@ impl Default for EmbedderOptions {
}
}

enum ModelKind {
Bert(BertModel),
Modern(ModernBert),
}

/// Perform embedding of documents and queries
pub struct Embedder {
model: BertModel,
model: ModelKind,
tokenizer: Tokenizer,
options: EmbedderOptions,
dimensions: usize,
pooling: Pooling,
cache: EmbeddingCache,
device: candle_core::Device,
max_len: usize,
}

impl std::fmt::Debug for Embedder {
@@ -101,10 +110,60 @@ impl std::fmt::Debug for Embedder {
.field("tokenizer", &self.tokenizer)
.field("options", &self.options)
.field("pooling", &self.pooling)
.field("device", &self.device)
.field("max_len", &self.max_len)
.finish()
}
}

// some models do not have the "model." prefix in their safetensors weights
fn change_tensor_names(
weights_path: &std::path::Path,
) -> Result<std::path::PathBuf, NewEmbedderError> {
let data = std::fs::read(weights_path)
.map_err(|e| NewEmbedderError::safetensor_weight(candle_core::Error::Io(e)))?;

let tensors = SafeTensors::deserialize(&data)
.map_err(|e| NewEmbedderError::safetensor_weight(candle_core::Error::Msg(e.to_string())))?;

let names = tensors.names();
let has_model_prefix = names.iter().any(|n| n.starts_with("model."));

if has_model_prefix {
return Ok(weights_path.to_path_buf());
}

let fixed_path = weights_path.with_extension("fixed.safetensors");

if fixed_path.exists() {
return Ok(fixed_path);
}

let mut new_tensors = vec![];
for name in names {
let tensor_view = tensors.tensor(name).map_err(|e| {
NewEmbedderError::safetensor_weight(candle_core::Error::Msg(e.to_string()))
})?;

let new_name = format!("model.{}", name);
let data_offset = tensor_view.data();
let shape = tensor_view.shape();
let dtype = tensor_view.dtype();

new_tensors.push((new_name, shape.to_vec(), dtype, data_offset));
}

use safetensors::tensor::TensorView;
let views = new_tensors.iter().map(|(name, shape, dtype, data)| {
(name.as_str(), TensorView::new(*dtype, shape.clone(), data).unwrap())
});

safetensors::serialize_to_file(views, None, &fixed_path)
.map_err(|e| NewEmbedderError::safetensor_weight(candle_core::Error::Msg(e.to_string())))?;

Ok(fixed_path)
}
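As an illustration of the rewrite above: when a safetensors checkpoint exposes no "model." prefix, a sibling "*.fixed.safetensors" copy is written in which every tensor name gains that prefix, presumably the layout the candle ModernBert loader looks up. The tensor names below are hypothetical and only show the mapping; this snippet is not part of the change set.

// Illustrative only: the rename applied to each (hypothetical) tensor name.
fn main() {
    for name in ["embeddings.tok_embeddings.weight", "layers.0.attn.Wqkv.weight"] {
        println!("{name} -> model.{name}");
    }
}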
#[derive(Clone, Copy, serde::Deserialize)]
struct PoolingConfig {
#[serde(default)]
@@ -220,19 +279,42 @@ impl Embedder {
(config, tokenizer, weights, source, pooling)
};

let config = std::fs::read_to_string(&config_filename)
let config_str = std::fs::read_to_string(&config_filename)
.map_err(|inner| NewEmbedderError::open_config(config_filename.clone(), inner))?;
let config: Config = serde_json::from_str(&config).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config,
config_filename,
inner,
)
})?;

let cfg_val: serde_json::Value = match serde_json::from_str(&config_str) {
Ok(v) => v,
Err(inner) => {
return Err(NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
));
}
};

let model_type = cfg_val.get("model_type").and_then(|v| v.as_str()).unwrap_or_default();
let arch_arr = cfg_val.get("architectures").and_then(|v| v.as_array());
let has_arch = |needle: &str| {
model_type.eq_ignore_ascii_case(needle)
|| arch_arr.is_some_and(|arr| {
arr.iter().filter_map(|v| v.as_str()).any(|s| s.to_lowercase().contains(needle))
})
};

let is_modern = has_arch("modernbert");
tracing::debug!(is_modern, model_type, "detected HF architecture");

let mut tokenizer = Tokenizer::from_file(&tokenizer_filename)
.map_err(|inner| NewEmbedderError::open_tokenizer(tokenizer_filename, inner))?;

let weights_filename = if is_modern && weight_source == WeightSource::Safetensors {
change_tensor_names(&weights_filename)?
} else {
weights_filename
};
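A small sketch of the architecture sniffing above, run against a hypothetical ModernBERT config.json (the field values are illustrative, not taken from a real model; serde_json is assumed to be available, as it already is in this crate). This is not part of the change set.

// Sketch: same model_type / architectures detection rule as above.
fn main() {
    let cfg: serde_json::Value = serde_json::from_str(
        r#"{"model_type":"modernbert","architectures":["ModernBertModel"],"max_position_embeddings":8192}"#,
    )
    .unwrap();
    let model_type = cfg.get("model_type").and_then(|v| v.as_str()).unwrap_or_default();
    let is_modern = model_type.eq_ignore_ascii_case("modernbert")
        || cfg
            .get("architectures")
            .and_then(|v| v.as_array())
            .is_some_and(|arr| {
                arr.iter().filter_map(|v| v.as_str()).any(|s| s.to_lowercase().contains("modernbert"))
            });
    assert!(is_modern);
    println!("modernbert detected: {is_modern}");
}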
let vb = match weight_source {
WeightSource::Pytorch => VarBuilder::from_pth(&weights_filename, DTYPE, &device)
.map_err(NewEmbedderError::pytorch_weight)?,
@@ -244,7 +326,31 @@ impl Embedder {

tracing::debug!(model = options.model, weight=?weight_source, pooling=?pooling, "model config");

let model = BertModel::load(vb, &config).map_err(NewEmbedderError::load_model)?;
// max length from config, fallback to 512
let max_len =
cfg_val.get("max_position_embeddings").and_then(|v| v.as_u64()).unwrap_or(512) as usize;

let model = if is_modern {
let config: ModernConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
)
})?;
ModelKind::Modern(ModernBert::load(vb, &config).map_err(NewEmbedderError::load_model)?)
} else {
let config: BertConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
)
})?;
ModelKind::Bert(BertModel::load(vb, &config).map_err(NewEmbedderError::load_model)?)
};

if let Some(pp) = tokenizer.get_padding_mut() {
pp.strategy = tokenizers::PaddingStrategy::BatchLongest
@@ -263,6 +369,8 @@ impl Embedder {
dimensions: 0,
pooling,
cache: EmbeddingCache::new(cache_cap),
device,
max_len,
};

let embeddings = this
@@ -321,15 +429,29 @@ impl Embedder {
pub fn embed_one(&self, text: &str) -> std::result::Result<Embedding, EmbedError> {
let tokens = self.tokenizer.encode(text, true).map_err(EmbedError::tokenize)?;
let token_ids = tokens.get_ids();
let token_ids = if token_ids.len() > 512 { &token_ids[..512] } else { token_ids };
let token_ids =
Tensor::new(token_ids, &self.model.device).map_err(EmbedError::tensor_shape)?;
if token_ids.len() > self.max_len { &token_ids[..self.max_len] } else { token_ids };
let token_ids = Tensor::new(token_ids, &self.device).map_err(EmbedError::tensor_shape)?;
let token_ids = Tensor::stack(&[token_ids], 0).map_err(EmbedError::tensor_shape)?;
let token_type_ids = token_ids.zeros_like().map_err(EmbedError::tensor_shape)?;
let embeddings = self
.model
.forward(&token_ids, &token_type_ids, None)
.map_err(EmbedError::model_forward)?;

let embeddings = match &self.model {
ModelKind::Bert(model) => {
let token_type_ids = token_ids.zeros_like().map_err(EmbedError::tensor_shape)?;
model
.forward(&token_ids, &token_type_ids, None)
.map_err(EmbedError::model_forward)?
}
ModelKind::Modern(model) => {
let mut mask_vec = tokens.get_attention_mask().to_vec();
if mask_vec.len() > self.max_len {
mask_vec.truncate(self.max_len);
}
let mask = Tensor::new(mask_vec.as_slice(), &self.device)
.map_err(EmbedError::tensor_shape)?;
let mask = Tensor::stack(&[mask], 0).map_err(EmbedError::tensor_shape)?;
model.forward(&token_ids, &mask).map_err(EmbedError::model_forward)?
}
};

let embedding = Self::pooling(embeddings, self.pooling)?;
@@ -91,6 +91,7 @@ struct EmbedderData {
request: RequestData,
response: Response,
configuration_source: ConfigurationSource,
max_retry_duration: std::time::Duration,
}

#[derive(Debug)]
@@ -182,10 +183,15 @@ impl Embedder {
) -> Result<Self, NewEmbedderError> {
let bearer = options.api_key.as_deref().map(|api_key| format!("Bearer {api_key}"));

let timeout = std::env::var("MEILI_EXPERIMENTAL_REST_EMBEDDER_TIMEOUT_SECONDS")
.ok()
.map(|p| p.parse().unwrap())
.unwrap_or(30);

let client = ureq::AgentBuilder::new()
.max_idle_connections(REQUEST_PARALLELISM * 2)
.max_idle_connections_per_host(REQUEST_PARALLELISM * 2)
.timeout(std::time::Duration::from_secs(30))
.timeout(std::time::Duration::from_secs(timeout))
.build();

let request = RequestData::new(
@@ -196,6 +202,14 @@ impl Embedder {

let response = Response::new(options.response, &request)?;

let max_retry_duration =
std::env::var("MEILI_EXPERIMENTAL_REST_EMBEDDER_MAX_RETRY_DURATION_SECONDS")
.ok()
.map(|p| p.parse().unwrap())
.unwrap_or(60);

let max_retry_duration = std::time::Duration::from_secs(max_retry_duration);

let data = EmbedderData {
client,
bearer,
@@ -204,6 +218,7 @@ impl Embedder {
response,
configuration_source,
headers: options.headers,
max_retry_duration,
};

let dimensions = if let Some(dimensions) = options.dimensions {
@@ -457,7 +472,7 @@
}
}?;

let retry_duration = retry_duration.min(std::time::Duration::from_secs(60)); // don't wait more than a minute
let retry_duration = retry_duration.min(data.max_retry_duration); // don't wait more than the max duration

// randomly up to double the retry duration
let retry_duration = retry_duration
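For reference, a minimal sketch of how the two new tunables above resolve, with the defaults from the diff (30 s request timeout, 60 s cap on the retry backoff); the variable names are the ones read in the diff, and, as there, a non-numeric value would panic on parse. Not part of the change set.

// Sketch: env-driven seconds with a default, mirroring the diff above.
use std::time::Duration;

fn env_secs(var: &str, default: u64) -> Duration {
    let secs = std::env::var(var).ok().map(|p| p.parse().unwrap()).unwrap_or(default);
    Duration::from_secs(secs)
}

fn main() {
    let timeout = env_secs("MEILI_EXPERIMENTAL_REST_EMBEDDER_TIMEOUT_SECONDS", 30);
    let max_retry = env_secs("MEILI_EXPERIMENTAL_REST_EMBEDDER_MAX_RETRY_DURATION_SECONDS", 60);
    println!("request timeout: {timeout:?}, retry backoff cap: {max_retry:?}");
}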
@@ -550,9 +550,9 @@ pub struct DeserializePoolingConfig {
#[derive(Debug, thiserror::Error)]
#[error("model `{model_name}` appears to be unsupported{}\n - inner error: {inner}",
if architectures.is_empty() {
"\n - Note: only models with architecture \"BertModel\" are supported.".to_string()
"\n - Note: only models with architecture \"BertModel\" or \"ModernBert\" are supported.".to_string()
} else {
format!("\n - Note: model has declared architectures `{architectures:?}`, only models with architecture `\"BertModel\"` are supported.")
format!("\n - Note: model has declared architectures `{architectures:?}`, only models with architecture `\"BertModel\"` or `\"ModernBert\"` are supported.")
})]
pub struct UnsupportedModel {
pub model_name: String,

@@ -44,6 +44,7 @@ pub struct EmbedSession<'doc, C, I> {
embedder_name: &'doc str,

embedder_stats: &'doc EmbedderStats,
ignore_embedding_failures: bool,

on_embed: C,
}
@@ -87,6 +88,7 @@ impl<'doc, C: OnEmbed<'doc>, I: Input> EmbedSession<'doc, C, I> {
threads: &'doc ThreadPoolNoAbort,
doc_alloc: &'doc Bump,
embedder_stats: &'doc EmbedderStats,
ignore_embedding_failures: bool,
on_embed: C,
) -> Self {
let capacity = embedder.prompt_count_in_chunk_hint() * embedder.chunk_count_hint();
@@ -99,6 +101,7 @@ impl<'doc, C: OnEmbed<'doc>, I: Input> EmbedSession<'doc, C, I> {
threads,
embedder_name,
embedder_stats,
ignore_embedding_failures,
on_embed,
}
}
@@ -144,24 +147,33 @@ impl<'doc, C: OnEmbed<'doc>, I: Input> EmbedSession<'doc, C, I> {
Ok(())
}
Err(error) => {
// reset metadata and inputs, and send metadata to the error processing.
// send metadata to the error processing.
let doc_alloc = self.metadata.bump();
let metadata = std::mem::replace(
&mut self.metadata,
BVec::with_capacity_in(self.inputs.capacity(), doc_alloc),
);
self.inputs.clear();
return Err(self.on_embed.process_embedding_error(
Err(self.on_embed.process_embedding_error(
error,
self.embedder_name,
unused_vectors_distribution,
metadata,
));
))
}
};
self.inputs.clear();
self.metadata.clear();
res
if self.ignore_embedding_failures {
if let Err(err) = res {
tracing::warn!(
%err,
"ignored error embedding batch of documents due to failure policy"
);
}
Ok(())
} else {
res
}
}

pub(crate) fn embedder_name(&self) -> &'doc str {