Compare commits


3 Commits

Author SHA1 Message Date
Louis Dureuil 5ecb4eb79e Embedding no longer returns a result 2025-10-20 17:55:14 +02:00
Louis Dureuil 0a91c091c6 Do not fail a batch when the embedder fails to embed 2025-10-20 17:54:54 +02:00
Louis Dureuil 47a15fbe24 Ignore rendering errors on the document template 2025-10-20 17:54:19 +02:00
93 changed files with 1100 additions and 5687 deletions
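Taken together, the three commits turn embedder failures from batch-fatal errors into per-document soft failures: rendering errors on the document template are ignored, a failed embedding no longer fails the batch, and the embedding call itself no longer returns a Result. A minimal sketch of that shape, with hypothetical names (this is not the actual Meilisearch API):

    struct Embedder;

    #[derive(Debug)]
    struct EmbedError(String);

    impl Embedder {
        // Rendering the document template and embedding can still fail internally...
        fn try_embed(&self, doc: &str) -> Result<Vec<f32>, EmbedError> {
            if doc.is_empty() {
                return Err(EmbedError("empty rendered template".into()));
            }
            Ok(vec![0.0; 384]) // placeholder embedding
        }

        // ...but the public call now logs and returns None instead of propagating
        // an error, so one bad document cannot abort the whole batch.
        fn embed(&self, doc: &str) -> Option<Vec<f32>> {
            match self.try_embed(doc) {
                Ok(v) => Some(v),
                Err(e) => {
                    eprintln!("skipping document: {e:?}");
                    None
                }
            }
        }
    }

    fn main() {
        let embedder = Embedder;
        let docs = ["a document", "", "another document"];
        // The batch completes even though the second document fails to embed.
        let embedded: Vec<Vec<f32>> = docs.iter().filter_map(|d| embedder.embed(d)).collect();
        println!("embedded {} of {} documents", embedded.len(), docs.len());
    }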

Cargo.lock (generated, 381 changed lines)

@@ -310,7 +310,6 @@ dependencies = [
"const-random",
"getrandom 0.3.3",
"once_cell",
"serde",
"version_check",
"zerocopy",
]
@@ -345,6 +344,12 @@ version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
[[package]]
name = "allocator-api2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c583acf993cf4245c4acb0a2cc2ab1f9cc097de73411bb6d3647ff6af2b1013d"
[[package]]
name = "anes"
version = "0.1.6"
@@ -487,7 +492,7 @@ dependencies = [
"backoff",
"base64 0.22.1",
"bytes",
"derive_builder",
"derive_builder 0.20.2",
"eventsource-stream",
"futures",
"rand 0.8.5",
@@ -584,7 +589,7 @@ source = "git+https://github.com/meilisearch/bbqueue#cbb87cc707b5af415ef203bdaf2
[[package]]
name = "benchmarks"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"anyhow",
"bumpalo",
@@ -794,7 +799,7 @@ dependencies = [
[[package]]
name = "build-info"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"anyhow",
"time",
@@ -807,7 +812,7 @@ version = "3.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
dependencies = [
"allocator-api2",
"allocator-api2 0.2.21",
"serde",
]
@@ -817,7 +822,7 @@ version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ce682bdc86c2e25ef5cd95881d9d6a1902214eddf74cf9ffea88fe1464377e8"
dependencies = [
"allocator-api2",
"allocator-api2 0.2.21",
"bitpacking",
"bumpalo",
"hashbrown 0.15.5",
@@ -940,7 +945,7 @@ dependencies = [
"rand 0.9.2",
"rand_distr",
"rayon",
"safetensors 0.4.5",
"safetensors",
"thiserror 1.0.69",
"ug",
"ug-cuda",
@@ -967,7 +972,7 @@ dependencies = [
"half",
"num-traits",
"rayon",
"safetensors 0.4.5",
"safetensors",
"serde",
"thiserror 1.0.69",
]
@@ -1047,15 +1052,6 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "castaway"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a"
dependencies = [
"rustversion",
]
[[package]]
name = "cc"
version = "1.2.37"
@@ -1132,9 +1128,9 @@ dependencies = [
[[package]]
name = "charabia"
version = "0.9.8"
version = "0.9.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbdc8cd8f999e8b8b13ed71d30962bbf98cf39e2f2a9f1ae1ba354199239d66e"
checksum = "7c2f456825b7f15eac01a1cae40c12c3f55e931d4327e6e4fa59508d664e9568"
dependencies = [
"aho-corasick",
"csv",
@@ -1143,6 +1139,7 @@ dependencies = [
"irg-kvariants",
"jieba-rs",
"lindera",
"once_cell",
"pinyin",
"serde",
"slice-group-by",
@@ -1218,7 +1215,7 @@ dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
"strsim 0.11.1",
]
[[package]]
@@ -1257,21 +1254,6 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "compact_str"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a"
dependencies = [
"castaway",
"cfg-if",
"itoa",
"rustversion",
"ryu",
"serde",
"static_assertions",
]
[[package]]
name = "concat-arrays"
version = "0.1.2"
@@ -1530,14 +1512,38 @@ dependencies = [
"libloading",
]
[[package]]
name = "darling"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
dependencies = [
"darling_core 0.14.4",
"darling_macro 0.14.4",
]
[[package]]
name = "darling"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core",
"darling_macro",
"darling_core 0.20.11",
"darling_macro 0.20.11",
]
[[package]]
name = "darling_core"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.10.0",
"syn 1.0.109",
]
[[package]]
@@ -1550,17 +1556,28 @@ dependencies = [
"ident_case",
"proc-macro2",
"quote",
"strsim",
"strsim 0.11.1",
"syn 2.0.106",
]
[[package]]
name = "darling_macro"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
dependencies = [
"darling_core 0.14.4",
"quote",
"syn 1.0.109",
]
[[package]]
name = "darling_macro"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core",
"darling_core 0.20.11",
"quote",
"syn 2.0.106",
]
@@ -1570,9 +1587,6 @@ name = "dary_heap"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728"
dependencies = [
"serde",
]
[[package]]
name = "deadpool"
@@ -1628,13 +1642,34 @@ dependencies = [
"syn 2.0.106",
]
[[package]]
name = "derive_builder"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8"
dependencies = [
"derive_builder_macro 0.12.0",
]
[[package]]
name = "derive_builder"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
dependencies = [
"derive_builder_macro",
"derive_builder_macro 0.20.2",
]
[[package]]
name = "derive_builder_core"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f"
dependencies = [
"darling 0.14.4",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
@@ -1643,19 +1678,29 @@ version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
dependencies = [
"darling",
"darling 0.20.11",
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "derive_builder_macro"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e"
dependencies = [
"derive_builder_core 0.12.0",
"syn 1.0.109",
]
[[package]]
name = "derive_builder_macro"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
dependencies = [
"derive_builder_core",
"derive_builder_core 0.20.2",
"syn 2.0.106",
]
@@ -1694,7 +1739,7 @@ dependencies = [
"serde-cs",
"serde_json",
"serde_urlencoded",
"strsim",
"strsim 0.11.1",
]
[[package]]
@@ -1779,12 +1824,12 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "415b6ec780d34dcf624666747194393603d0373b7141eef01d12ee58881507d9"
dependencies = [
"phf 0.11.3",
"phf",
]
[[package]]
name = "dump"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"anyhow",
"big_s",
@@ -2027,7 +2072,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "file-store"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"tempfile",
"thiserror 2.0.16",
@@ -2049,7 +2094,7 @@ dependencies = [
[[package]]
name = "filter-parser"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"insta",
"levenshtein_automata",
@@ -2077,7 +2122,7 @@ dependencies = [
[[package]]
name = "flatten-serde-json"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"criterion",
"serde_json",
@@ -2234,7 +2279,7 @@ dependencies = [
[[package]]
name = "fuzzers"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"arbitrary",
"bumpalo",
@@ -2760,7 +2805,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
dependencies = [
"ahash 0.8.12",
"allocator-api2",
"allocator-api2 0.2.21",
]
[[package]]
@@ -2769,7 +2814,7 @@ version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
dependencies = [
"allocator-api2",
"allocator-api2 0.2.21",
"equivalent",
"foldhash",
"serde",
@@ -2793,9 +2838,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "heed"
version = "0.22.1-nested-rtxns-6"
version = "0.22.1-nested-rtxns"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c69e07cd539834bedcfa938f3d7d8520cce1ad2b0776c122b5ccdf8fd5bafe12"
checksum = "0ff115ba5712b1f1fc7617b195f5c2f139e29c397ff79da040cd19db75ccc240"
dependencies = [
"bitflags 2.9.4",
"byteorder",
@@ -3188,7 +3233,7 @@ dependencies = [
[[package]]
name = "index-scheduler"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"anyhow",
"backoff",
@@ -3197,11 +3242,10 @@ dependencies = [
"bumpalo",
"bumparaw-collections",
"byte-unit",
"bytes",
"convert_case 0.8.0",
"crossbeam-channel",
"csv",
"derive_builder",
"derive_builder 0.20.2",
"dump",
"enum-iterator",
"file-store",
@@ -3215,17 +3259,13 @@ dependencies = [
"memmap2",
"page_size",
"rayon",
"reqwest",
"roaring 0.10.12",
"rusty-s3",
"serde",
"serde_json",
"synchronoise",
"tar",
"tempfile",
"thiserror 2.0.16",
"time",
"tokio",
"tracing",
"ureq",
"uuid",
@@ -3368,6 +3408,15 @@ dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.13.0"
@@ -3394,49 +3443,26 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "jieba-macros"
version = "0.8.1"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "348294e44ee7e3c42685da656490f8febc7359632544019621588902216da95c"
checksum = "7c676b32a471d3cfae8dac2ad2f8334cd52e53377733cca8c1fb0a5062fec192"
dependencies = [
"phf_codegen",
]
[[package]]
name = "jieba-rs"
version = "0.8.1"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "766bd7012aa5ba49411ebdf4e93bddd59b182d2918e085d58dec5bb9b54b7105"
checksum = "f5dd552bbb95d578520ee68403bf8aaf0dbbb2ce55b0854d019f9350ad61040a"
dependencies = [
"cedarwood",
"fxhash",
"include-flate",
"jieba-macros",
"phf 0.13.1",
"lazy_static",
"phf",
"regex",
"rustc-hash 2.1.1",
]
[[package]]
name = "jiff"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49"
dependencies = [
"jiff-static",
"log",
"portable-atomic",
"portable-atomic-util",
"serde",
]
[[package]]
name = "jiff-static"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
@@ -3461,7 +3487,7 @@ dependencies = [
[[package]]
name = "json-depth-checker"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"criterion",
"serde_json",
@@ -3712,7 +3738,7 @@ dependencies = [
"bincode 2.0.1",
"byteorder",
"csv",
"derive_builder",
"derive_builder 0.20.2",
"encoding",
"encoding_rs",
"encoding_rs_io",
@@ -3862,9 +3888,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
[[package]]
name = "lmdb-master-sys"
version = "0.2.6-nested-rtxns-6"
version = "0.2.6-nested-rtxns"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e113d9bf240f974fbe7fd516cbfd8c422e925c0655495501c7237548425493d0"
checksum = "f4ff85130e3c994b36877045fbbb138d521dea7197bfc19dc3d5d95101a8e20a"
dependencies = [
"cc",
"doxygen-rs",
@@ -3962,16 +3988,6 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"
[[package]]
name = "md-5"
version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
dependencies = [
"cfg-if",
"digest",
]
[[package]]
name = "md5"
version = "0.7.0"
@@ -3980,7 +3996,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "meili-snap"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"insta",
"md5",
@@ -3991,7 +4007,7 @@ dependencies = [
[[package]]
name = "meilisearch"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"actix-cors",
"actix-http",
@@ -4088,7 +4104,7 @@ dependencies = [
[[package]]
name = "meilisearch-auth"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"base64 0.22.1",
"enum-iterator",
@@ -4107,7 +4123,7 @@ dependencies = [
[[package]]
name = "meilisearch-types"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"actix-web",
"anyhow",
@@ -4142,7 +4158,7 @@ dependencies = [
[[package]]
name = "meilitool"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"anyhow",
"clap",
@@ -4176,8 +4192,9 @@ dependencies = [
[[package]]
name = "milli"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"allocator-api2 0.3.1",
"arroy",
"bbqueue",
"big_s",
@@ -4235,7 +4252,6 @@ dependencies = [
"roaring 0.10.12",
"rstar",
"rustc-hash 2.1.1",
"safetensors 0.6.2",
"serde",
"serde_json",
"slice-group-by",
@@ -4757,7 +4773,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "permissive-json-pointer"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"big_s",
"serde_json",
@@ -4814,27 +4830,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
dependencies = [
"phf_macros",
"phf_shared 0.11.3",
]
[[package]]
name = "phf"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf"
dependencies = [
"phf_shared 0.13.1",
"serde",
"phf_shared",
]
[[package]]
name = "phf_codegen"
version = "0.13.1"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49aa7f9d80421bca176ca8dbfebe668cc7a2684708594ec9f3c0db0805d5d6e1"
checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a"
dependencies = [
"phf_generator 0.13.1",
"phf_shared 0.13.1",
"phf_generator",
"phf_shared",
]
[[package]]
@@ -4843,28 +4849,18 @@ version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d"
dependencies = [
"phf_shared 0.11.3",
"phf_shared",
"rand 0.8.5",
]
[[package]]
name = "phf_generator"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737"
dependencies = [
"fastrand",
"phf_shared 0.13.1",
]
[[package]]
name = "phf_macros"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216"
dependencies = [
"phf_generator 0.11.3",
"phf_shared 0.11.3",
"phf_generator",
"phf_shared",
"proc-macro2",
"quote",
"syn 2.0.106",
@@ -4879,15 +4875,6 @@ dependencies = [
"siphasher",
]
[[package]]
name = "phf_shared"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266"
dependencies = [
"siphasher",
]
[[package]]
name = "pin-project"
version = "1.1.10"
@@ -4975,15 +4962,6 @@ version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
[[package]]
name = "portable-atomic-util"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
dependencies = [
"portable-atomic",
]
[[package]]
name = "potential_utf"
version = "0.1.3"
@@ -5161,16 +5139,6 @@ dependencies = [
"version_check",
]
[[package]]
name = "quick-xml"
version = "0.38.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42a232e7487fc2ef313d96dde7948e7a3c05101870d8985e4fd8d26aedd27b89"
dependencies = [
"memchr",
"serde",
]
[[package]]
name = "quinn"
version = "0.11.9"
@@ -5346,12 +5314,12 @@ dependencies = [
[[package]]
name = "rayon-cond"
version = "0.4.0"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2964d0cf57a3e7a06e8183d14a8b527195c706b7983549cd5462d5aa3747438f"
checksum = "059f538b55efd2309c9794130bc149c6a553db90e9d99c2030785c82f0bd7df9"
dependencies = [
"either",
"itertools 0.14.0",
"itertools 0.11.0",
"rayon",
]
@@ -5446,7 +5414,6 @@ dependencies = [
"futures-channel",
"futures-core",
"futures-util",
"h2 0.4.12",
"http 1.3.1",
"http-body",
"http-body-util",
@@ -5737,25 +5704,6 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "rusty-s3"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fac2edd2f0b56bd79a7343f49afc01c2d41010df480538a510e0abc56044f66c"
dependencies = [
"base64 0.22.1",
"hmac",
"jiff",
"md-5",
"percent-encoding",
"quick-xml",
"serde",
"serde_json",
"sha2",
"url",
"zeroize",
]
[[package]]
name = "ryu"
version = "1.0.20"
@@ -5772,16 +5720,6 @@ dependencies = [
"serde_json",
]
[[package]]
name = "safetensors"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "172dd94c5a87b5c79f945c863da53b2ebc7ccef4eca24ac63cca66a41aab2178"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "same-file"
version = "1.0.6"
@@ -6263,6 +6201,12 @@ dependencies = [
"indexmap",
]
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strsim"
version = "0.11.1"
@@ -6588,24 +6532,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokenizers"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6475a27088c98ea96d00b39a9ddfb63780d1ad4cceb6f48374349a96ab2b7842"
version = "0.15.2"
source = "git+https://github.com/huggingface/tokenizers.git?tag=v0.15.2#701a73b869602b5639589d197e805349cdba3223"
dependencies = [
"ahash 0.8.12",
"aho-corasick",
"compact_str",
"dary_heap",
"derive_builder",
"derive_builder 0.12.0",
"esaxx-rs",
"getrandom 0.3.3",
"itertools 0.14.0",
"getrandom 0.2.16",
"itertools 0.12.1",
"lazy_static",
"log",
"macro_rules_attribute",
"monostate",
"onig",
"paste",
"rand 0.9.2",
"rand 0.8.5",
"rayon",
"rayon-cond",
"regex",
@@ -6613,7 +6554,7 @@ dependencies = [
"serde",
"serde_json",
"spm_precompiled",
"thiserror 2.0.16",
"thiserror 1.0.69",
"unicode-normalization-alignments",
"unicode-segmentation",
"unicode_categories",
@@ -6975,7 +6916,7 @@ dependencies = [
"num-traits",
"num_cpus",
"rayon",
"safetensors 0.4.5",
"safetensors",
"serde",
"thiserror 1.0.69",
"tracing",
@@ -7205,7 +7146,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777"
dependencies = [
"anyhow",
"derive_builder",
"derive_builder 0.20.2",
"rustversion",
"vergen-lib",
]
@@ -7217,7 +7158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1"
dependencies = [
"anyhow",
"derive_builder",
"derive_builder 0.20.2",
"git2",
"rustversion",
"time",
@@ -7232,7 +7173,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166"
dependencies = [
"anyhow",
"derive_builder",
"derive_builder 0.20.2",
"rustversion",
]
@@ -7879,7 +7820,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "1.26.0"
version = "1.24.0"
dependencies = [
"anyhow",
"build-info",


@@ -23,7 +23,7 @@ members = [
]
[workspace.package]
version = "1.26.0"
version = "1.24.0"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",


@@ -39,7 +39,6 @@
## đź–Ą Examples
- [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
- [**Flickr**](https://flickr.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — Search and explore one hundred million Flickr images with semantic search.
- [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
- [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million songs.
- [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.


@@ -9,9 +9,8 @@ use meilisearch_types::error::ResponseError;
use meilisearch_types::keys::Key;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::settings::Unchecked;
use meilisearch_types::tasks::enterprise_edition::network::{NetworkTopologyChange, DbTaskNetwork};
use meilisearch_types::tasks::{
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId,
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId, TaskNetwork,
};
use meilisearch_types::InstanceUid;
use roaring::RoaringBitmap;
@@ -96,9 +95,7 @@ pub struct TaskDump {
)]
pub finished_at: Option<OffsetDateTime>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<DbTaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
pub network: Option<TaskNetwork>,
}
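On both sides of the change, the optional task fields keep the same serde shape. For reference, a freestanding illustration of the skip_serializing_if pattern used on the network field above (simplified stand-in types; assumes serde and serde_json as dependencies):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct TaskDumpLike {
        uid: u32,
        // Absent keys deserialize to None, and None is omitted from the JSON
        // entirely, matching the dump format above.
        #[serde(default, skip_serializing_if = "Option::is_none")]
        network: Option<String>,
    }

    fn main() {
        let task = TaskDumpLike { uid: 1, network: None };
        // Prints {"uid":1} with no "network" key.
        println!("{}", serde_json::to_string(&task).unwrap());
    }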
// A `Kind`-specific version made for the dump. If modified, you may break the dump.
@@ -164,7 +161,6 @@ pub enum KindDump {
IndexCompaction {
index_uid: String,
},
NetworkTopologyChange(NetworkTopologyChange),
}
impl From<Task> for TaskDump {
@@ -182,7 +178,6 @@ impl From<Task> for TaskDump {
started_at: task.started_at,
finished_at: task.finished_at,
network: task.network,
custom_metadata: task.custom_metadata,
}
}
}
@@ -251,9 +246,6 @@ impl From<KindWithContent> for KindDump {
KindWithContent::IndexCompaction { index_uid } => {
KindDump::IndexCompaction { index_uid }
}
KindWithContent::NetworkTopologyChange(network_topology_change) => {
KindDump::NetworkTopologyChange(network_topology_change)
}
}
}
}
@@ -404,7 +396,6 @@ pub(crate) mod test {
started_at: Some(datetime!(2022-11-20 0:00 UTC)),
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
network: None,
custom_metadata: None,
},
None,
),
@@ -430,7 +421,6 @@ pub(crate) mod test {
started_at: None,
finished_at: None,
network: None,
custom_metadata: None,
},
Some(vec![
json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
@@ -451,7 +441,6 @@ pub(crate) mod test {
started_at: None,
finished_at: None,
network: None,
custom_metadata: None,
},
None,
),
@@ -565,8 +554,7 @@ pub(crate) mod test {
Network {
local: Some("myself".to_string()),
remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()), write_api_key: Some("docApiKey".to_string()) }},
leader: None,
version: Default::default(),
sharding: false,
}
}


@@ -164,7 +164,6 @@ impl CompatV5ToV6 {
started_at: task_view.started_at,
finished_at: task_view.finished_at,
network: None,
custom_metadata: None,
};
(task, content_file)


@@ -60,7 +60,7 @@ impl FileStore {
/// Returns the file corresponding to the requested uuid.
pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> {
let path = self.update_path(uuid);
let path = self.get_update_path(uuid);
let file = match StdFile::open(path) {
Ok(file) => file,
Err(e) => {
@@ -72,7 +72,7 @@ impl FileStore {
}
/// Returns the path that corresponds to this uuid; the path may not exist.
pub fn update_path(&self, uuid: Uuid) -> PathBuf {
pub fn get_update_path(&self, uuid: Uuid) -> PathBuf {
self.path.join(uuid.to_string())
}
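The rename from update_path to get_update_path does not change the layout: update files live directly under the store directory, keyed by uuid. A minimal standalone sketch of that scheme (simplified to string uuids, not the crate's real types):

    use std::fs::File;
    use std::io;
    use std::path::{Path, PathBuf};

    struct FileStore {
        path: PathBuf,
    }

    impl FileStore {
        /// Returns the path corresponding to this uuid; the file may not exist.
        fn get_update_path(&self, uuid: &str) -> PathBuf {
            self.path.join(uuid)
        }

        /// Returns the update file for the requested uuid, surfacing not-found
        /// errors to the caller.
        fn get_update(&self, uuid: &str) -> io::Result<File> {
            File::open(self.get_update_path(uuid))
        }
    }

    fn main() {
        let store = FileStore { path: Path::new("/tmp/updates").to_path_buf() };
        match store.get_update("0192-not-a-real-uuid") {
            Ok(_) => println!("found update file"),
            Err(e) => println!("no update file: {e}"),
        }
    }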


@@ -14,7 +14,6 @@ license.workspace = true
anyhow = "1.0.98"
bincode = "1.3.3"
byte-unit = "5.1.6"
bytes = "1.10.1"
bumpalo = "3.18.1"
bumparaw-collections = "0.1.4"
convert_case = "0.8.0"
@@ -33,7 +32,6 @@ rayon = "1.10.0"
roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
synchronoise = "1.0.1"
tempfile = "3.20.0"
thiserror = "2.0.12"
@@ -47,9 +45,6 @@ tracing = "0.1.41"
ureq = "2.12.1"
uuid = { version = "1.17.0", features = ["serde", "v4"] }
backoff = "0.4.0"
reqwest = { version = "0.12.23", features = ["rustls-tls", "http2"], default-features = false }
rusty-s3 = "0.8.1"
tokio = { version = "1.47.1", features = ["full"] }
[dev-dependencies]
big_s = "1.0.2"


@@ -150,7 +150,6 @@ impl<'a> Dump<'a> {
details: task.details,
status: task.status,
network: task.network,
custom_metadata: task.custom_metadata,
kind: match task.kind {
KindDump::DocumentImport {
primary_key,
@@ -238,9 +237,6 @@ impl<'a> Dump<'a> {
KindDump::IndexCompaction { index_uid } => {
KindWithContent::IndexCompaction { index_uid }
}
KindDump::NetworkTopologyChange(network_topology_change) => {
KindWithContent::NetworkTopologyChange(network_topology_change)
}
},
};


@@ -3,13 +3,9 @@ use std::fmt::Display;
use meilisearch_types::batches::BatchId;
use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::milli::index::RollbackOutcome;
use meilisearch_types::milli::DocumentId;
use meilisearch_types::tasks::enterprise_edition::network::ReceiveTaskError;
use meilisearch_types::tasks::{Kind, Status};
use meilisearch_types::{heed, milli};
use reqwest::StatusCode;
use thiserror::Error;
use uuid::Uuid;
use crate::TaskId;
@@ -131,14 +127,6 @@ pub enum Error {
#[error("Aborted task")]
AbortedTask,
#[error("S3 error: status: {status}, body: {body}")]
S3Error { status: StatusCode, body: String },
#[error("S3 HTTP error: {0}")]
S3HttpError(reqwest::Error),
#[error("S3 XML error: {0}")]
S3XmlError(Box<dyn std::error::Error + Send + Sync>),
#[error("S3 bucket error: {0}")]
S3BucketError(rusty_s3::BucketError),
#[error(transparent)]
Dump(#[from] dump::Error),
#[error(transparent)]
@@ -194,15 +182,6 @@ pub enum Error {
#[error(transparent)]
HeedTransaction(heed::Error),
#[error("No network topology change task is currently enqueued or processing")]
ImportTaskWithoutNetworkTask,
#[error("The network task version (`{network_task}`) does not match the import task version (`{import_task}`)")]
NetworkVersionMismatch { network_task: Uuid, import_task: Uuid },
#[error("The import task emanates from an unknown remote `{0}`")]
ImportTaskUnknownRemote(String),
#[error("The import task with key `{0}` was already received")]
ImportTaskAlreadyReceived(DocumentId),
#[cfg(test)]
#[error("Planned failure for tests.")]
PlannedFailure,
@@ -247,10 +226,6 @@ impl Error {
| Error::TaskCancelationWithEmptyQuery
| Error::FromRemoteWhenExporting { .. }
| Error::AbortedTask
| Error::S3Error { .. }
| Error::S3HttpError(_)
| Error::S3XmlError(_)
| Error::S3BucketError(_)
| Error::Dump(_)
| Error::Heed(_)
| Error::Milli { .. }
@@ -260,10 +235,6 @@ impl Error {
| Error::Persist(_)
| Error::FeatureNotEnabled(_)
| Error::Export(_)
| Error::ImportTaskWithoutNetworkTask
| Error::NetworkVersionMismatch { .. }
| Error::ImportTaskAlreadyReceived(_)
| Error::ImportTaskUnknownRemote(_)
| Error::Anyhow(_) => true,
Error::CreateBatch(_)
| Error::CorruptedTaskQueue
@@ -322,18 +293,8 @@ impl ErrorCode for Error {
Error::BatchNotFound(_) => Code::BatchNotFound,
Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
// TODO: not sure of the Code to use
Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
Error::ImportTaskWithoutNetworkTask => Code::ImportTaskWithoutNetworkTask,
Error::NetworkVersionMismatch { .. } => Code::NetworkVersionMismatch,
Error::ImportTaskAlreadyReceived(_) => Code::ImportTaskAlreadyReceived,
Error::ImportTaskUnknownRemote(_) => Code::ImportTaskUnknownRemote,
Error::S3Error { status, .. } if status.is_client_error() => {
Code::InvalidS3SnapshotRequest
}
Error::S3Error { .. } => Code::S3SnapshotServerError,
Error::S3HttpError(_) => Code::S3SnapshotServerError,
Error::S3XmlError(_) => Code::S3SnapshotServerError,
Error::S3BucketError(_) => Code::InvalidS3SnapshotParameters,
Error::Dump(e) => e.error_code(),
Error::Milli { error, .. } => error.error_code(),
Error::ProcessBatchPanicked(_) => Code::Internal,
@@ -365,12 +326,3 @@ impl ErrorCode for Error {
}
}
}
impl From<ReceiveTaskError> for Error {
fn from(value: ReceiveTaskError) -> Self {
match value {
ReceiveTaskError::UnknownRemote(unknown) => Error::ImportTaskUnknownRemote(unknown),
ReceiveTaskError::DuplicateTask(dup) => Error::ImportTaskAlreadyReceived(dup),
}
}
}


@@ -361,11 +361,6 @@ impl IndexMapper {
Ok(())
}
/// The number of indexes in the database
pub fn index_count(&self, rtxn: &RoTxn) -> Result<u64> {
Ok(self.index_mapping.len(rtxn)?)
}
/// Return an index, may open it if it wasn't already opened.
pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
if let Some((current_name, current_index)) =


@@ -21,7 +21,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
let IndexScheduler {
cleanup_enabled: _,
experimental_no_edition_2024_for_dumps: _,
runtime_tasks,
processing_tasks,
env,
version,
queue,
@@ -36,7 +36,6 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
run_loop_iteration: _,
embedders: _,
chat_settings: _,
runtime: _,
} = scheduler;
let rtxn = env.read_txn().unwrap();
@@ -50,7 +49,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
snap.push_str(&format!("index scheduler running on version {indx_sched_version:?}\n"));
}
let processing = runtime_tasks.read().unwrap().processing.clone();
let processing = processing_tasks.read().unwrap().clone();
snap.push_str(&format!("### Autobatching Enabled = {}\n", scheduler.autobatching_enabled));
snap.push_str(&format!(
"### Processing batch {:?}:\n",
@@ -232,7 +231,6 @@ pub fn snapshot_task(task: &Task) -> String {
status,
kind,
network,
custom_metadata,
} = task;
snap.push('{');
snap.push_str(&format!("uid: {uid}, "));
@@ -253,9 +251,6 @@ pub fn snapshot_task(task: &Task) -> String {
if let Some(network) = network {
snap.push_str(&format!("network: {network:?}, "))
}
if let Some(custom_metadata) = custom_metadata {
snap.push_str(&format!("custom_metadata: {custom_metadata:?}"))
}
snap.push('}');
snap
@@ -325,9 +320,6 @@ fn snapshot_details(d: &Details) -> String {
Details::IndexCompaction { index_uid, pre_compaction_size, post_compaction_size } => {
format!("{{ index_uid: {index_uid:?}, pre_compaction_size: {pre_compaction_size:?}, post_compaction_size: {post_compaction_size:?} }}")
}
Details::NetworkTopologyChange { moved_documents, received_documents, message } => {
format!("{{ moved_documents: {moved_documents:?}, received_documents: {received_documents:?}, message: {message:?}")
}
}
}


@@ -68,10 +68,10 @@ use meilisearch_types::milli::vector::{
};
use meilisearch_types::milli::{self, Index};
use meilisearch_types::task_view::TaskView;
use meilisearch_types::tasks::enterprise_edition::network::{DbTaskNetwork, TaskNetwork};
use meilisearch_types::tasks::{KindWithContent, Task};
use meilisearch_types::tasks::{KindWithContent, Task, TaskNetwork};
use meilisearch_types::webhooks::{Webhook, WebhooksDumpView, WebhooksView};
use milli::vector::db::IndexEmbeddingConfig;
use processing::ProcessingTasks;
pub use queue::Query;
use queue::Queue;
use roaring::RoaringBitmap;
@@ -82,7 +82,6 @@ use uuid::Uuid;
use versioning::Versioning;
use crate::index_mapper::IndexMapper;
use crate::processing::RuntimeTasks;
use crate::utils::clamp_to_page_size;
pub(crate) type BEI128 = I128<BE>;
@@ -164,7 +163,7 @@ pub struct IndexScheduler {
pub(crate) env: Env<WithoutTls>,
/// The list of tasks currently processing
pub(crate) runtime_tasks: Arc<RwLock<RuntimeTasks>>,
pub(crate) processing_tasks: Arc<RwLock<ProcessingTasks>>,
/// A database containing only the version of the index-scheduler
pub version: versioning::Versioning,
@@ -217,16 +216,13 @@ pub struct IndexScheduler {
/// A counter that is incremented before every call to [`tick`](IndexScheduler::tick)
#[cfg(test)]
run_loop_iteration: Arc<RwLock<usize>>,
/// The tokio runtime used for asynchronous tasks.
runtime: Option<tokio::runtime::Handle>,
}
impl IndexScheduler {
fn private_clone(&self) -> IndexScheduler {
IndexScheduler {
env: self.env.clone(),
runtime_tasks: self.runtime_tasks.clone(),
processing_tasks: self.processing_tasks.clone(),
version: self.version.clone(),
queue: self.queue.private_clone(),
scheduler: self.scheduler.private_clone(),
@@ -246,7 +242,6 @@ impl IndexScheduler {
run_loop_iteration: self.run_loop_iteration.clone(),
features: self.features.clone(),
chat_settings: self.chat_settings,
runtime: self.runtime.clone(),
}
}
@@ -260,23 +255,13 @@ impl IndexScheduler {
}
/// Create an index scheduler and start its run loop.
#[allow(private_interfaces)] // because test_utils is private
pub fn new(
options: IndexSchedulerOptions,
auth_env: Env<WithoutTls>,
from_db_version: (u32, u32, u32),
runtime: Option<tokio::runtime::Handle>,
) -> Result<Self> {
let this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
this.run();
Ok(this)
}
fn new_without_run(
options: IndexSchedulerOptions,
auth_env: Env<WithoutTls>,
from_db_version: (u32, u32, u32),
runtime: Option<tokio::runtime::Handle>,
#[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
#[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
) -> Result<Self> {
std::fs::create_dir_all(&options.tasks_path)?;
std::fs::create_dir_all(&options.update_file_path)?;
@@ -331,8 +316,9 @@ impl IndexScheduler {
wtxn.commit()?;
Ok(Self {
runtime_tasks: Arc::new(RwLock::new(RuntimeTasks::new())),
// allow unreachable_code to get rid of the warning in the case of a test build.
let this = Self {
processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
version,
queue,
scheduler: Scheduler::new(&options, auth_env),
@@ -347,32 +333,15 @@ impl IndexScheduler {
webhooks: Arc::new(webhooks),
embedders: Default::default(),
#[cfg(test)] // Will be replaced in `new_tests` in test environments
test_breakpoint_sdr: crossbeam_channel::bounded(0).0,
#[cfg(test)] // Will be replaced in `new_tests` in test environments
planned_failures: Default::default(),
#[cfg(test)]
test_breakpoint_sdr,
#[cfg(test)]
planned_failures,
#[cfg(test)]
run_loop_iteration: Arc::new(RwLock::new(0)),
features,
chat_settings,
runtime,
})
}
/// Create an index scheduler and start its run loop.
#[cfg(test)]
fn new_test(
options: IndexSchedulerOptions,
auth_env: Env<WithoutTls>,
from_db_version: (u32, u32, u32),
runtime: Option<tokio::runtime::Handle>,
test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
planned_failures: Vec<(usize, test_utils::FailureLocation)>,
) -> Result<Self> {
let mut this = Self::new_without_run(options, auth_env, from_db_version, runtime)?;
this.test_breakpoint_sdr = test_breakpoint_sdr;
this.planned_failures = planned_failures;
};
this.run();
Ok(this)
@@ -640,19 +609,19 @@ impl IndexScheduler {
/// 3. The number of times the properties appeared.
pub fn get_stats(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
let rtxn = self.read_txn()?;
self.queue.get_stats(&rtxn, &self.runtime_tasks.read().unwrap().processing)
self.queue.get_stats(&rtxn, &self.processing_tasks.read().unwrap())
}
// Return true if there is at least one task that is processing.
pub fn is_task_processing(&self) -> Result<bool> {
Ok(!self.runtime_tasks.read().unwrap().processing.processing.is_empty())
Ok(!self.processing_tasks.read().unwrap().processing.is_empty())
}
/// Return true iff there is at least one task associated with this index
/// that is processing.
pub fn is_index_processing(&self, index: &str) -> Result<bool> {
let rtxn = self.env.read_txn()?;
let processing_tasks = self.runtime_tasks.read().unwrap().processing.processing.clone();
let processing_tasks = self.processing_tasks.read().unwrap().processing.clone();
let index_tasks = self.queue.tasks.index_tasks(&rtxn, index)?;
let nbr_index_processing_tasks = processing_tasks.intersection_len(&index_tasks);
Ok(nbr_index_processing_tasks > 0)
@@ -678,8 +647,8 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(Vec<Task>, u64)> {
let rtxn = self.read_txn()?;
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_tasks_from_authorized_indexes(&rtxn, query, filters, processing)
let processing = self.processing_tasks.read().unwrap();
self.queue.get_tasks_from_authorized_indexes(&rtxn, query, filters, &processing)
}
/// Return the task ids matching the query along with the total number of tasks
@@ -697,18 +666,18 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(RoaringBitmap, u64)> {
let rtxn = self.read_txn()?;
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, processing)
let processing = self.processing_tasks.read().unwrap();
self.queue.get_task_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
}
pub fn set_task_network(&self, task_id: TaskId, network: DbTaskNetwork) -> Result<Task> {
pub fn set_task_network(&self, task_id: TaskId, network: TaskNetwork) -> Result<()> {
let mut wtxn = self.env.write_txn()?;
let mut task =
self.queue.tasks.get_task(&wtxn, task_id)?.ok_or(Error::TaskNotFound(task_id))?;
task.network = Some(network);
self.queue.tasks.all_tasks.put(&mut wtxn, &task_id, &task)?;
wtxn.commit()?;
Ok(task)
Ok(())
}
/// Return the batches matching the query from the user's point of view along
@@ -726,8 +695,8 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(Vec<Batch>, u64)> {
let rtxn = self.read_txn()?;
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_batches_from_authorized_indexes(&rtxn, query, filters, processing)
let processing = self.processing_tasks.read().unwrap();
self.queue.get_batches_from_authorized_indexes(&rtxn, query, filters, &processing)
}
/// Return the batch ids matching the query along with the total number of batches
@@ -745,8 +714,8 @@ impl IndexScheduler {
filters: &meilisearch_auth::AuthFilter,
) -> Result<(RoaringBitmap, u64)> {
let rtxn = self.read_txn()?;
let processing = &self.runtime_tasks.read().unwrap().processing;
self.queue.get_batch_ids_from_authorized_indexes(&rtxn, query, filters, processing)
let processing = self.processing_tasks.read().unwrap();
self.queue.get_batch_ids_from_authorized_indexes(&rtxn, query, filters, &processing)
}
/// Register a new task in the scheduler.
@@ -757,31 +726,6 @@ impl IndexScheduler {
kind: KindWithContent,
task_id: Option<TaskId>,
dry_run: bool,
) -> Result<Task> {
self.register_with_custom_metadata(kind, task_id, None, dry_run, None)
}
/// Register a new task in the scheduler, with metadata.
///
/// If it fails and data was associated with the task, it tries to delete the associated data.
///
/// # Parameters
///
/// - task_network: network of the task to check.
///
/// If the task is an import task, only accept it if:
///
/// 1. There is an ongoing network topology change task
/// 2. The task to register matches the network version of the network topology change task
///
/// Always accept the task if it is not an import task.
pub fn register_with_custom_metadata(
&self,
kind: KindWithContent,
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
task_network: Option<TaskNetwork>,
) -> Result<Task> {
// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
@@ -792,86 +736,13 @@ impl IndexScheduler {
}
let mut wtxn = self.env.write_txn()?;
if let Some(TaskNetwork::Import { import_from, network_change, metadata }) = &task_network {
let mut network_tasks = self
.queue
.tasks
.get_kind(&wtxn, meilisearch_types::tasks::Kind::NetworkTopologyChange)?;
if network_tasks.is_empty() {
return Err(Error::ImportTaskWithoutNetworkTask);
}
let network_task = {
let processing = self.runtime_tasks.read().unwrap().processing.processing.clone();
if processing.is_disjoint(&network_tasks) {
let enqueued = self
.queue
.tasks
.get_status(&wtxn, meilisearch_types::tasks::Status::Enqueued)?;
network_tasks &= enqueued;
if let Some(network_task) = network_tasks.into_iter().next() {
network_task
} else {
return Err(Error::ImportTaskWithoutNetworkTask);
}
} else {
network_tasks &= &*processing;
network_tasks.into_iter().next().unwrap()
}
};
let mut network_task = self.queue.tasks.get_task(&wtxn, network_task)?.unwrap();
let network_task_version = network_task
.network
.as_ref()
.map(|network| network.network_version())
.unwrap_or_default();
if network_task_version != network_change.network_version {
return Err(Error::NetworkVersionMismatch {
network_task: network_task_version,
import_task: network_change.network_version,
});
}
let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut network_task.kind
else {
return Err(Error::CorruptedTaskQueue);
};
network_topology_change.receive_remote_task(
&import_from.remote_name,
&import_from.index_name,
metadata.task_key,
import_from.document_count,
metadata.index_count,
metadata.total_index_documents,
)?;
self.queue.tasks.update_task(&mut wtxn, &mut network_task)?;
}
let task = self.queue.register(
&mut wtxn,
&kind,
task_id,
custom_metadata,
dry_run,
task_network.map(DbTaskNetwork::from),
)?;
let task = self.queue.register(&mut wtxn, &kind, task_id, dry_run)?;
// If the registered task is a task cancelation
// we inform the processing tasks to stop (if necessary).
if let KindWithContent::TaskCancelation { tasks, .. } = kind {
let tasks_to_cancel = RoaringBitmap::from_iter(tasks);
if self
.runtime_tasks
.read()
.unwrap()
.processing
.must_cancel_processing_tasks(&tasks_to_cancel)
if self.processing_tasks.read().unwrap().must_cancel_processing_tasks(&tasks_to_cancel)
{
self.scheduler.must_stop_processing.must_stop();
}
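The import-task acceptance rule removed above (spelled out in the doc comment of register_with_custom_metadata) condenses to two checks: a network topology change task must be ongoing, and its network version must match the import task's. A hypothetical, self-contained condensation of that check (not the scheduler's real types):

    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    struct NetworkVersion(u128);

    struct ImportTask {
        network_version: NetworkVersion,
    }

    fn accept_import(
        ongoing_topology_change: Option<NetworkVersion>,
        import: &ImportTask,
    ) -> Result<(), String> {
        // 1. There must be a network topology change task enqueued or processing.
        let current = ongoing_topology_change
            .ok_or_else(|| "no network topology change task is enqueued or processing".to_string())?;
        // 2. The import task must carry that task's network version.
        if current != import.network_version {
            return Err(format!(
                "network task version {current:?} does not match import task version {:?}",
                import.network_version
            ));
        }
        Ok(())
    }

    fn main() {
        let v = NetworkVersion(1);
        assert!(accept_import(Some(v), &ImportTask { network_version: v }).is_ok());
        assert!(accept_import(None, &ImportTask { network_version: v }).is_err());
    }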


@@ -6,28 +6,6 @@ use roaring::RoaringBitmap;
use crate::utils::ProcessingBatch;
pub struct RuntimeTasks {
pub processing: ProcessingTasks,
pub enqueued_network: EnqueuedNetworkTasks,
}
impl RuntimeTasks {
pub(crate) fn new() -> Self {
Self { processing: ProcessingTasks::new(), enqueued_network: Default::default() }
}
}
#[derive(Default)]
pub struct EnqueuedNetworkTasks {
tasks: RoaringBitmap,
}
impl EnqueuedNetworkTasks {
pub fn swap(&mut self, mut new: RoaringBitmap) -> RoaringBitmap {
std::mem::swap(&mut self.tasks, &mut new);
new
}
}
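The removed EnqueuedNetworkTasks::swap is the replace-and-return-old idiom: install a new set and hand the previous one back without cloning. A standalone sketch, with a Vec standing in for the RoaringBitmap:

    use std::mem;

    #[derive(Default)]
    struct EnqueuedTasks {
        tasks: Vec<u32>, // stand-in for the RoaringBitmap above
    }

    impl EnqueuedTasks {
        // Install `new` and return the previous set in one move.
        fn swap(&mut self, mut new: Vec<u32>) -> Vec<u32> {
            mem::swap(&mut self.tasks, &mut new);
            new
        }
    }

    fn main() {
        let mut enqueued = EnqueuedTasks { tasks: vec![1, 2, 3] };
        let old = enqueued.swap(vec![4, 5]);
        assert_eq!(old, vec![1, 2, 3]);
        assert_eq!(enqueued.tasks, vec![4, 5]);
    }

Note that std::mem::replace(&mut self.tasks, new) expresses the same operation in a single call.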
#[derive(Clone, Default)]
pub struct ProcessingTasks {
pub batch: Option<Arc<ProcessingBatch>>,


@@ -26,7 +26,7 @@ fn query_batches_from_and_limit() {
handle.advance_n_successful_batches(3);
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "processed_all_tasks");
let proc = index_scheduler.runtime_tasks.read().unwrap().processing.clone();
let proc = index_scheduler.processing_tasks.read().unwrap().clone();
let rtxn = index_scheduler.env.read_txn().unwrap();
let query = Query { limit: Some(0), ..Default::default() };
let (batches, _) = index_scheduler
@@ -359,7 +359,7 @@ fn query_batches_special_rules() {
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.runtime_tasks.read().unwrap().processing.clone();
let proc = index_scheduler.processing_tasks.read().unwrap().clone();
let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
let (batches, _) = index_scheduler


@@ -15,7 +15,6 @@ use file_store::FileStore;
use meilisearch_types::batches::BatchId;
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
use meilisearch_types::milli::{CboRoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::enterprise_edition::network::DbTaskNetwork;
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use time::format_description::well_known::Rfc3339;
@@ -258,9 +257,7 @@ impl Queue {
wtxn: &mut RwTxn,
kind: &KindWithContent,
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
network: Option<DbTaskNetwork>,
) -> Result<Task> {
let next_task_id = self.tasks.next_task_id(wtxn)?;
@@ -282,8 +279,7 @@ impl Queue {
details: kind.default_details(),
status: Status::Enqueued,
kind: kind.clone(),
network,
custom_metadata,
network: None,
};
// For deletion and cancelation tasks, we want to make extra sure that they
// don't attempt to delete/cancel tasks that are newer than themselves.
@@ -348,9 +344,7 @@ impl Queue {
tasks: to_delete,
},
None,
None,
false,
None,
)?;
Ok(())


@@ -27,7 +27,7 @@ fn query_tasks_from_and_limit() {
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "processed_all_tasks");
let rtxn = index_scheduler.env.read_txn().unwrap();
let processing = &index_scheduler.runtime_tasks.read().unwrap().processing;
let processing = index_scheduler.processing_tasks.read().unwrap();
let query = Query { limit: Some(0), ..Default::default() };
let (tasks, _) = index_scheduler
.queue
@@ -317,7 +317,7 @@ fn query_tasks_special_rules() {
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let proc = index_scheduler.processing_tasks.read().unwrap();
let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
let (tasks, _) = index_scheduler
@@ -414,7 +414,7 @@ fn query_tasks_canceled_by() {
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "start");
let rtxn = index_scheduler.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let proc = index_scheduler.processing_tasks.read().unwrap();
let query = Query { canceled_by: Some(vec![task_cancelation.uid]), ..Query::default() };
let (tasks, _) = index_scheduler
.queue


@@ -203,30 +203,26 @@ fn test_disable_auto_deletion_of_tasks() {
)
.unwrap();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
drop(rtxn);
drop(proc);
// now we're above the max number of tasks
// and if we try to advance in the tick function no new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_not_been_enqueued");
drop(rtxn);
drop(proc);
}
#[test]
@@ -271,69 +267,59 @@ fn test_auto_deletion_of_tasks() {
)
.unwrap();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]" }), name: "task_queue_is_full");
drop(rtxn);
drop(proc);
{
// now we're above the max number of tasks
// and if we try to advance in the tick function a new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
}
// now we're above the max number of tasks
// and if we try to advance in the tick function a new task deletion should be enqueued
handle.advance_till([Start, BatchCreated]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_enqueued");
drop(rtxn);
drop(proc);
{
handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
}
handle.advance_till([InsideProcessBatch, ProcessBatchSucceeded, AfterProcessing]);
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "task_deletion_have_been_processed");
drop(rtxn);
drop(proc);
handle.advance_one_failed_batch();
// a new task deletion has been enqueued
handle.advance_one_successful_batch();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "after_the_second_task_deletion");
drop(rtxn);
drop(proc);
handle.advance_one_failed_batch();
handle.advance_one_successful_batch();
{
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = &index_scheduler.runtime_tasks.read().unwrap().processing;
let tasks = index_scheduler
.queue
.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc)
.unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
}
let rtxn = index_scheduler.env.read_txn().unwrap();
let proc = index_scheduler.processing_tasks.read().unwrap();
let tasks =
index_scheduler.queue.get_task_ids(&rtxn, &Query { ..Default::default() }, &proc).unwrap();
let tasks = index_scheduler.queue.tasks.get_existing_tasks(&rtxn, tasks).unwrap();
snapshot!(json_string!(tasks, { "[].enqueuedAt" => "[date]", "[].startedAt" => "[date]", "[].finishedAt" => "[date]", ".**.original_filter" => "[filter]", ".**.query" => "[query]" }), name: "everything_has_been_processed");
drop(rtxn);
drop(proc);
}
#[test]


@@ -74,7 +74,6 @@ impl From<KindWithContent> for AutobatchKind {
| KindWithContent::DumpCreation { .. }
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::NetworkTopologyChange(_)
| KindWithContent::SnapshotCreation => {
panic!("The autobatcher should never be called with tasks with special priority or that don't apply to an index.")
}
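The `panic!` arm above deliberately spells out every non-autobatchable kind instead of hiding behind a `_` wildcard: adding a variant to `KindWithContent` then fails to compile until the new kind is classified. A minimal sketch of that exhaustive-match pattern, with hypothetical kinds for illustration:

// Hypothetical, simplified task kinds for illustration.
enum Kind {
    DocumentAddition,
    SettingsUpdate,
    DumpCreation,
    SnapshotCreation,
}

fn is_autobatchable(kind: &Kind) -> bool {
    match kind {
        Kind::DocumentAddition | Kind::SettingsUpdate => true,
        // No `_` arm on purpose: adding a variant to `Kind` is a
        // compile error until it is classified here.
        Kind::DumpCreation | Kind::SnapshotCreation => false,
    }
}

fn main() {
    assert!(is_autobatchable(&Kind::DocumentAddition));
    assert!(!is_autobatchable(&Kind::DumpCreation));
}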

@@ -4,7 +4,6 @@ use std::io::ErrorKind;
use meilisearch_types::heed::RoTxn;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::settings::{Settings, Unchecked};
use meilisearch_types::tasks::enterprise_edition::network::NetworkTopologyState;
use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use uuid::Uuid;
@@ -60,13 +59,6 @@ pub(crate) enum Batch {
index_uid: String,
task: Task,
},
NetworkIndexBatch {
network_task: Task,
inner_batch: Box<Batch>,
},
NetworkWait {
task: Task,
},
}
#[derive(Debug)]
@@ -148,14 +140,9 @@ impl Batch {
..
} => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
},
Batch::IndexSwap { task } | Batch::NetworkWait { task } => {
Batch::IndexSwap { task } => {
RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
}
Batch::NetworkIndexBatch { network_task, inner_batch } => {
let mut tasks = inner_batch.ids();
tasks.insert(network_task.uid);
tasks
}
}
}
@@ -169,14 +156,12 @@ impl Batch {
| Dump(_)
| Export { .. }
| UpgradeDatabase { .. }
| NetworkWait { .. }
| IndexSwap { .. } => None,
IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. }
| IndexUpdate { index_uid, .. }
| IndexDeletion { index_uid, .. }
| IndexCompaction { index_uid, .. } => Some(index_uid),
NetworkIndexBatch { network_task: _, inner_batch } => inner_batch.index_uid(),
}
}
}
@@ -199,8 +184,6 @@ impl fmt::Display for Batch {
Batch::IndexCompaction { .. } => f.write_str("IndexCompaction")?,
Batch::Export { .. } => f.write_str("Export")?,
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
Batch::NetworkIndexBatch { .. } => f.write_str("NetworkTopologyChange")?,
Batch::NetworkWait { .. } => f.write_str("NetworkTopologyChange")?,
};
match index_uid {
Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
@@ -477,6 +460,7 @@ impl IndexScheduler {
let mut current_batch = ProcessingBatch::new(batch_id);
let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
let count_total_enqueued = enqueued.len();
let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;
// 0. we get the last task to cancel.
@@ -535,15 +519,7 @@ impl IndexScheduler {
return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
}
// 3. Check for enqueued network topology changes
let network_changes =
self.queue.tasks.get_kind(rtxn, Kind::NetworkTopologyChange)? & enqueued;
if let Some(task_id) = network_changes.iter().next() {
let task = self.queue.tasks.get_task(rtxn, task_id)?.unwrap();
return self.start_processing_network(rtxn, task, enqueued, current_batch);
}
// 4. we get the next task to compact
// 3. we get the next task to compact
let to_compact = self.queue.tasks.get_kind(rtxn, Kind::IndexCompaction)? & enqueued;
if let Some(task_id) = to_compact.min() {
let mut task =
@@ -558,7 +534,7 @@ impl IndexScheduler {
return Ok(Some((Batch::IndexCompaction { index_uid, task }, current_batch)));
}
// 5. we batch the export.
// 4. we batch the export.
let to_export = self.queue.tasks.get_kind(rtxn, Kind::Export)? & enqueued;
if !to_export.is_empty() {
let task_id = to_export.iter().next().expect("There must be at least one export task");
@@ -569,7 +545,7 @@ impl IndexScheduler {
return Ok(Some((Batch::Export { task }, current_batch)));
}
// 6. we batch the snapshot.
// 5. we batch the snapshot.
let to_snapshot = self.queue.tasks.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
if !to_snapshot.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
@@ -579,7 +555,7 @@ impl IndexScheduler {
return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
}
// 7. we batch the dumps.
// 6. we batch the dumps.
let to_dump = self.queue.tasks.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
if let Some(to_dump) = to_dump.min() {
let mut task =
@@ -592,25 +568,8 @@ impl IndexScheduler {
return Ok(Some((Batch::Dump(task), current_batch)));
}
// 8. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let (batch, current_batch) =
self.create_next_batch_unprioritized(rtxn, &enqueued, current_batch)?;
Ok(batch.map(|batch| (batch, current_batch)))
}
fn create_next_batch_unprioritized(
&self,
rtxn: &RoTxn,
enqueued: &RoaringBitmap,
mut current_batch: ProcessingBatch,
) -> Result<(Option<Batch>, ProcessingBatch)> {
let count_total_enqueued = enqueued.len();
let task_id = if let Some(task_id) = enqueued.min() {
task_id
} else {
return Ok((None, current_batch));
};
// 7. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
@@ -627,7 +586,7 @@ impl IndexScheduler {
kind: Kind::IndexSwap,
id: task.uid,
});
return Ok((Some(Batch::IndexSwap { task }), current_batch));
return Ok(Some((Batch::IndexSwap { task }, current_batch)));
};
let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
@@ -692,7 +651,7 @@ impl IndexScheduler {
autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
{
current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
let batch = self
return Ok(self
.create_next_batch_index(
rtxn,
index_name.to_string(),
@@ -700,127 +659,11 @@ impl IndexScheduler {
&mut current_batch,
create_index,
)?
.map(|batch| batch);
return Ok((batch, current_batch));
.map(|batch| (batch, current_batch)));
}
// If we found no tasks, then we were notified for something that got autobatched
// somehow and there is nothing to do.
Ok((None, current_batch))
}
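The numbered steps in this hunk encode a fixed priority order: cancelation, then task deletions, compaction, export, snapshot, dump, and only then the regular autobatched work. A minimal sketch of scanning queues in a fixed order and returning the first non-empty batch, with hypothetical types rather than the scheduler's own:

// Hypothetical, simplified batch kinds for illustration.
#[derive(Debug, PartialEq)]
enum Batch {
    TaskDeletion(Vec<u32>),
    Dump(u32),
    Regular(Vec<u32>),
}

fn create_next_batch(deletions: &[u32], dumps: &[u32], regular: &[u32]) -> Option<Batch> {
    // 1. Deletions preempt everything else and are batched together.
    if !deletions.is_empty() {
        return Some(Batch::TaskDeletion(deletions.to_vec()));
    }
    // 2. Dumps are processed one at a time.
    if let Some(&uid) = dumps.first() {
        return Some(Batch::Dump(uid));
    }
    // 3. Everything else goes through regular (auto)batching.
    if !regular.is_empty() {
        return Some(Batch::Regular(regular.to_vec()));
    }
    None
}

fn main() {
    assert_eq!(create_next_batch(&[], &[7], &[1, 2]), Some(Batch::Dump(7)));
}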
fn start_processing_network(
&self,
rtxn: &RoTxn,
mut task: Task,
enqueued: &RoaringBitmap,
current_batch: ProcessingBatch,
) -> Result<Option<(Batch, ProcessingBatch)>> {
let change_version =
task.network.as_ref().map(|network| network.network_version()).unwrap_or_default();
let KindWithContent::NetworkTopologyChange(network_topology_change) = &task.kind else {
panic!("inconsistent kind with content")
};
match network_topology_change.state() {
NetworkTopologyState::WaitingForOlderTasks => {
let mut old_tasks = RoaringBitmap::new();
for task_id in enqueued {
let task = self
.queue
.tasks
.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;
let has_index = task.index_uid().is_some();
if !has_index {
continue;
}
let has_older_network_version = task
.network
.map(|network| network.network_version() <= change_version)
// if there is no version, we never retain the task
.unwrap_or_default();
if has_older_network_version {
old_tasks.push(task_id);
}
}
let res = self.create_next_batch_unprioritized(rtxn, &old_tasks, current_batch);
self.runtime_tasks.write().unwrap().enqueued_network.swap(old_tasks);
let (batch, mut current_batch) = res?;
current_batch.processing(Some(&mut task));
let batch = match batch {
Some(batch) => {
let inner_batch = Box::new(batch);
Batch::NetworkIndexBatch { network_task: task, inner_batch }
}
None => Batch::NetworkWait { task },
};
Ok(Some((batch, current_batch)))
}
NetworkTopologyState::ImportingDocuments => {
let mut import_tasks = RoaringBitmap::new();
for task_id in enqueued {
let task = self
.queue
.tasks
.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;
let has_index = task.index_uid().is_some();
if !has_index {
continue;
}
let is_import_task = task
.network
.map(|network| {
network.network_version() == change_version
&& network.import_data().is_some()
})
// if there is no version, we never retain the task
.unwrap_or_default();
if is_import_task {
import_tasks.push(task_id);
}
}
let res = self.create_next_batch_unprioritized(rtxn, &import_tasks, current_batch);
self.runtime_tasks.write().unwrap().enqueued_network.swap(import_tasks);
let (batch, mut current_batch) = res?;
current_batch.processing(Some(&mut task));
let batch = match batch {
Some(batch) => {
let inner_batch = Box::new(batch);
Batch::NetworkIndexBatch { network_task: task, inner_batch }
}
None => Batch::NetworkWait { task },
};
Ok(Some((batch, current_batch)))
}
NetworkTopologyState::ExportingDocuments | NetworkTopologyState::Finished => {
Ok(Some((Batch::NetworkWait { task }, current_batch)))
}
}
Ok(None)
}
}
pub enum BatchOutcome {
NoTaskToProcess,
Batch { batch: Batch, processing: ProcessingBatch },
}
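The deleted `start_processing_network` walks a small state machine: wait for tasks older than the topology change, import the re-balanced documents, export, then finish. A minimal sketch of those transitions as reconstructed from the removed match arms (the real `update_state` lives in meilisearch_types and may gate each transition on remaining work):

// Reconstruction of the removed states, for illustration only.
#[derive(Clone, Copy, Debug, PartialEq)]
enum NetworkTopologyState {
    WaitingForOlderTasks,
    ImportingDocuments,
    ExportingDocuments,
    Finished,
}

impl NetworkTopologyState {
    /// Advance to the next phase once the current one has no work left.
    fn update_state(self) -> Self {
        use NetworkTopologyState::*;
        match self {
            WaitingForOlderTasks => ImportingDocuments,
            ImportingDocuments => ExportingDocuments,
            ExportingDocuments => Finished,
            Finished => Finished,
        }
    }
}

fn main() {
    let mut state = NetworkTopologyState::WaitingForOlderTasks;
    while state != NetworkTopologyState::Finished {
        state = state.update_state();
    }
    assert_eq!(state, NetworkTopologyState::Finished);
}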

@@ -25,7 +25,6 @@ use convert_case::{Case, Casing as _};
use meilisearch_types::error::ResponseError;
use meilisearch_types::heed::{Env, WithoutTls};
use meilisearch_types::milli;
use meilisearch_types::milli::update::S3SnapshotOptions;
use meilisearch_types::tasks::Status;
use process_batch::ProcessBatchInfo;
use rayon::current_num_threads;
@@ -88,14 +87,11 @@ pub struct Scheduler {
/// Snapshot compaction status.
pub(crate) experimental_no_snapshot_compaction: bool,
/// S3 Snapshot options.
pub(crate) s3_snapshot_options: Option<S3SnapshotOptions>,
}
impl Scheduler {
pub(crate) fn private_clone(&self) -> Self {
Self {
pub(crate) fn private_clone(&self) -> Scheduler {
Scheduler {
must_stop_processing: self.must_stop_processing.clone(),
wake_up: self.wake_up.clone(),
autobatching_enabled: self.autobatching_enabled,
@@ -107,52 +103,23 @@ impl Scheduler {
version_file_path: self.version_file_path.clone(),
embedding_cache_cap: self.embedding_cache_cap,
experimental_no_snapshot_compaction: self.experimental_no_snapshot_compaction,
s3_snapshot_options: self.s3_snapshot_options.clone(),
}
}
pub fn new(options: &IndexSchedulerOptions, auth_env: Env<WithoutTls>) -> Scheduler {
let IndexSchedulerOptions {
version_file_path,
auth_path: _,
tasks_path: _,
update_file_path: _,
indexes_path: _,
snapshots_path,
dumps_path,
cli_webhook_url: _,
cli_webhook_authorization: _,
task_db_size: _,
index_base_map_size: _,
enable_mdb_writemap: _,
index_growth_amount: _,
index_count: _,
indexer_config,
autobatching_enabled,
cleanup_enabled: _,
max_number_of_tasks: _,
max_number_of_batched_tasks,
batched_tasks_size_limit,
instance_features: _,
auto_upgrade: _,
embedding_cache_cap,
experimental_no_snapshot_compaction,
} = options;
Scheduler {
must_stop_processing: MustStopProcessing::default(),
// we want to start the loop right away in case meilisearch was ctrl+Ced while processing things
wake_up: Arc::new(SignalEvent::auto(true)),
autobatching_enabled: *autobatching_enabled,
max_number_of_batched_tasks: *max_number_of_batched_tasks,
batched_tasks_size_limit: *batched_tasks_size_limit,
dumps_path: dumps_path.clone(),
snapshots_path: snapshots_path.clone(),
autobatching_enabled: options.autobatching_enabled,
max_number_of_batched_tasks: options.max_number_of_batched_tasks,
batched_tasks_size_limit: options.batched_tasks_size_limit,
dumps_path: options.dumps_path.clone(),
snapshots_path: options.snapshots_path.clone(),
auth_env,
version_file_path: version_file_path.clone(),
embedding_cache_cap: *embedding_cache_cap,
experimental_no_snapshot_compaction: *experimental_no_snapshot_compaction,
s3_snapshot_options: indexer_config.s3_snapshot_options.clone(),
version_file_path: options.version_file_path.clone(),
embedding_cache_cap: options.embedding_cache_cap,
experimental_no_snapshot_compaction: options.experimental_no_snapshot_compaction,
}
}
}
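The removed constructor destructures the whole `IndexSchedulerOptions`, naming every ignored field with `_`, so that adding a field to the options struct is a compile error here until it is explicitly used or discarded; the re-added version goes back to plain field access. A minimal sketch of the exhaustive-destructuring pattern, with a hypothetical options struct:

// Hypothetical options struct for illustration.
struct Options {
    dumps_path: String,
    snapshots_path: String,
    cleanup_enabled: bool,
}

struct Scheduler {
    dumps_path: String,
    snapshots_path: String,
}

impl Scheduler {
    fn new(options: &Options) -> Scheduler {
        // Exhaustive destructuring: adding a field to `Options`
        // breaks compilation here until it is handled or ignored.
        let Options { dumps_path, snapshots_path, cleanup_enabled: _ } = options;
        Scheduler {
            dumps_path: dumps_path.clone(),
            snapshots_path: snapshots_path.clone(),
        }
    }
}

fn main() {
    let s = Scheduler::new(&Options {
        dumps_path: "dumps".into(),
        snapshots_path: "snapshots".into(),
        cleanup_enabled: true,
    });
    assert_eq!(s.dumps_path, "dumps");
}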
@@ -200,10 +167,9 @@ impl IndexScheduler {
// We reset the must_stop flag to be sure that we don't stop processing tasks
self.scheduler.must_stop_processing.reset();
let progress = self
.runtime_tasks
.processing_tasks
.write()
.unwrap()
.processing
// We can clone the processing batch here because we don't want its modification to affect the view of the processing batches
.start_processing(processing_batch.clone(), ids.clone());
@@ -454,7 +420,7 @@ impl IndexScheduler {
// We should stop processing AFTER everything is processed and written to disk; otherwise, a batch (which only lives in RAM) may appear in the processing tasks
// and then become « not found » for some time, until everything is written and the final commit is made.
self.runtime_tasks.write().unwrap().processing.stop_processing();
self.processing_tasks.write().unwrap().stop_processing();
// Once the tasks are committed, we should delete all the update files associated ASAP to avoid leaking files in case of a restart
tracing::debug!("Deleting the update files");
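Both sides of this hunk keep the same concurrency scheme: the set of currently processing tasks sits behind an `RwLock`, the scheduler flips it at batch start and only after the final commit at batch end, and readers get a consistent view in between. A simplified sketch with `std::sync::RwLock` (the real type also carries progress and batch metadata):

use std::collections::BTreeSet;
use std::sync::RwLock;

#[derive(Default)]
struct ProcessingTasks {
    processing: BTreeSet<u32>,
}

impl ProcessingTasks {
    fn start_processing(&mut self, ids: impl IntoIterator<Item = u32>) {
        self.processing = ids.into_iter().collect();
    }
    fn stop_processing(&mut self) {
        self.processing.clear();
    }
}

fn main() {
    let tasks = RwLock::new(ProcessingTasks::default());
    tasks.write().unwrap().start_processing([1, 2, 3]);
    assert!(tasks.read().unwrap().processing.contains(&2));
    // Stop only after everything is committed, as the comment above explains.
    tasks.write().unwrap().stop_processing();
    assert!(tasks.read().unwrap().processing.is_empty());
}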

@@ -1,22 +1,15 @@
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fs::{remove_file, File};
use std::io::{ErrorKind, Seek, SeekFrom};
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::Ordering;
use std::time::Duration;
use bumpalo::Bump;
use byte_unit::Byte;
use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
use meilisearch_types::enterprise_edition::network::Remote;
use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::progress::{EmbedderStats, Progress, VariableNameStep};
use meilisearch_types::milli::update::new::indexer;
use meilisearch_types::milli::update::new::indexer::enterprise_edition::sharding::Shards;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::{self, ChannelCongestion};
use meilisearch_types::tasks::enterprise_edition::network::{NetworkTopologyState, Origin};
use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
use milli::update::Settings as MilliSettings;
@@ -30,7 +23,6 @@ use crate::processing::{
IndexCompaction, InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress,
TaskDeletionProgress, UpdateIndexProgress,
};
use crate::scheduler::process_export::{ExportContext, ExportOptions, TargetInstance};
use crate::utils::{
self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
ProcessingBatch,
@@ -547,209 +539,9 @@ impl IndexScheduler {
Ok((tasks, ProcessBatchInfo::default()))
}
Batch::NetworkIndexBatch { mut network_task, inner_batch } => {
let (mut tasks, info) =
self.process_batch(*inner_batch, current_batch, progress)?;
let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut network_task.kind
else {
return Err(Error::CorruptedTaskQueue);
};
for task in &tasks {
let Some(network) = task.network.as_ref() else {
continue;
};
let Some(import) = network.import_data() else {
continue;
};
network_topology_change.process_remote_tasks(
&import.remote_name,
&import.index_name,
import.document_count,
);
}
tasks.push(network_task);
Ok((tasks, info))
}
Batch::NetworkWait { mut task } => {
let KindWithContent::NetworkTopologyChange(network_topology_change) =
&mut task.kind
else {
tracing::error!("network topology change task has the wrong kind with content");
return Err(Error::CorruptedTaskQueue);
};
let Some(task_network) = &task.network else {
tracing::error!("network topology change task has no network");
return Err(Error::CorruptedTaskQueue);
};
let origin;
let origin = match task_network.origin() {
Some(origin) => origin,
None => {
let myself =
network_topology_change.in_name().expect("origin is not the leader");
origin = Origin {
remote_name: myself.to_string(),
task_uid: task.uid,
network_version: task_network.network_version(),
};
&origin
}
};
if let Some((remotes, out_name)) = network_topology_change.export_to_process() {
self.balance_documents(
remotes,
out_name,
network_topology_change.in_name(),
origin,
&progress,
&self.scheduler.must_stop_processing,
)?;
}
network_topology_change.update_state();
if network_topology_change.state() == NetworkTopologyState::Finished {
task.status = Status::Succeeded;
}
Ok((vec![task], Default::default()))
}
}
}
fn balance_documents(
&self,
remotes: &BTreeMap<String, Remote>,
out_name: &str,
in_name: Option<&str>,
network_change_origin: &Origin,
progress: &Progress,
must_stop_processing: &crate::scheduler::MustStopProcessing,
) -> crate::Result<()> {
let new_shards = Shards::from_remotes_local(remotes.keys().map(String::as_str), in_name);
// TECHDEBT: this spawns a `ureq` agent in addition to `reqwest`. We probably want to harmonize all of this.
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let mut indexer_alloc = Bump::new();
let scheduler_rtxn = self.env.read_txn()?;
let index_count = self.index_mapper.index_count(&scheduler_rtxn)?;
// process in batches of 20 MiB. Allow for compression? Don't forget about embeddings
let _: Vec<()> = self.index_mapper.try_for_each_index(
&scheduler_rtxn,
|index_uid, index| -> crate::Result<()> {
indexer_alloc.reset();
let err = |err| Error::from_milli(err, Some(index_uid.to_string()));
let index_rtxn = index.read_txn()?;
let all_docids = index.external_documents_ids();
let mut documents_to_move_to: HashMap<String, RoaringBitmap> = HashMap::new();
let mut documents_to_delete = RoaringBitmap::new();
for res in all_docids.iter(&index_rtxn)? {
let (external_docid, docid) = res?;
match new_shards.processing_shard(external_docid) {
Some(shard) if shard.is_own => continue,
Some(shard) => {
documents_to_move_to
.entry(shard.name.clone())
.or_default()
.insert(docid);
}
None => {
documents_to_delete.insert(docid);
}
}
}
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
for (remote, documents_to_move) in documents_to_move_to {
// TODO: justify the unwrap
let remote = remotes.get(&remote).unwrap();
let target = TargetInstance {
base_url: &remote.url,
api_key: remote.write_api_key.as_deref(),
};
let options = ExportOptions {
index_uid,
payload_size: None,
override_settings: false,
export_mode: super::process_export::ExportMode::NetworkBalancing {
index_count,
export_old_remote_name: out_name,
network_change_origin,
},
};
let ctx = ExportContext {
index,
index_rtxn: &index_rtxn,
universe: &documents_to_move,
progress,
agent: &agent,
must_stop_processing,
};
self.export_one_index(target, options, ctx)?;
documents_to_delete |= documents_to_move;
}
if documents_to_delete.is_empty() {
return Ok(());
}
let mut new_fields_ids_map = fields_ids_map.clone();
// candidates not empty => index not empty => a primary key is set
let primary_key = index.primary_key(&index_rtxn)?.unwrap();
let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
.map_err(milli::Error::from)
.map_err(err)?;
let mut index_wtxn = index.write_txn()?;
let mut indexer = indexer::DocumentDeletion::new();
indexer.delete_documents_by_docids(documents_to_delete);
let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
let embedders = index
.embedding_configs()
.embedding_configs(&index_wtxn)
.map_err(milli::Error::from)
.map_err(err)?;
let embedders = self.embedders(index_uid.to_string(), embedders)?;
let indexer_config = self.index_mapper.indexer_config();
let pool = &indexer_config.thread_pool;
indexer::index(
&mut index_wtxn,
index,
pool,
indexer_config.grenad_parameters(),
&fields_ids_map,
new_fields_ids_map,
None, // document deletion never changes primary key
&document_changes,
embedders,
&|| must_stop_processing.get(),
&progress,
&EmbedderStats::default(),
)
.map_err(err)?;
index_wtxn.commit()?;
Ok(())
},
)?;
Ok(())
}
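`balance_documents` sorts each document into one of three buckets: owned by the local shard (keep), owned by another remote (export there, then delete locally), or ownerless under the new topology (delete). A minimal sketch of routing an external document id to a shard by hashing; an illustration only, since `Shards::processing_shard` lives in milli and `DefaultHasher` is not stable across Rust releases:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Stable-ish routing of an external document id to one of the shard names.
/// A real implementation must use a hash that is stable across releases.
fn owning_shard<'a>(shards: &'a [String], external_docid: &str) -> &'a str {
    let mut hasher = DefaultHasher::new();
    external_docid.hash(&mut hasher);
    let idx = (hasher.finish() % shards.len() as u64) as usize;
    &shards[idx]
}

fn main() {
    let shards = vec!["ms-0".to_string(), "ms-1".to_string(), "ms-2".to_string()];
    let local = "ms-0";
    for docid in ["doc-1", "doc-2", "doc-3"] {
        let owner = owning_shard(&shards, docid);
        if owner == local {
            println!("{docid}: keep locally");
        } else {
            println!("{docid}: export to {owner}, then delete locally");
        }
    }
}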
fn apply_compaction(
&self,
rtxn: &RoTxn,
@@ -919,7 +711,7 @@ impl IndexScheduler {
// 1. Remove from this list the tasks that we are not allowed to delete
let enqueued_tasks = self.queue.tasks.get_status(wtxn, Status::Enqueued)?;
let processing_tasks = &self.runtime_tasks.read().unwrap().processing.processing.clone();
let processing_tasks = &self.processing_tasks.read().unwrap().processing.clone();
let all_task_ids = self.queue.tasks.all_task_ids(wtxn)?;
let mut to_delete_tasks = all_task_ids & matched_tasks;

@@ -7,7 +7,6 @@ use backoff::ExponentialBackoff;
use byte_unit::Byte;
use flate2::write::GzEncoder;
use flate2::Compression;
use meilisearch_types::error::Code;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
use meilisearch_types::milli::index::EmbeddingsWithMetadata;
@@ -16,11 +15,7 @@ use meilisearch_types::milli::update::{request_threads, Setting};
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
use meilisearch_types::milli::{self, obkv_to_json, Filter, InternalError};
use meilisearch_types::settings::{self, SecretPolicy};
use meilisearch_types::tasks::enterprise_edition::network::{
headers, ImportData, ImportMetadata, Origin,
};
use meilisearch_types::tasks::{DetailsExportIndexSettings, ExportIndexSettings};
use roaring::RoaringBitmap;
use serde::Deserialize;
use ureq::{json, Response};
@@ -55,7 +50,6 @@ impl IndexScheduler {
let agent = ureq::AgentBuilder::new().timeout(Duration::from_secs(5)).build();
let must_stop_processing = self.scheduler.must_stop_processing.clone();
for (i, (_pattern, uid, export_settings)) in indexes.iter().enumerate() {
let err = |err| Error::from_milli(err, Some(uid.to_string()));
if must_stop_processing.get() {
return Err(Error::AbortedTask);
}
@@ -67,31 +61,104 @@ impl IndexScheduler {
));
let ExportIndexSettings { filter, override_settings } = export_settings;
let index = self.index(uid)?;
let index_rtxn = index.read_txn()?;
let filter = filter.as_ref().map(Filter::from_json).transpose().map_err(err)?.flatten();
let filter_universe =
filter.map(|f| f.evaluate(&index_rtxn, &index)).transpose().map_err(err)?;
let whole_universe =
index.documents_ids(&index_rtxn).map_err(milli::Error::from).map_err(err)?;
let bearer = api_key.map(|api_key| format!("Bearer {api_key}"));
// First, check if the index already exists
let url = format!("{base_url}/indexes/{uid}");
let response = retry(&must_stop_processing, || {
let mut request = agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. }) if code == "index_not_found" => {
false
}
Err(e) => return Err(e),
};
let primary_key = index
.primary_key(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
// Create the index
if !index_exists {
let url = format!("{base_url}/indexes");
retry(&must_stop_processing, || {
let mut request = agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
// Patch the index primary key
if index_exists && *override_settings {
let url = format!("{base_url}/indexes/{uid}");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
// Send the index settings
if !index_exists || *override_settings {
let mut settings =
settings::settings(&index, &index_rtxn, SecretPolicy::RevealSecrets)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!("{base_url}/indexes/{uid}/settings");
retry(&must_stop_processing, || {
let mut request = agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}
let filter = filter
.as_ref()
.map(Filter::from_json)
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?
.flatten();
let filter_universe = filter
.map(|f| f.evaluate(&index_rtxn, &index))
.transpose()
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let whole_universe = index
.documents_ids(&index_rtxn)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let universe = filter_universe.unwrap_or(whole_universe);
let target = TargetInstance { base_url, api_key };
let ctx = ExportContext {
index: &index,
index_rtxn: &index_rtxn,
universe: &universe,
progress: &progress,
agent: &agent,
must_stop_processing: &must_stop_processing,
};
let options = ExportOptions {
index_uid: uid,
payload_size,
override_settings: *override_settings,
export_mode: ExportMode::ExportRoute,
};
let total_documents = self.export_one_index(target, options, ctx)?;
let fields_ids_map = index.fields_ids_map(&index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
// We don't need to keep this one alive as we will
// spawn many threads to process the documents
drop(index_rtxn);
let total_documents = universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
progress.update_progress(progress_step);
output.insert(
IndexUidPattern::new_unchecked(uid.clone()),
@@ -100,307 +167,155 @@ impl IndexScheduler {
matched_documents: Some(total_documents as u64),
},
);
let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024); // defaults to 20 MiB
let documents_url = format!("{base_url}/indexes/{uid}/documents");
let results = request_threads()
.broadcast(|ctx| {
let index_rtxn = index
.read_txn()
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in universe.iter().enumerate() {
if i % ctx.num_threads() != ctx.index() {
continue;
}
let document = index
.document(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
let mut document = obkv_to_json(&all_fields, &fields_ids_map, document)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
// TODO definitely factorize this code
'inject_vectors: {
let embeddings = index
.embeddings(&index_rtxn, docid)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
if embeddings.is_empty() {
break 'inject_vectors;
}
let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));
let serde_json::Value::Object(vectors) = vectors else {
return Err(Error::from_milli(
milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = index
.external_id_of(
&index_rtxn,
std::iter::once(docid),
)
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
),
Some(uid.to_string()),
));
};
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(
VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
),
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments,
};
vectors.insert(
embedder_name,
serde_json::to_value(embeddings).unwrap(),
);
}
}
tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(|e| Error::from_milli(e.into(), Some(uid.to_string())))?;
// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
// We compress the documents before sending them
let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder
.write_all(&buffer)
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
encoder
.finish()
.map_err(|e| Error::from_milli(e.into(), Some(uid.clone())))?;
retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
})?;
buffer.clear();
compressed_buffer.clear();
}
buffer.extend_from_slice(&tmp_buffer);
if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}
retry(&must_stop_processing, || {
let mut request = agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;
Ok(())
})
.map_err(|e| {
Error::from_milli(
milli::Error::InternalError(InternalError::PanicInThreadPool(e)),
Some(uid.to_string()),
)
})?;
for result in results {
result?;
}
step.store(total_documents, atomic::Ordering::Relaxed);
}
Ok(output)
}
pub(super) fn export_one_index(
&self,
target: TargetInstance<'_>,
options: ExportOptions<'_>,
ctx: ExportContext<'_>,
) -> Result<u64, Error> {
let err = |err| Error::from_milli(err, Some(options.index_uid.to_string()));
let bearer = target.api_key.map(|api_key| format!("Bearer {api_key}"));
let url = format!(
"{base_url}/indexes/{index_uid}",
base_url = target.base_url,
index_uid = options.index_uid
);
let response = retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.get(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(Default::default()).map_err(into_backoff_error)
});
let index_exists = match response {
Ok(response) => response.status() == 200,
Err(Error::FromRemoteWhenExporting { code, .. })
if code == Code::IndexNotFound.name() =>
{
false
}
Err(e) => return Err(e),
};
let primary_key =
ctx.index.primary_key(&ctx.index_rtxn).map_err(milli::Error::from).map_err(err)?;
if !index_exists {
let url = format!("{base_url}/indexes", base_url = target.base_url);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "uid": options.index_uid, "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if index_exists && options.override_settings {
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
let index_param = json!({ "primaryKey": primary_key });
request.send_json(&index_param).map_err(into_backoff_error)
})?;
}
if !index_exists || options.override_settings {
let mut settings =
settings::settings(&ctx.index, &ctx.index_rtxn, SecretPolicy::RevealSecrets)
.map_err(err)?;
// Remove the experimental chat setting if not enabled
if self.features().check_chat_completions("exporting chat settings").is_err() {
settings.chat = Setting::NotSet;
}
// Retry logic for sending settings
let url = format!(
"{base_url}/indexes/{index_uid}/settings",
base_url = target.base_url,
index_uid = options.index_uid
);
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.patch(&url);
if let Some(bearer) = bearer.as_ref() {
request = request.set("Authorization", bearer);
}
request.send_json(settings.clone()).map_err(into_backoff_error)
})?;
}
let fields_ids_map = ctx.index.fields_ids_map(&ctx.index_rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
let total_documents = ctx.universe.len() as u32;
let (step, progress_step) = AtomicDocumentStep::new(total_documents);
ctx.progress.update_progress(progress_step);
let limit = options.payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(20 * 1024 * 1024);
let documents_url = format!(
"{base_url}/indexes/{index_uid}/documents",
base_url = target.base_url,
index_uid = options.index_uid
);
let results = request_threads()
.broadcast(|broadcast| {
let mut task_network = if let ExportMode::NetworkBalancing {
index_count,
export_old_remote_name,
network_change_origin,
} = options.export_mode
{
Some((
ImportData {
remote_name: export_old_remote_name.to_string(),
index_name: options.index_uid.to_string(),
document_count: 0,
},
network_change_origin.clone(),
ImportMetadata {
index_count,
task_key: 0,
total_index_documents: ctx.universe.len(),
},
))
} else {
None
};
let index_rtxn = ctx.index.read_txn().map_err(milli::Error::from).map_err(err)?;
let mut buffer = Vec::new();
let mut tmp_buffer = Vec::new();
let mut compressed_buffer = Vec::new();
for (i, docid) in ctx.universe.iter().enumerate() {
if i % broadcast.num_threads() != broadcast.index() {
continue;
}
if let Some((import_data, _, metadata)) = &mut task_network {
import_data.document_count += 1;
metadata.task_key = docid;
}
let document = ctx.index.document(&index_rtxn, docid).map_err(err)?;
let mut document =
obkv_to_json(&all_fields, &fields_ids_map, document).map_err(err)?;
// TODO definitely factorize this code
'inject_vectors: {
let embeddings = ctx.index.embeddings(&index_rtxn, docid).map_err(err)?;
if embeddings.is_empty() {
break 'inject_vectors;
}
let vectors = document
.entry(RESERVED_VECTORS_FIELD_NAME)
.or_insert(serde_json::Value::Object(Default::default()));
let serde_json::Value::Object(vectors) = vectors else {
return Err(err(milli::Error::UserError(
milli::UserError::InvalidVectorsMapType {
document_id: {
if let Ok(Some(Ok(index))) = ctx
.index
.external_id_of(&index_rtxn, std::iter::once(docid))
.map(|it| it.into_iter().next())
{
index
} else {
format!("internal docid={docid}")
}
},
value: vectors.clone(),
},
)));
};
for (
embedder_name,
EmbeddingsWithMetadata { embeddings, regenerate, has_fragments },
) in embeddings
{
let embeddings = ExplicitVectors {
embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors(
embeddings,
)),
regenerate: regenerate &&
// Meilisearch does not handle dumps with fragments well: because the fragments
// are marked as user-provided,
// all embeddings would be regenerated on any settings change or document update.
// To prevent this, we mark embeddings as non-regenerate in this case.
!has_fragments,
};
vectors
.insert(embedder_name, serde_json::to_value(embeddings).unwrap());
}
}
tmp_buffer.clear();
serde_json::to_writer(&mut tmp_buffer, &document)
.map_err(milli::InternalError::from)
.map_err(milli::Error::from)
.map_err(err)?;
// Make sure we put at least one document in the buffer even
// though we might go above the buffer limit before sending
if !buffer.is_empty() && buffer.len() + tmp_buffer.len() > limit {
// We compress the documents before sending them
let mut encoder =
GzEncoder::new(&mut compressed_buffer, Compression::default());
encoder.write_all(&buffer).map_err(milli::Error::from).map_err(err)?;
encoder.finish().map_err(milli::Error::from).map_err(err)?;
match retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
request = request.set("Content-Encoding", "gzip");
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
if let Some((import_data, origin, metadata)) = &task_network {
request = set_network_ureq_headers(
request,
import_data,
origin,
metadata,
);
}
request.send_bytes(&compressed_buffer).map_err(into_backoff_error)
}) {
Ok(_response) => {}
Err(Error::FromRemoteWhenExporting { code, .. })
if code == Code::ImportTaskAlreadyReceived.name() =>
{
continue;
}
Err(Error::FromRemoteWhenExporting { code, message, .. })
if code == Code::ImportTaskUnknownRemote.name() =>
{
tracing::warn!("remote answered with: {message}");
break;
}
Err(Error::FromRemoteWhenExporting { code, message, .. })
if code == Code::ImportTaskWithoutNetworkTask.name() =>
{
tracing::warn!("remote answered with: {message}");
break;
}
Err(e) => return Err(e),
}
buffer.clear();
compressed_buffer.clear();
if let Some((import_data, _, metadata)) = &mut task_network {
import_data.document_count = 0;
metadata.task_key = 0;
}
}
buffer.extend_from_slice(&tmp_buffer);
if i > 0 && i % 100 == 0 {
step.fetch_add(100, atomic::Ordering::Relaxed);
}
}
retry(ctx.must_stop_processing, || {
let mut request = ctx.agent.post(&documents_url);
request = request.set("Content-Type", "application/x-ndjson");
if let Some((import_data, origin, metadata)) = &task_network {
request = set_network_ureq_headers(request, import_data, origin, metadata);
}
if let Some(bearer) = &bearer {
request = request.set("Authorization", bearer);
}
request.send_bytes(&buffer).map_err(into_backoff_error)
})?;
Ok(())
})
.map_err(|e| err(milli::Error::InternalError(InternalError::PanicInThreadPool(e))))?;
for result in results {
result?;
}
step.store(total_documents, atomic::Ordering::Relaxed);
Ok(total_documents as u64)
}
}
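The export loop above accumulates documents as NDJSON, gzip-compresses the buffer once it approaches the payload limit, and POSTs it with `Content-Encoding: gzip`. A minimal sketch of just the compression step with `flate2`, no HTTP involved:

use std::io::Write;

use flate2::write::GzEncoder;
use flate2::Compression;

// Gzip an NDJSON buffer the way the export loop does before POSTing it
// with `Content-Encoding: gzip`.
fn compress_ndjson(buffer: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(buffer)?;
    encoder.finish()
}

fn main() -> std::io::Result<()> {
    let mut buffer = Vec::new();
    for doc in [r#"{"id":1}"#, r#"{"id":2}"#] {
        buffer.extend_from_slice(doc.as_bytes());
        buffer.push(b'\n');
    }
    let compressed = compress_ndjson(&buffer)?;
    println!("{} bytes -> {} bytes", buffer.len(), compressed.len());
    Ok(())
}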
fn set_network_ureq_headers(
request: ureq::Request,
import_data: &ImportData,
origin: &Origin,
metadata: &ImportMetadata,
) -> ureq::Request {
request
.set(headers::PROXY_ORIGIN_REMOTE_HEADER, &origin.remote_name)
.set(headers::PROXY_ORIGIN_TASK_UID_HEADER, &origin.task_uid.to_string())
.set(
headers::PROXY_ORIGIN_NETWORK_VERSION_HEADER,
&origin.network_version.to_u128_le().to_string(),
)
.set(headers::PROXY_IMPORT_REMOTE_HEADER, &import_data.remote_name)
.set(headers::PROXY_IMPORT_INDEX_HEADER, &import_data.index_name)
.set(headers::PROXY_IMPORT_TASK_KEY_HEADER, &metadata.task_key.to_string())
.set(headers::PROXY_IMPORT_DOCS_HEADER, &import_data.document_count.to_string())
.set(headers::PROXY_IMPORT_INDEX_COUNT_HEADER, &metadata.index_count.to_string())
.set(
headers::PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
&metadata.total_index_documents.to_string(),
)
}
fn retry<F>(must_stop_processing: &MustStopProcessing, send_request: F) -> Result<ureq::Response>
@@ -459,37 +374,4 @@ fn ureq_error_into_error(error: ureq::Error) -> Error {
}
}
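The `retry` helper whose tail is shown above wraps each request in exponential backoff and distinguishes transient from permanent failures; the real code uses the `backoff` crate and checks `MustStopProcessing` to bail out during shutdown. A minimal std-only sketch of that loop:

use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

enum Transient<E> {
    Retry(E),
    Permanent(E),
}

fn retry<T, E>(
    must_stop: &AtomicBool,
    max_attempts: u32,
    mut send_request: impl FnMut() -> Result<T, Transient<E>>,
) -> Result<T, E> {
    let mut delay = Duration::from_millis(100);
    let mut attempt = 0;
    loop {
        match send_request() {
            Ok(value) => return Ok(value),
            Err(Transient::Permanent(e)) => return Err(e),
            Err(Transient::Retry(e)) => {
                attempt += 1;
                if attempt >= max_attempts || must_stop.load(Ordering::Relaxed) {
                    return Err(e);
                }
                std::thread::sleep(delay);
                delay *= 2; // exponential backoff
            }
        }
    }
}

fn main() {
    let stop = AtomicBool::new(false);
    let mut left = 2;
    let res: Result<&str, &str> = retry(&stop, 5, || {
        if left > 0 {
            left -= 1;
            Err(Transient::Retry("503"))
        } else {
            Ok("200")
        }
    });
    assert_eq!(res, Ok("200"));
}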
// export_one_index arguments
pub(super) struct TargetInstance<'a> {
pub(super) base_url: &'a str,
pub(super) api_key: Option<&'a str>,
}
pub(super) struct ExportOptions<'a> {
pub(super) index_uid: &'a str,
pub(super) payload_size: Option<&'a Byte>,
pub(super) override_settings: bool,
pub(super) export_mode: ExportMode<'a>,
}
pub(super) struct ExportContext<'a> {
pub(super) index: &'a meilisearch_types::milli::Index,
pub(super) index_rtxn: &'a milli::heed::RoTxn<'a>,
pub(super) universe: &'a RoaringBitmap,
pub(super) progress: &'a Progress,
pub(super) agent: &'a ureq::Agent,
pub(super) must_stop_processing: &'a MustStopProcessing,
}
pub(super) enum ExportMode<'a> {
ExportRoute,
NetworkBalancing {
index_count: u64,
export_old_remote_name: &'a str,
network_change_origin: &'a Origin,
},
}
// progress related
enum ExportIndex {}

@@ -12,8 +12,6 @@ use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress};
use crate::queue::TaskQueue;
use crate::{Error, IndexScheduler, Result};
const UPDATE_FILES_DIR_NAME: &str = "update_files";
/// # Safety
///
/// See [`EnvOpenOptions::open`].
@@ -80,32 +78,10 @@ impl IndexScheduler {
pub(super) fn process_snapshot(
&self,
progress: Progress,
tasks: Vec<Task>,
mut tasks: Vec<Task>,
) -> Result<Vec<Task>> {
progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation);
match self.scheduler.s3_snapshot_options.clone() {
Some(options) => {
#[cfg(not(unix))]
{
let _ = options;
panic!("Non-unix platform does not support S3 snapshotting");
}
#[cfg(unix)]
self.runtime
.as_ref()
.expect("Runtime not initialized")
.block_on(self.process_snapshot_to_s3(progress, options, tasks))
}
None => self.process_snapshots_to_disk(progress, tasks),
}
}
fn process_snapshots_to_disk(
&self,
progress: Progress,
mut tasks: Vec<Task>,
) -> Result<Vec<Task>, Error> {
fs::create_dir_all(&self.scheduler.snapshots_path)?;
let temp_snapshot_dir = tempfile::tempdir()?;
@@ -152,7 +128,7 @@ impl IndexScheduler {
let rtxn = self.env.read_txn()?;
// 2.4 Create the update files directory
let update_files_dir = temp_snapshot_dir.path().join(UPDATE_FILES_DIR_NAME);
let update_files_dir = temp_snapshot_dir.path().join("update_files");
fs::create_dir_all(&update_files_dir)?;
// 2.5 Only copy the update files of the enqueued tasks
@@ -164,7 +140,7 @@ impl IndexScheduler {
let task =
self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
if let Some(content_uuid) = task.content_uuid() {
let src = self.queue.file_store.update_path(content_uuid);
let src = self.queue.file_store.get_update_path(content_uuid);
let dst = update_files_dir.join(content_uuid.to_string());
fs::copy(src, dst)?;
}
@@ -230,403 +206,4 @@ impl IndexScheduler {
Ok(tasks)
}
#[cfg(unix)]
pub(super) async fn process_snapshot_to_s3(
&self,
progress: Progress,
opts: meilisearch_types::milli::update::S3SnapshotOptions,
mut tasks: Vec<Task>,
) -> Result<Vec<Task>> {
use meilisearch_types::milli::update::S3SnapshotOptions;
let S3SnapshotOptions {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts,
s3_compression_level: level,
s3_signature_duration,
s3_multipart_part_size,
} = opts;
let must_stop_processing = self.scheduler.must_stop_processing.clone();
let retry_backoff = backoff::ExponentialBackoff::default();
let db_name = {
let mut base_path = self.env.path().to_owned();
base_path.pop();
base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms").to_string()
};
let (reader, writer) = std::io::pipe()?;
let uploader_task = tokio::spawn(multipart_stream_to_s3(
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts,
s3_signature_duration,
s3_multipart_part_size,
must_stop_processing,
retry_backoff,
db_name,
reader,
));
let index_scheduler = IndexScheduler::private_clone(self);
let builder_task = tokio::task::spawn_blocking(move || {
stream_tarball_into_pipe(progress, level, writer, index_scheduler)
});
let (uploader_result, builder_result) = tokio::join!(uploader_task, builder_task);
// Check the uploader result first to return early on task abortion.
// safety: JoinHandle can return an error if the task was aborted, cancelled, or panicked.
uploader_result.unwrap()?;
builder_result.unwrap()?;
for task in &mut tasks {
task.status = Status::Succeeded;
}
Ok(tasks)
}
}
/// Streams a tarball of the database content into a pipe.
#[cfg(unix)]
fn stream_tarball_into_pipe(
progress: Progress,
level: u32,
writer: std::io::PipeWriter,
index_scheduler: IndexScheduler,
) -> std::result::Result<(), Error> {
use std::io::Write as _;
use std::path::Path;
let writer = flate2::write::GzEncoder::new(writer, flate2::Compression::new(level));
let mut tarball = tar::Builder::new(writer);
// 1. Snapshot the version file
tarball
.append_path_with_name(&index_scheduler.scheduler.version_file_path, VERSION_FILE_NAME)?;
// 2. Snapshot the index scheduler LMDB env
progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
let tasks_env_file = index_scheduler.env.try_clone_inner_file()?;
let path = Path::new("tasks").join("data.mdb");
append_file_to_tarball(&mut tarball, path, tasks_env_file)?;
// 2.3 Create a read transaction on the index-scheduler
let rtxn = index_scheduler.env.read_txn()?;
// 2.4 Create the update files directory
// And only copy the update files of the enqueued tasks
progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
let enqueued = index_scheduler.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);
progress.update_progress(update_file_progress);
// We create the update_files directory so that it
// always exists even if there are no update files
let update_files_dir = Path::new(UPDATE_FILES_DIR_NAME);
let src_update_files_dir = {
let mut path = index_scheduler.env.path().to_path_buf();
path.pop();
path.join(UPDATE_FILES_DIR_NAME)
};
tarball.append_dir(update_files_dir, src_update_files_dir)?;
for task_id in enqueued {
let task = index_scheduler
.queue
.tasks
.get_task(&rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;
if let Some(content_uuid) = task.content_uuid() {
use std::fs::File;
let src = index_scheduler.queue.file_store.update_path(content_uuid);
let mut update_file = File::open(src)?;
let path = update_files_dir.join(content_uuid.to_string());
tarball.append_file(path, &mut update_file)?;
}
atomic.fetch_add(1, Ordering::Relaxed);
}
// 3. Snapshot every index
progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexes);
let index_mapping = index_scheduler.index_mapper.index_mapping;
let nb_indexes = index_mapping.len(&rtxn)? as u32;
let indexes_dir = Path::new("indexes");
let indexes_references: Vec<_> = index_scheduler
.index_mapper
.index_mapping
.iter(&rtxn)?
.map(|res| res.map_err(Error::from).map(|(name, uuid)| (name.to_string(), uuid)))
.collect::<Result<_, Error>>()?;
// It's prettier to use a for loop instead of the IndexMapper::try_for_each_index
// method, especially when we need to access the UUID, local path and index number.
for (i, (name, uuid)) in indexes_references.into_iter().enumerate() {
progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
&name, i as u32, nb_indexes,
));
let path = indexes_dir.join(uuid.to_string()).join("data.mdb");
let index = index_scheduler.index_mapper.index(&rtxn, &name)?;
let index_file = index.try_clone_inner_file()?;
tracing::trace!("Appending index file for {name} in {}", path.display());
append_file_to_tarball(&mut tarball, path, index_file)?;
}
drop(rtxn);
// 4. Snapshot the auth LMDB env
progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
let auth_env_file = index_scheduler.scheduler.auth_env.try_clone_inner_file()?;
let path = Path::new("auth").join("data.mdb");
append_file_to_tarball(&mut tarball, path, auth_env_file)?;
let mut gzencoder = tarball.into_inner()?;
gzencoder.flush()?;
gzencoder.try_finish()?;
let mut writer = gzencoder.finish()?;
writer.flush()?;
Result::<_, Error>::Ok(())
}
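`stream_tarball_into_pipe` never materializes the snapshot on disk: the tar builder writes into a gzip encoder, which writes into one end of a pipe that the S3 uploader drains concurrently. A minimal sketch of the tar-into-gzip layering, assuming the `tar` and `flate2` crates and writing to a `Vec` instead of a pipe:

use flate2::write::GzEncoder;
use flate2::Compression;

fn build_tar_gz() -> std::io::Result<Vec<u8>> {
    // Any `Write` works as the sink; the real code hands a pipe writer
    // to the gzip encoder so the uploader can stream parts while the
    // tarball is still being produced.
    let gz = GzEncoder::new(Vec::new(), Compression::new(6));
    let mut tarball = tar::Builder::new(gz);

    let data = b"1.26.0";
    let mut header = tar::Header::new_gnu();
    header.set_size(data.len() as u64);
    header.set_mode(0o644);
    header.set_cksum();
    tarball.append_data(&mut header, "VERSION", &data[..])?;

    // `into_inner` finishes the archive, `finish` flushes the gzip stream.
    let gz = tarball.into_inner()?;
    gz.finish()
}

fn main() -> std::io::Result<()> {
    let bytes = build_tar_gz()?;
    assert_eq!(&bytes[..2], &[0x1f, 0x8b]); // gzip magic number
    Ok(())
}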
#[cfg(unix)]
fn append_file_to_tarball<W, P>(
tarball: &mut tar::Builder<W>,
path: P,
mut auth_env_file: fs::File,
) -> Result<(), Error>
where
W: std::io::Write,
P: AsRef<std::path::Path>,
{
use std::io::{Seek as _, SeekFrom};
// Note: A previous snapshot operation may have left the cursor
// at the end of the file, so we need to seek back to the start.
auth_env_file.seek(SeekFrom::Start(0))?;
tarball.append_file(path, &mut auth_env_file)?;
Ok(())
}
/// Streams the content read from the given reader to S3.
#[cfg(unix)]
#[allow(clippy::too_many_arguments)]
async fn multipart_stream_to_s3(
s3_bucket_url: String,
s3_bucket_region: String,
s3_bucket_name: String,
s3_snapshot_prefix: String,
s3_access_key: String,
s3_secret_key: String,
s3_max_in_flight_parts: std::num::NonZero<usize>,
s3_signature_duration: std::time::Duration,
s3_multipart_part_size: u64,
must_stop_processing: super::MustStopProcessing,
retry_backoff: backoff::exponential::ExponentialBackoff<backoff::SystemClock>,
db_name: String,
reader: std::io::PipeReader,
) -> Result<(), Error> {
use std::{collections::VecDeque, os::fd::OwnedFd, path::PathBuf};
use bytes::{Bytes, BytesMut};
use reqwest::{Client, Response};
use rusty_s3::S3Action as _;
use rusty_s3::{actions::CreateMultipartUpload, Bucket, BucketError, Credentials, UrlStyle};
use tokio::task::JoinHandle;
let reader = OwnedFd::from(reader);
let reader = tokio::net::unix::pipe::Receiver::from_owned_fd(reader)?;
let s3_snapshot_prefix = PathBuf::from(s3_snapshot_prefix);
let url =
s3_bucket_url.parse().map_err(BucketError::ParseError).map_err(Error::S3BucketError)?;
let bucket = Bucket::new(url, UrlStyle::Path, s3_bucket_name, s3_bucket_region)
.map_err(Error::S3BucketError)?;
let credential = Credentials::new(s3_access_key, s3_secret_key);
// Note for the future (rust 1.91+): use with_added_extension, it's prettier
let object_path = s3_snapshot_prefix.join(format!("{db_name}.snapshot"));
// Note: This doesn't work on Windows; if a port to this platform is needed,
// use the slash-path crate or similar to get the correct path separator.
let object = object_path.display().to_string();
let action = bucket.create_multipart_upload(Some(&credential), &object);
let url = action.sign(s3_signature_duration);
let client = Client::new();
let resp = client.post(url).send().await.map_err(Error::S3HttpError)?;
let status = resp.status();
let body = match resp.error_for_status_ref() {
Ok(_) => resp.text().await.map_err(Error::S3HttpError)?,
Err(_) => {
return Err(Error::S3Error { status, body: resp.text().await.unwrap_or_default() })
}
};
let multipart =
CreateMultipartUpload::parse_response(&body).map_err(|e| Error::S3XmlError(Box::new(e)))?;
tracing::debug!("Starting the upload of the snapshot to {object}");
// We use this bumpalo for etags strings.
let bump = bumpalo::Bump::new();
let mut etags = Vec::<&str>::new();
let mut in_flight = VecDeque::<(JoinHandle<reqwest::Result<Response>>, Bytes)>::with_capacity(
s3_max_in_flight_parts.get(),
);
// Part numbers start at 1 and cannot be larger than 10k
for part_number in 1u16.. {
if must_stop_processing.get() {
return Err(Error::AbortedTask);
}
let part_upload =
bucket.upload_part(Some(&credential), &object, part_number, multipart.upload_id());
let url = part_upload.sign(s3_signature_duration);
// Reuse the buffer of the oldest in-flight part once it lands, or allocate a new one if we are under the in-flight limit
let mut buffer = if in_flight.len() >= s3_max_in_flight_parts.get() {
let (handle, buffer) = in_flight.pop_front().expect("At least one in flight request");
let resp = join_and_map_error(handle).await?;
extract_and_append_etag(&bump, &mut etags, resp.headers())?;
let mut buffer = match buffer.try_into_mut() {
Ok(buffer) => buffer,
Err(_) => unreachable!("All bytes references were consumed in the task"),
};
buffer.clear();
buffer
} else {
BytesMut::with_capacity(s3_multipart_part_size as usize)
};
// If we successfully read enough bytes,
// we can continue and send the buffer/part
while buffer.len() < (s3_multipart_part_size as usize / 2) {
// Wait for the pipe to be readable
use std::io;
reader.readable().await?;
match reader.try_read_buf(&mut buffer) {
Ok(0) => break,
// We read some bytes but maybe not enough
Ok(_) => continue,
// The readiness event is a false positive.
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
Err(e) => return Err(e.into()),
}
}
if buffer.is_empty() {
// Break the loop if the buffer is
// empty after we tried to read bytes
break;
}
let body = buffer.freeze();
tracing::trace!("Sending part {part_number}");
let task = tokio::spawn({
let client = client.clone();
let body = body.clone();
backoff::future::retry(retry_backoff.clone(), move || {
let client = client.clone();
let url = url.clone();
let body = body.clone();
async move {
match client.put(url).body(body).send().await {
Ok(resp) if resp.status().is_client_error() => {
resp.error_for_status().map_err(backoff::Error::Permanent)
}
Ok(resp) => Ok(resp),
Err(e) => Err(backoff::Error::transient(e)),
}
}
})
});
in_flight.push_back((task, body));
}
for (handle, _buffer) in in_flight {
let resp = join_and_map_error(handle).await?;
extract_and_append_etag(&bump, &mut etags, resp.headers())?;
}
tracing::debug!("Finalizing the multipart upload");
let action = bucket.complete_multipart_upload(
Some(&credential),
&object,
multipart.upload_id(),
etags.iter().map(AsRef::as_ref),
);
let url = action.sign(s3_signature_duration);
let body = action.body();
let resp = backoff::future::retry(retry_backoff, move || {
let client = client.clone();
let url = url.clone();
let body = body.clone();
async move {
match client.post(url).body(body).send().await {
Ok(resp) if resp.status().is_client_error() => {
resp.error_for_status().map_err(backoff::Error::Permanent)
}
Ok(resp) => Ok(resp),
Err(e) => Err(backoff::Error::transient(e)),
}
}
})
.await
.map_err(Error::S3HttpError)?;
let status = resp.status();
let body = resp.text().await.map_err(|e| Error::S3Error { status, body: e.to_string() })?;
if status.is_success() {
Ok(())
} else {
Err(Error::S3Error { status, body })
}
}
#[cfg(unix)]
async fn join_and_map_error(
join_handle: tokio::task::JoinHandle<Result<reqwest::Response, reqwest::Error>>,
) -> Result<reqwest::Response> {
// safety: the unwrap only panics if the task (JoinHandle) was aborted, cancelled, or panicked
let request = join_handle.await.unwrap();
let resp = request.map_err(Error::S3HttpError)?;
match resp.error_for_status_ref() {
Ok(_) => Ok(resp),
Err(_) => Err(Error::S3Error {
status: resp.status(),
body: resp.text().await.unwrap_or_default(),
}),
}
}
#[cfg(unix)]
fn extract_and_append_etag<'b>(
bump: &'b bumpalo::Bump,
etags: &mut Vec<&'b str>,
headers: &reqwest::header::HeaderMap,
) -> Result<()> {
use reqwest::header::ETAG;
let etag = headers.get(ETAG).ok_or_else(|| Error::S3XmlError("Missing ETag header".into()))?;
let etag = etag.to_str().map_err(|e| Error::S3XmlError(Box::new(e)))?;
etags.push(bump.alloc_str(etag));
Ok(())
}
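The upload loop above keeps at most `s3_max_in_flight_parts` requests in the air: before spawning a new part it waits for the oldest in-flight one, records its ETag, and reuses its buffer. A minimal sketch of that bounded-in-flight backpressure pattern, using std threads in place of tokio tasks:

use std::collections::VecDeque;
use std::thread::{self, JoinHandle};

fn main() {
    let max_in_flight = 3;
    let mut in_flight: VecDeque<JoinHandle<String>> = VecDeque::new();
    let mut etags = Vec::new();

    for part_number in 1..=10u16 {
        // Backpressure: wait for the oldest part before starting a new one.
        if in_flight.len() >= max_in_flight {
            let handle = in_flight.pop_front().expect("at least one in-flight part");
            etags.push(handle.join().expect("upload thread panicked"));
        }
        in_flight.push_back(thread::spawn(move || {
            // Stand-in for the actual PUT of one multipart part.
            format!("etag-{part_number}")
        }));
    }

    // Drain the remaining parts; the multipart upload can then be
    // finalized with the collected ETags.
    for handle in in_flight {
        etags.push(handle.join().expect("upload thread panicked"));
    }
    assert_eq!(etags.len(), 10);
}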

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -57,7 +57,7 @@ girafo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [4,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `indexCreation` that cannot be batched with any other task.", }

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
----------------------------------------------------------------------
### Status:
enqueued [0,]

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
----------------------------------------------------------------------
### Status:
@@ -37,7 +37,7 @@ catto [1,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
----------------------------------------------------------------------
@@ -40,7 +40,7 @@ doggo [2,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -6,7 +6,7 @@ source: crates/index-scheduler/src/scheduler/test_failure.rs
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 26, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 24, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone"), old_new_uid: None, new_index_uid: None }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
@@ -43,7 +43,7 @@ doggo [2,3,]
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.26.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.24.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, stop reason: "stopped after the last task of type `upgradeDatabase` because they cannot be batched with tasks of any other type.", }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

View File

@@ -126,7 +126,7 @@ impl IndexScheduler {
std::fs::create_dir_all(&options.auth_path).unwrap();
let auth_env = open_auth_store_env(&options.auth_path).unwrap();
let index_scheduler =
Self::new_test(options, auth_env, version, None, sender, planned_failures).unwrap();
Self::new(options, auth_env, version, sender, planned_failures).unwrap();
// To be 100% consistent between all test we're going to start the scheduler right now
// and ensure it's in the expected starting state.

View File

@@ -48,8 +48,6 @@ pub fn upgrade_index_scheduler(
(1, 22, _) => 0,
(1, 23, _) => 0,
(1, 24, _) => 0,
(1, 25, _) => 0,
(1, 26, _) => 0,
(major, minor, patch) => {
if major > current_major
|| (major == current_major && minor > current_minor)
@@ -100,7 +98,6 @@ pub fn upgrade_index_scheduler(
status: Status::Enqueued,
kind: KindWithContent::UpgradeDatabase { from },
network: None,
custom_metadata: None,
},
)?;
wtxn.commit()?;

View File

@@ -286,7 +286,6 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
| K::DumpCreation { .. }
| K::Export { .. }
| K::UpgradeDatabase { .. }
| K::NetworkTopologyChange(_)
| K::SnapshotCreation => (),
};
if let Some(Details::IndexSwap { swaps }) = &mut task.details {
@@ -380,7 +379,6 @@ impl crate::IndexScheduler {
status,
kind,
network: _,
custom_metadata: _,
} = task;
assert_eq!(uid, task.uid);
if task.status != Status::Enqueued {
@@ -628,13 +626,6 @@ impl crate::IndexScheduler {
} => {
assert_eq!(kind.as_kind(), Kind::IndexCompaction);
}
Details::NetworkTopologyChange {
moved_documents: _,
received_documents: _,
message: _,
} => {
assert_eq!(kind.as_kind(), Kind::NetworkTopologyChange);
}
}
}

View File

@@ -7,7 +7,6 @@ use std::collections::BTreeMap;
use milli::update::new::indexer::enterprise_edition::sharding::Shards;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "camelCase")]
@@ -17,18 +16,20 @@ pub struct Network {
#[serde(default)]
pub remotes: BTreeMap<String, Remote>,
#[serde(default)]
pub leader: Option<String>,
#[serde(default)]
pub version: Uuid,
pub sharding: bool,
}
impl Network {
pub fn shards(&self) -> Option<Shards> {
if self.leader.is_some() {
Some(Shards::from_remotes_local(
self.remotes.keys().map(String::as_str),
self.local.as_deref(),
))
if self.sharding {
let this = self.local.as_deref().expect("Inconsistent `sharding` and `self`");
let others = self
.remotes
.keys()
.filter(|name| name.as_str() != this)
.map(|name| name.to_owned())
.collect();
Some(Shards { own: vec![this.to_owned()], others })
} else {
None
}
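
For reviewers, a minimal standalone sketch of the partition the restored shards() computes, assuming a node named "ms-0" in a hypothetical three-node network (the names are illustrative, not taken from this diff):

fn main() {
    // The local node keeps its own name; every other remote goes to `others`.
    let this = "ms-0";
    let remotes = ["ms-0", "ms-1", "ms-2"]; // hypothetical network members
    let others: Vec<String> = remotes
        .iter()
        .copied()
        .filter(|&name| name != this)
        .map(String::from)
        .collect();
    assert_eq!(others, vec!["ms-1".to_string(), "ms-2".to_string()]);
    // shards() would return Some(Shards { own: vec!["ms-0"], others }) here,
    // and None whenever `sharding` is false.
}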

View File

@@ -156,7 +156,7 @@ macro_rules! make_error_codes {
}
/// return error name, used as error code
pub fn name(&self) -> String {
fn name(&self) -> String {
match self {
$(
Code::$code_ident => stringify!($code_ident).to_case(convert_case::Case::Snake)
@@ -214,9 +214,6 @@ ImmutableApiKeyUid , InvalidRequest , BAD_REQU
ImmutableApiKeyUpdatedAt , InvalidRequest , BAD_REQUEST;
ImmutableIndexCreatedAt , InvalidRequest , BAD_REQUEST;
ImmutableIndexUpdatedAt , InvalidRequest , BAD_REQUEST;
ImportTaskAlreadyReceived , InvalidRequest , PRECONDITION_FAILED;
ImportTaskUnknownRemote , InvalidRequest , PRECONDITION_FAILED;
ImportTaskWithoutNetworkTask , InvalidRequest , SERVICE_UNAVAILABLE;
IndexAlreadyExists , InvalidRequest , CONFLICT ;
IndexCreationFailed , Internal , INTERNAL_SERVER_ERROR;
IndexNotFound , InvalidRequest , NOT_FOUND;
@@ -257,12 +254,10 @@ InvalidSearchHybridQuery , InvalidRequest , BAD_REQU
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
InvalidIndexCustomMetadata , InvalidRequest , BAD_REQUEST ;
InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacets , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacetsByIndex , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFacetOrder , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchQueryPersonalization , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFederated , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchFederationOptions , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchMaxValuesPerFacet , InvalidRequest , BAD_REQUEST ;
@@ -273,9 +268,9 @@ InvalidMultiSearchQueryRankingRules , InvalidRequest , BAD_REQU
InvalidMultiSearchQueryPosition , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchRemote , InvalidRequest , BAD_REQUEST ;
InvalidMultiSearchWeight , InvalidRequest , BAD_REQUEST ;
InvalidNetworkLeader , InvalidRequest , BAD_REQUEST ;
InvalidNetworkRemotes , InvalidRequest , BAD_REQUEST ;
InvalidNetworkSelf , InvalidRequest , BAD_REQUEST ;
InvalidNetworkSharding , InvalidRequest , BAD_REQUEST ;
InvalidNetworkSearchApiKey , InvalidRequest , BAD_REQUEST ;
InvalidNetworkWriteApiKey , InvalidRequest , BAD_REQUEST ;
InvalidNetworkUrl , InvalidRequest , BAD_REQUEST ;
@@ -320,8 +315,6 @@ InvalidSearchShowRankingScoreDetails , InvalidRequest , BAD_REQU
InvalidSimilarShowRankingScoreDetails , InvalidRequest , BAD_REQUEST ;
InvalidSearchSort , InvalidRequest , BAD_REQUEST ;
InvalidSearchDistinct , InvalidRequest , BAD_REQUEST ;
InvalidSearchPersonalize , InvalidRequest , BAD_REQUEST ;
InvalidSearchPersonalizeUserContext , InvalidRequest , BAD_REQUEST ;
InvalidSearchMediaAndVector , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ;
InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
@@ -380,9 +373,7 @@ MissingPayload , InvalidRequest , BAD_REQU
MissingSearchHybrid , InvalidRequest , BAD_REQUEST ;
MissingSwapIndexes , InvalidRequest , BAD_REQUEST ;
MissingTaskFilters , InvalidRequest , BAD_REQUEST ;
NetworkVersionMismatch , InvalidRequest , PRECONDITION_FAILED ;
NoSpaceLeftOnDevice , System , UNPROCESSABLE_ENTITY;
NotLeader , InvalidRequest , BAD_REQUEST ;
PayloadTooLarge , InvalidRequest , PAYLOAD_TOO_LARGE ;
RemoteBadResponse , System , BAD_GATEWAY ;
RemoteBadRequest , InvalidRequest , BAD_REQUEST ;
@@ -396,13 +387,9 @@ TaskFileNotFound , InvalidRequest , NOT_FOUN
BatchNotFound , InvalidRequest , NOT_FOUND ;
TooManyOpenFiles , System , UNPROCESSABLE_ENTITY ;
TooManyVectors , InvalidRequest , BAD_REQUEST ;
UnexpectedNetworkPreviousRemotes , InvalidRequest , BAD_REQUEST ;
UnretrievableDocument , Internal , BAD_REQUEST ;
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
InvalidS3SnapshotRequest , Internal , BAD_REQUEST ;
InvalidS3SnapshotParameters , Internal , BAD_REQUEST ;
S3SnapshotServerError , Internal , BAD_GATEWAY ;
// Experimental features
VectorEmbeddingError , InvalidRequest , BAD_REQUEST ;
@@ -692,18 +679,6 @@ impl fmt::Display for deserr_codes::InvalidNetworkSearchApiKey {
}
}
impl fmt::Display for deserr_codes::InvalidSearchPersonalize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "the value of `personalize` is invalid, expected a JSON object with `userContext` string.")
}
}
impl fmt::Display for deserr_codes::InvalidSearchPersonalizeUserContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "the value of `userContext` is invalid, expected a string.")
}
}
#[macro_export]
macro_rules! internal_error {
($target:ty : $($other:path), *) => {

View File

@@ -346,26 +346,24 @@ impl<T> Settings<T> {
continue;
};
hide_secret(api_key, 0);
Self::hide_secret(api_key);
}
}
}
/// Redact a secret string, starting from the `secret_offset`th byte.
pub fn hide_secret(secret: &mut String, secret_offset: usize) {
match secret.len().checked_sub(secret_offset) {
None => (),
Some(x) if x < 10 => {
secret.replace_range(secret_offset.., "XXX...");
}
Some(x) if x < 20 => {
secret.replace_range((secret_offset + 2).., "XXXX...");
}
Some(x) if x < 30 => {
secret.replace_range((secret_offset + 3).., "XXXXX...");
}
Some(_x) => {
secret.replace_range((secret_offset + 5).., "XXXXXX...");
fn hide_secret(secret: &mut String) {
match secret.len() {
x if x < 10 => {
secret.replace_range(.., "XXX...");
}
x if x < 20 => {
secret.replace_range(2.., "XXXX...");
}
x if x < 30 => {
secret.replace_range(3.., "XXXXX...");
}
_x => {
secret.replace_range(5.., "XXXXXX...");
}
}
}
}
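
As a quick illustration of the restored length-based rule, here is a standalone sketch with the same thresholds (the sample key is made up); longer secrets keep a slightly longer visible prefix:

fn redact(secret: &mut String) {
    // Same thresholds as the restored hide_secret above.
    // Byte offsets are used, which is fine for ASCII keys.
    match secret.len() {
        x if x < 10 => secret.replace_range(.., "XXX..."),
        x if x < 20 => secret.replace_range(2.., "XXXX..."),
        x if x < 30 => secret.replace_range(3.., "XXXXX..."),
        _ => secret.replace_range(5.., "XXXXXX..."),
    }
}

fn main() {
    let mut key = String::from("sk-1234567890abcdef"); // 19 bytes
    redact(&mut key);
    assert_eq!(key, "skXXXX..."); // the first two bytes survive, the rest is masked
}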

View File

@@ -9,9 +9,9 @@ use utoipa::ToSchema;
use crate::batches::BatchId;
use crate::error::ResponseError;
use crate::settings::{Settings, Unchecked};
use crate::tasks::enterprise_edition::network::DbTaskNetwork;
use crate::tasks::{
serialize_duration, Details, DetailsExportIndexSettings, IndexSwap, Kind, Status, Task, TaskId,
TaskNetwork,
};
#[derive(Debug, Clone, PartialEq, Serialize, ToSchema)]
@@ -54,10 +54,7 @@ pub struct TaskView {
pub finished_at: Option<OffsetDateTime>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<DbTaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
pub network: Option<TaskNetwork>,
}
impl TaskView {
@@ -76,7 +73,6 @@ impl TaskView {
started_at: task.started_at,
finished_at: task.finished_at,
network: task.network.clone(),
custom_metadata: task.custom_metadata.clone(),
}
}
}
@@ -151,11 +147,6 @@ pub struct DetailsView {
pub pre_compaction_size: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub post_compaction_size: Option<String>,
// network topology change
#[serde(skip_serializing_if = "Option::is_none")]
pub moved_documents: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl DetailsView {
@@ -166,17 +157,6 @@ impl DetailsView {
(None, Some(doc)) | (Some(doc), None) => Some(doc),
(Some(left), Some(right)) => Some(left + right),
},
moved_documents: match (self.moved_documents, other.moved_documents) {
(None, None) => None,
(None, Some(doc)) | (Some(doc), None) => Some(doc),
(Some(left), Some(right)) => Some(left + right),
},
message: match (&mut self.message, &other.message) {
(None, None) => None,
(None, Some(message)) => Some(message.clone()),
(Some(message), None) => Some(std::mem::take(message)),
(Some(message), Some(_)) => Some(std::mem::take(message)),
},
indexed_documents: match (self.indexed_documents, other.indexed_documents) {
(None, None) => None,
(None, Some(None)) | (Some(None), None) | (Some(None), Some(None)) => Some(None),
@@ -467,14 +447,6 @@ impl From<Details> for DetailsView {
..Default::default()
}
}
Details::NetworkTopologyChange { moved_documents, received_documents, message } => {
DetailsView {
moved_documents: Some(moved_documents),
received_documents: Some(received_documents),
message: Some(message),
..Default::default()
}
}
}
}
}

View File

@@ -23,8 +23,6 @@ use crate::{versioning, InstanceUid};
pub type TaskId = u32;
pub mod enterprise_edition;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Task {
@@ -46,10 +44,7 @@ pub struct Task {
pub kind: KindWithContent,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub network: Option<enterprise_edition::network::DbTaskNetwork>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub custom_metadata: Option<String>,
pub network: Option<TaskNetwork>,
}
impl Task {
@@ -63,7 +58,6 @@ impl Task {
| TaskDeletion { .. }
| Export { .. }
| UpgradeDatabase { .. }
| NetworkTopologyChange { .. }
| IndexSwap { .. } => None,
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -102,7 +96,6 @@ impl Task {
| KindWithContent::SnapshotCreation
| KindWithContent::Export { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::NetworkTopologyChange { .. }
| KindWithContent::IndexCompaction { .. } => None,
}
}
@@ -182,7 +175,6 @@ pub enum KindWithContent {
IndexCompaction {
index_uid: String,
},
NetworkTopologyChange(enterprise_edition::network::NetworkTopologyChange),
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
@@ -220,7 +212,6 @@ impl KindWithContent {
KindWithContent::Export { .. } => Kind::Export,
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
KindWithContent::IndexCompaction { .. } => Kind::IndexCompaction,
KindWithContent::NetworkTopologyChange { .. } => Kind::NetworkTopologyChange,
}
}
@@ -233,7 +224,6 @@ impl KindWithContent {
| TaskCancelation { .. }
| TaskDeletion { .. }
| Export { .. }
| NetworkTopologyChange { .. }
| UpgradeDatabase { .. } => vec![],
DocumentAdditionOrUpdate { index_uid, .. }
| DocumentEdition { index_uid, .. }
@@ -347,11 +337,6 @@ impl KindWithContent {
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange { .. } => Some(Details::NetworkTopologyChange {
moved_documents: 0,
received_documents: 0,
message: "processing tasks for previous network versions".into(),
}),
}
}
@@ -404,7 +389,7 @@ impl KindWithContent {
})
}
KindWithContent::IndexSwap { .. } => {
unimplemented!("do not call `default_finished_details` for `IndexSwap` tasks")
todo!()
}
KindWithContent::TaskCancelation { query, tasks } => Some(Details::TaskCancelation {
matched_tasks: tasks.len(),
@@ -439,9 +424,6 @@ impl KindWithContent {
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange(network_topology_change) => {
Some(network_topology_change.to_details())
}
}
}
}
@@ -509,9 +491,6 @@ impl From<&KindWithContent> for Option<Details> {
pre_compaction_size: None,
post_compaction_size: None,
}),
KindWithContent::NetworkTopologyChange(network_topology_change) => {
Some(network_topology_change.to_details())
}
}
}
}
@@ -623,7 +602,6 @@ pub enum Kind {
Export,
UpgradeDatabase,
IndexCompaction,
NetworkTopologyChange,
}
impl Kind {
@@ -643,7 +621,6 @@ impl Kind {
| Kind::DumpCreation
| Kind::Export
| Kind::UpgradeDatabase
| Kind::NetworkTopologyChange
| Kind::SnapshotCreation => false,
}
}
@@ -666,7 +643,6 @@ impl Display for Kind {
Kind::Export => write!(f, "export"),
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
Kind::IndexCompaction => write!(f, "indexCompaction"),
Kind::NetworkTopologyChange => write!(f, "networkTopologyChange"),
}
}
}
@@ -704,8 +680,6 @@ impl FromStr for Kind {
Ok(Kind::UpgradeDatabase)
} else if kind.eq_ignore_ascii_case("indexCompaction") {
Ok(Kind::IndexCompaction)
} else if kind.eq_ignore_ascii_case("networkTopologyChange") {
Ok(Kind::NetworkTopologyChange)
} else {
Err(ParseTaskKindError(kind.to_owned()))
}
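
An in-context sketch of the parser's case-insensitive behavior (assuming `Kind` implements `PartialEq` and `Debug`, which the assertions need):

use std::str::FromStr;

assert_eq!(Kind::from_str("indexCompaction").unwrap(), Kind::IndexCompaction);
// Case is ignored:
assert_eq!(Kind::from_str("INDEXCOMPACTION").unwrap(), Kind::IndexCompaction);
// With this change, the removed variant no longer parses:
assert!(Kind::from_str("networkTopologyChange").is_err());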
@@ -796,11 +770,36 @@ pub enum Details {
pre_compaction_size: Option<Byte>,
post_compaction_size: Option<Byte>,
},
NetworkTopologyChange {
moved_documents: u64,
received_documents: u64,
message: String,
},
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(untagged, rename_all = "camelCase")]
pub enum TaskNetwork {
Origin { origin: Origin },
Remotes { remote_tasks: BTreeMap<String, RemoteTask> },
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct Origin {
pub remote_name: String,
pub task_uid: usize,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct RemoteTask {
#[serde(skip_serializing_if = "Option::is_none")]
task_uid: Option<TaskId>,
error: Option<ResponseError>,
}
impl From<Result<TaskId, ResponseError>> for RemoteTask {
fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
match res {
Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
Err(err) => RemoteTask { task_uid: None, error: Some(err) },
}
}
}
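
Since the restored enum is #[serde(untagged)], the variant name never appears on the wire; only the fields do. A standalone sketch with mirrored minimal types (assuming serde and serde_json as dependencies):

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct OriginSketch {
    remote_name: String,
    task_uid: usize,
}

#[derive(Serialize)]
#[serde(untagged)]
enum TaskNetworkSketch {
    Origin { origin: OriginSketch },
}

fn main() {
    let network = TaskNetworkSketch::Origin {
        origin: OriginSketch { remote_name: "ms-0".into(), task_uid: 12 },
    };
    // The `Origin` variant name is absent from the serialized form.
    assert_eq!(
        serde_json::to_string(&network).unwrap(),
        r#"{"origin":{"remoteName":"ms-0","taskUid":12}}"#
    );
}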
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
@@ -843,9 +842,6 @@ impl Details {
| Self::Export { .. }
| Self::UpgradeDatabase { .. }
| Self::IndexSwap { .. } => (),
Self::NetworkTopologyChange { moved_documents: _, received_documents: _, message } => {
*message = format!("Failed. Previous status: {}", message);
}
}
details

View File

@@ -1,6 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
pub mod network;

View File

@@ -1,564 +0,0 @@
// Copyright © 2025 Meilisearch Some Rights Reserved
// This file is part of Meilisearch Enterprise Edition (EE).
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use std::collections::BTreeMap;
use milli::DocumentId;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use uuid::Uuid;
use crate::enterprise_edition::network::{Network, Remote};
use crate::error::ResponseError;
use crate::tasks::{Details, TaskId};
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(untagged, rename_all = "camelCase")]
// This type is used in the database, care should be taken when modifying it.
pub enum DbTaskNetwork {
/// Tasks that were duplicated from `origin`
Origin { origin: Origin },
/// Tasks that were duplicated as `remote_tasks`
Remotes {
remote_tasks: BTreeMap<String, RemoteTask>,
#[serde(default)]
network_version: Uuid,
},
/// Document import tasks sent in the context of `network_change`
Import { import_from: ImportData, network_change: Origin },
}
impl DbTaskNetwork {
pub fn network_version(&self) -> Uuid {
match self {
DbTaskNetwork::Origin { origin } => origin.network_version,
DbTaskNetwork::Remotes { remote_tasks: _, network_version } => *network_version,
DbTaskNetwork::Import { import_from: _, network_change } => {
network_change.network_version
}
}
}
pub fn import_data(&self) -> Option<&ImportData> {
match self {
DbTaskNetwork::Origin { .. } | DbTaskNetwork::Remotes { .. } => None,
DbTaskNetwork::Import { import_from, .. } => Some(import_from),
}
}
pub fn origin(&self) -> Option<&Origin> {
match self {
DbTaskNetwork::Origin { origin } => Some(origin),
DbTaskNetwork::Remotes { .. } => None,
DbTaskNetwork::Import { network_change, .. } => Some(network_change),
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum TaskNetwork {
/// Tasks that were duplicated from `origin`
Origin { origin: Origin },
/// Tasks that were duplicated as `remote_tasks`
Remotes { remote_tasks: BTreeMap<String, RemoteTask>, network_version: Uuid },
/// Document import tasks sent in the context of `network_change`
Import { import_from: ImportData, network_change: Origin, metadata: ImportMetadata },
}
impl From<TaskNetwork> for DbTaskNetwork {
fn from(value: TaskNetwork) -> Self {
match value {
TaskNetwork::Origin { origin } => DbTaskNetwork::Origin { origin },
TaskNetwork::Remotes { remote_tasks, network_version } => {
DbTaskNetwork::Remotes { remote_tasks, network_version }
}
TaskNetwork::Import { import_from, network_change, metadata: _ } => {
DbTaskNetwork::Import { import_from, network_change }
}
}
}
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct Origin {
pub remote_name: String,
pub task_uid: u32,
pub network_version: Uuid,
}
/// Import data stored in a task
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct ImportData {
pub remote_name: String,
pub index_name: String,
pub document_count: u64,
}
/// Import metadata associated with a task but not stored in the task
#[derive(Debug, PartialEq, Clone)]
pub struct ImportMetadata {
/// Total number of indexes to import from this host
pub index_count: u64,
/// Key unique to this (network_change, index, host) combination.
///
/// In practice, an internal document id of one of the documents to import.
pub task_key: DocumentId,
/// Total number of documents to import for this index from this host.
pub total_index_documents: u64,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct RemoteTask {
#[serde(skip_serializing_if = "Option::is_none")]
task_uid: Option<TaskId>,
error: Option<ResponseError>,
}
impl From<Result<TaskId, ResponseError>> for RemoteTask {
fn from(res: Result<TaskId, ResponseError>) -> RemoteTask {
match res {
Ok(task_uid) => RemoteTask { task_uid: Some(task_uid), error: None },
Err(err) => RemoteTask { task_uid: None, error: Some(err) },
}
}
}
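
An in-context sketch of how the conversion is used when fanning a task out to remotes (the uid is hypothetical):

// Success and failure both collapse into a RemoteTask entry.
let accepted: RemoteTask = Ok::<TaskId, ResponseError>(42).into();
// `accepted` now holds task_uid: Some(42) and error: None; an Err would
// populate `error` instead.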
/// Contains the full state of a network topology change.
///
/// A network topology change task is unique in that it can be processed in multiple different batches, as its resolution
/// depends on various document addition tasks being processed.
///
/// A network topology task has 4 states:
///
/// 1. Processing any task that was meant for an earlier version of the network. This is necessary to know that we have the right version of
/// documents.
/// 2. Sending all documents that must be moved to other remotes.
/// 3. Processing any task coming from the remotes.
/// 4. Finished.
///
/// Furthermore, it maintains some stats (the number of moved and received documents).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NetworkTopologyChange {
state: NetworkTopologyState,
// in name, `None` if the node is no longer part of the network
#[serde(skip_serializing_if = "Option::is_none")]
in_name: Option<String>,
// out name, `None` if the node is new to the network
#[serde(skip_serializing_if = "Option::is_none")]
out_name: Option<String>,
out_remotes: BTreeMap<String, Remote>,
in_remotes: BTreeMap<String, InRemote>,
stats: NetworkTopologyStats,
}
impl NetworkTopologyChange {
pub fn new(old_network: Network, new_network: Network) -> Self {
// we use our old name as export name
let out_name = old_network.local;
// we use our new name as import name
let in_name = new_network.local;
// we export to the new network
let mut out_remotes = new_network.remotes;
// don't export to ourselves
if let Some(in_name) = &in_name {
out_remotes.remove(in_name);
}
// FIXME: doesn't work if the old network is not the same for old nodes
let in_remotes = old_network
.remotes
.into_keys()
// don't await imports from ourselves
.filter(|name| Some(name.as_str()) != out_name.as_deref())
.map(|name| (name, InRemote::new()))
.collect();
Self {
state: NetworkTopologyState::WaitingForOlderTasks,
in_name,
out_name,
out_remotes,
in_remotes,
stats: NetworkTopologyStats { received_documents: 0, moved_documents: 0 },
}
}
pub fn state(&self) -> NetworkTopologyState {
self.state
}
pub fn out_name(&self) -> Option<&str> {
// `None` if the node is new to the network (it had no name in the old network)
self.out_name.as_deref()
}
pub fn in_name(&self) -> Option<&str> {
self.in_name.as_deref()
}
pub fn export_to_process(&self) -> Option<(&BTreeMap<String, Remote>, &str)> {
if self.state != NetworkTopologyState::ExportingDocuments {
return None;
}
if self.out_remotes.is_empty() {
return None;
}
let out_name = self.out_name()?;
Some((&self.out_remotes, out_name))
}
/// Compute the next state from the current state of the task.
pub fn update_state(&mut self) {
self.state = match self.state {
NetworkTopologyState::WaitingForOlderTasks => {
// no more older tasks, so finished waiting
NetworkTopologyState::ExportingDocuments
}
NetworkTopologyState::ExportingDocuments => {
// processed all exported documents
NetworkTopologyState::ImportingDocuments
}
NetworkTopologyState::ImportingDocuments => {
if self.is_import_finished() {
NetworkTopologyState::Finished
} else {
NetworkTopologyState::ImportingDocuments
}
}
NetworkTopologyState::Finished => NetworkTopologyState::Finished,
};
}
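
The transitions above form a small forward-only state machine. A standalone sketch of the same table, with `import_finished` standing in for `is_import_finished()`:

#[derive(Clone, Copy, PartialEq, Debug)]
enum State {
    WaitingForOlderTasks,
    ExportingDocuments,
    ImportingDocuments,
    Finished,
}

// Mirrors update_state: states only move forward, and the importing state
// is re-entered until every remote reports completion.
fn next(state: State, import_finished: bool) -> State {
    match state {
        State::WaitingForOlderTasks => State::ExportingDocuments,
        State::ExportingDocuments => State::ImportingDocuments,
        State::ImportingDocuments if import_finished => State::Finished,
        State::ImportingDocuments => State::ImportingDocuments,
        State::Finished => State::Finished,
    }
}

fn main() {
    let mut state = State::WaitingForOlderTasks;
    state = next(state, false); // older tasks drained
    state = next(state, false); // exports done
    assert_eq!(state, State::ImportingDocuments);
    assert_eq!(next(state, true), State::Finished);
}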
pub fn receive_remote_task(
&mut self,
remote_name: &str,
index_name: &str,
task_key: DocumentId,
document_count: u64,
total_indexes: u64,
total_index_documents: u64,
) -> Result<(), ReceiveTaskError> {
let remote = self
.in_remotes
.get_mut(remote_name)
.ok_or_else(|| ReceiveTaskError::UnknownRemote(remote_name.to_string()))?;
remote.import_state = match std::mem::take(&mut remote.import_state) {
ImportState::WaitingForInitialTask => {
let mut import_index_state = BTreeMap::new();
import_index_state.insert(
index_name.to_owned(),
ImportIndexState::Ongoing {
total_documents: total_index_documents,
received_documents: document_count,
task_keys: vec![task_key],
processed_documents: 0,
},
);
ImportState::Ongoing { import_index_state, total_indexes }
}
ImportState::Ongoing { mut import_index_state, total_indexes } => {
if let Some((index_name, mut index_state)) =
import_index_state.remove_entry(index_name)
{
index_state = match index_state {
ImportIndexState::Ongoing {
total_documents,
received_documents: previously_received,
processed_documents,
mut task_keys,
} => {
if task_keys.contains(&task_key) {
return Err(ReceiveTaskError::DuplicateTask(task_key));
}
task_keys.push(task_key);
ImportIndexState::Ongoing {
total_documents,
received_documents: previously_received + document_count,
processed_documents,
task_keys,
}
}
ImportIndexState::Finished { total_documents } => {
ImportIndexState::Finished { total_documents }
}
};
import_index_state.insert(index_name, index_state);
} else {
let state = ImportIndexState::Ongoing {
total_documents: total_index_documents,
received_documents: document_count,
processed_documents: 0,
task_keys: vec![task_key],
};
import_index_state.insert(index_name.to_string(), state);
}
ImportState::Ongoing { import_index_state, total_indexes }
}
ImportState::Finished { total_indexes, total_documents } => {
ImportState::Finished { total_indexes, total_documents }
}
};
Ok(())
}
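
The task_keys check above is what makes deliveries idempotent. A standalone sketch of that dedup rule (DocumentId is assumed to be a plain u32 here):

// A key may be recorded at most once per index; retries are refused.
fn record_key(task_keys: &mut Vec<u32>, task_key: u32) -> Result<(), u32> {
    if task_keys.contains(&task_key) {
        return Err(task_key); // duplicate delivery
    }
    task_keys.push(task_key);
    Ok(())
}

fn main() {
    let mut keys = Vec::new();
    assert_eq!(record_key(&mut keys, 17), Ok(()));
    assert_eq!(record_key(&mut keys, 17), Err(17)); // same task key: rejected
}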
pub fn process_remote_tasks(
&mut self,
remote_name: &str,
index_name: &str,
document_count: u64,
) {
// FIXME: unwraps and panics
let remote = self.in_remotes.get_mut(remote_name).unwrap();
remote.import_state = match std::mem::take(&mut remote.import_state) {
ImportState::WaitingForInitialTask => panic!("no task received yet, but one was processed"),
ImportState::Ongoing { mut import_index_state, total_indexes } => {
let (index_name, mut index_state) =
import_index_state.remove_entry(index_name).unwrap();
index_state = match index_state {
ImportIndexState::Ongoing {
total_documents,
received_documents,
processed_documents: previously_processed,
task_keys,
} => {
let newly_processed_documents = previously_processed + document_count;
if newly_processed_documents >= total_documents {
ImportIndexState::Finished { total_documents }
} else {
ImportIndexState::Ongoing {
total_documents,
received_documents,
processed_documents: newly_processed_documents,
task_keys,
}
}
}
ImportIndexState::Finished { total_documents } => {
ImportIndexState::Finished { total_documents }
}
};
import_index_state.insert(index_name, index_state);
if import_index_state.len() as u64 == total_indexes
&& import_index_state.values().all(|index| index.is_finished())
{
let total_documents =
import_index_state.values().map(|index| index.total_documents()).sum();
ImportState::Finished { total_indexes, total_documents }
} else {
ImportState::Ongoing { import_index_state, total_indexes }
}
}
ImportState::Finished { total_indexes, total_documents } => {
ImportState::Finished { total_indexes, total_documents }
}
}
}
pub fn to_details(&self) -> Details {
let message = match self.state {
NetworkTopologyState::WaitingForOlderTasks => {
"Waiting for tasks enqueued before the network change to finish processing".into()
}
NetworkTopologyState::ExportingDocuments => "Exporting documents".into(),
NetworkTopologyState::ImportingDocuments => {
let mut finished_count = 0;
let mut first_ongoing = None;
let mut ongoing_total_indexes = 0;
let mut ongoing_processed_documents = 0;
let mut ongoing_missing_documents = 0;
let mut ongoing_total_documents = 0;
let mut other_ongoing_count = 0;
let mut first_waiting = None;
let mut other_waiting_count = 0;
for (remote_name, in_remote) in &self.in_remotes {
match &in_remote.import_state {
ImportState::WaitingForInitialTask => {
first_waiting = match first_waiting {
None => Some(remote_name),
first_waiting => {
other_waiting_count += 1;
first_waiting
}
};
}
ImportState::Ongoing { import_index_state, total_indexes } => {
first_ongoing = match first_ongoing {
None => {
ongoing_total_indexes = *total_indexes;
Some(remote_name)
}
first_ongoing => {
other_ongoing_count += 1;
first_ongoing
}
};
for import_state in import_index_state.values() {
match import_state {
ImportIndexState::Ongoing {
total_documents,
processed_documents,
received_documents,
task_keys: _,
} => {
ongoing_total_documents += total_documents;
ongoing_processed_documents += processed_documents;
ongoing_missing_documents +=
total_documents.saturating_sub(*received_documents);
}
ImportIndexState::Finished { total_documents } => {
ongoing_total_documents += total_documents;
ongoing_processed_documents += total_documents;
}
}
}
}
ImportState::Finished { total_indexes, total_documents } => {
finished_count += 1;
ongoing_total_indexes = *total_indexes;
ongoing_total_documents += *total_documents;
ongoing_processed_documents += *total_documents;
}
}
}
format!(
"Importing documents from {total} remotes{waiting}{ongoing}{finished}",
total = self.in_remotes.len(),
waiting = if let Some(first_waiting) = first_waiting {
&format!(
", waiting on first task from `{}`{others}",
first_waiting,
others = if other_waiting_count > 0 {
&format!(" and {other_waiting_count} other remotes")
} else {
""
}
)
} else {
""
},
ongoing = if let Some(first_ongoing) = first_ongoing {
&format!(", awaiting {ongoing_missing_documents} and processed {ongoing_processed_documents} out of {ongoing_total_documents} documents in {ongoing_total_indexes} indexes from `{first_ongoing}`{others}",
others=if other_ongoing_count > 0 {&format!(" and {other_ongoing_count} other remotes")} else {""})
} else {
""
},
finished = if finished_count > 0 {
&format!(", {finished_count} remotes finished processing")
} else {
""
}
)
}
NetworkTopologyState::Finished => "Finished".into(),
};
Details::NetworkTopologyChange {
moved_documents: self.stats.moved_documents,
received_documents: self.stats.received_documents,
message,
}
}
fn is_import_finished(&self) -> bool {
self.in_remotes.values().all(|remote| remote.is_finished())
}
}
pub enum ReceiveTaskError {
UnknownRemote(String),
DuplicateTask(DocumentId),
}
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum NetworkTopologyState {
WaitingForOlderTasks,
ExportingDocuments,
ImportingDocuments,
Finished,
}
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NetworkTopologyStats {
pub received_documents: u64,
pub moved_documents: u64,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InRemote {
import_state: ImportState,
}
impl InRemote {
pub fn new() -> Self {
Self { import_state: ImportState::WaitingForInitialTask }
}
pub fn is_finished(&self) -> bool {
matches!(self.import_state, ImportState::Finished { .. })
}
}
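
A tiny in-context sketch: a freshly tracked remote starts in WaitingForInitialTask, so it reports unfinished until its import completes:

let remote = InRemote::new();
assert!(!remote.is_finished()); // still ImportState::WaitingForInitialTask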
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum ImportState {
/// Initially Meilisearch doesn't know how many documents it should expect from a remote.
/// The first task from each remote indicates how many indexes will be imported,
/// and the first task for each index gives the number of documents to import for that index.
#[default]
WaitingForInitialTask,
Ongoing {
import_index_state: BTreeMap<String, ImportIndexState>,
total_indexes: u64,
},
Finished {
total_indexes: u64,
total_documents: u64,
},
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum ImportIndexState {
Ongoing {
total_documents: u64,
received_documents: u64,
processed_documents: u64,
task_keys: Vec<DocumentId>,
},
Finished {
total_documents: u64,
},
}
impl ImportIndexState {
pub fn is_finished(&self) -> bool {
matches!(self, ImportIndexState::Finished { .. })
}
fn total_documents(&self) -> u64 {
match *self {
ImportIndexState::Ongoing { total_documents, .. }
| ImportIndexState::Finished { total_documents } => total_documents,
}
}
}
pub mod headers {
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
pub const PROXY_ORIGIN_NETWORK_VERSION_HEADER: &str = "Meili-Proxy-Origin-Network-Version";
pub const PROXY_IMPORT_REMOTE_HEADER: &str = "Meili-Proxy-Import-Remote";
pub const PROXY_IMPORT_INDEX_COUNT_HEADER: &str = "Meili-Proxy-Import-Index-Count";
pub const PROXY_IMPORT_INDEX_HEADER: &str = "Meili-Proxy-Import-Index";
pub const PROXY_IMPORT_TASK_KEY_HEADER: &str = "Meili-Proxy-Import-Task-Key";
pub const PROXY_IMPORT_DOCS_HEADER: &str = "Meili-Proxy-Import-Docs";
pub const PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER: &str = "Meili-Proxy-Import-Total-Index-Docs";
}
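
An in-context sketch of how a proxying node might assemble these headers for one import request, using the constants above (the values are hypothetical):

fn print_import_headers() {
    let headers = [
        (headers::PROXY_IMPORT_REMOTE_HEADER, "ms-1"),
        (headers::PROXY_IMPORT_INDEX_HEADER, "movies"),
        (headers::PROXY_IMPORT_TASK_KEY_HEADER, "17"),
        (headers::PROXY_IMPORT_DOCS_HEADER, "500"),
        (headers::PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER, "1500"),
    ];
    for (name, value) in headers {
        println!("{name}: {value}");
    }
}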

View File

@@ -11,24 +11,6 @@ pub struct Webhook {
pub headers: BTreeMap<String, String>,
}
impl Webhook {
pub fn redact_authorization_header(&mut self) {
// headers are case-insensitive, so to make the redaction robust we iterate over qualifying headers
// rather than getting one canonical `Authorization` header.
for value in self
.headers
.iter_mut()
.filter_map(|(name, value)| name.eq_ignore_ascii_case("authorization").then_some(value))
{
if value.starts_with("Bearer ") {
crate::settings::hide_secret(value, "Bearer ".len());
} else {
crate::settings::hide_secret(value, 0);
}
}
}
}
#[derive(Debug, Serialize, Default, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct WebhooksView {

View File

@@ -208,7 +208,6 @@ struct Infos {
experimental_no_edition_2024_for_prefix_post_processing: bool,
experimental_no_edition_2024_for_facet_post_processing: bool,
experimental_vector_store_setting: bool,
experimental_personalization: bool,
gpu_enabled: bool,
db_path: bool,
import_dump: bool,
@@ -218,7 +217,6 @@ struct Infos {
import_snapshot: bool,
schedule_snapshot: Option<u64>,
snapshot_dir: bool,
uses_s3_snapshots: bool,
ignore_missing_snapshot: bool,
ignore_snapshot_if_db_exists: bool,
http_addr: bool,
@@ -287,8 +285,6 @@ impl Infos {
indexer_options,
config_file_path,
no_analytics: _,
experimental_personalization_api_key,
s3_snapshot_options,
} = options;
let schedule_snapshot = match schedule_snapshot {
@@ -352,7 +348,6 @@ impl Infos {
import_snapshot: import_snapshot.is_some(),
schedule_snapshot,
snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
uses_s3_snapshots: s3_snapshot_options.is_some(),
ignore_missing_snapshot,
ignore_snapshot_if_db_exists,
http_addr: http_addr != default_http_addr(),
@@ -376,7 +371,6 @@ impl Infos {
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
experimental_personalization: experimental_personalization_api_key.is_some(),
}
}
}

View File

@@ -6,10 +6,6 @@ use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
use meilisearch_types::milli;
use meilisearch_types::milli::OrderBy;
use meilisearch_types::tasks::enterprise_edition::network::headers::{
PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
};
use serde_json::Value;
use tokio::task::JoinError;
@@ -42,8 +38,6 @@ pub enum MeilisearchHttpError {
PaginationInFederatedQuery(usize, &'static str),
#[error("Inside `.queries[{0}]`: Using facet options is not allowed in federated queries.\n - Hint: remove `facets` from query #{0} or remove `federation` from the request\n - Hint: pass `federation.facetsByIndex.{1}: {2:?}` for facets in federated search")]
FacetsInFederatedQuery(usize, String, Vec<String>),
#[error("Inside `.queries[{0}]`: Using `.personalize` is not allowed in federated queries.\n - Hint: remove `personalize` from query #{0} or remove `federation` from the request")]
PersonalizationInFederatedQuery(usize),
#[error("Inconsistent order for values in facet `{facet}`: index `{previous_uid}` orders {previous_facet_order}, but index `{current_uid}` orders {index_facet_order}.\n - Hint: Remove `federation.mergeFacets` or change `faceting.sortFacetValuesBy` to be consistent in settings.")]
InconsistentFacetOrder {
facet: String,
@@ -97,49 +91,8 @@ pub enum MeilisearchHttpError {
} else { PROXY_ORIGIN_TASK_UID_HEADER }
)]
InconsistentOriginHeaders { is_remote_missing: bool },
#[error("Inconsistent `Import` headers: {remote}: {remote_status}, {index}: {index_status}, {docs}: {docs_status}.\n - Hint: either all three headers should be provided, or none of them",
remote = PROXY_IMPORT_REMOTE_HEADER,
remote_status = if *is_remote_missing { "missing" } else{ "provided" },
index = PROXY_IMPORT_INDEX_HEADER,
index_status = if *is_index_missing { "missing" } else { "provided" },
docs = PROXY_IMPORT_DOCS_HEADER,
docs_status = if *is_docs_missing { "missing" } else { "provided" }
)]
InconsistentImportHeaders {
is_remote_missing: bool,
is_index_missing: bool,
is_docs_missing: bool,
},
#[error("Inconsistent `Import-Metadata` headers: {index_count}: {index_count_status}, {task_key}: {task_key_status}, {total_index_documents}: {total_index_documents_status}.\n - Hint: either all three headers should be provided, or none of them",
index_count = PROXY_IMPORT_INDEX_COUNT_HEADER,
index_count_status = if *is_index_count_missing { "missing" } else { "provided"},
task_key = PROXY_IMPORT_TASK_KEY_HEADER,
task_key_status = if *is_task_key_missing { "missing" } else { "provided"},
total_index_documents = PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
total_index_documents_status = if *is_total_index_documents_missing { "missing" } else { "provided"},
)]
InconsistentImportMetadataHeaders {
is_index_count_missing: bool,
is_task_key_missing: bool,
is_total_index_documents_missing: bool,
},
#[error(
"Inconsistent task network headers: origin headers: {origin_status}, import headers: {import_status}, import metadata: {import_metadata_status}",
origin_status = if *is_missing_origin { "missing"} else { "present" },
import_status = if *is_missing_import { "missing"} else { "present" },
import_metadata_status = if *is_missing_import_metadata { "missing"} else { "present" })]
InconsistentTaskNetworkHeaders {
is_missing_origin: bool,
is_missing_import: bool,
is_missing_import_metadata: bool,
},
#[error("Invalid value for header {header_name}: {msg}")]
InvalidHeaderValue { header_name: &'static str, msg: String },
#[error("This remote is not the leader of the network.\n - Note: only the leader `{leader}` can receive new tasks.")]
NotLeader { leader: String },
#[error("Unexpected `previousRemotes` in network call.\n - Note: `previousRemote` is reserved for internal use.")]
UnexpectedNetworkPreviousRemotes,
}
impl MeilisearchHttpError {
@@ -184,20 +137,10 @@ impl ErrorCode for MeilisearchHttpError {
MeilisearchHttpError::InconsistentFacetOrder { .. } => {
Code::InvalidMultiSearchFacetOrder
}
MeilisearchHttpError::PersonalizationInFederatedQuery(_) => {
Code::InvalidMultiSearchQueryPersonalization
}
MeilisearchHttpError::InconsistentOriginHeaders { .. }
| MeilisearchHttpError::InconsistentImportHeaders { .. }
| MeilisearchHttpError::InconsistentImportMetadataHeaders { .. }
| MeilisearchHttpError::InconsistentTaskNetworkHeaders { .. } => {
MeilisearchHttpError::InconsistentOriginHeaders { .. } => {
Code::InconsistentDocumentChangeHeaders
}
MeilisearchHttpError::InvalidHeaderValue { .. } => Code::InvalidHeaderValue,
MeilisearchHttpError::NotLeader { .. } => Code::NotLeader,
MeilisearchHttpError::UnexpectedNetworkPreviousRemotes => {
Code::UnexpectedNetworkPreviousRemotes
}
}
}
}

View File

@@ -11,7 +11,6 @@ pub mod middleware;
pub mod option;
#[cfg(test)]
mod option_test;
pub mod personalization;
pub mod routes;
pub mod search;
pub mod search_queue;
@@ -59,7 +58,6 @@ use tracing::{error, info_span};
use tracing_subscriber::filter::Targets;
use crate::error::MeilisearchHttpError;
use crate::personalization::PersonalizationService;
/// Default number of simultaneously opened indexes.
///
@@ -130,8 +128,12 @@ pub type LogStderrType = tracing_subscriber::filter::Filtered<
>;
pub fn create_app(
services: ServicesData,
index_scheduler: Data<IndexScheduler>,
auth_controller: Data<AuthController>,
search_queue: Data<SearchQueue>,
opt: Opt,
logs: (LogRouteHandle, LogStderrHandle),
analytics: Data<Analytics>,
enable_dashboard: bool,
) -> actix_web::App<
impl ServiceFactory<
@@ -143,7 +145,17 @@ pub fn create_app(
>,
> {
let app = actix_web::App::new()
.configure(|s| configure_data(s, services, &opt))
.configure(|s| {
configure_data(
s,
index_scheduler.clone(),
auth_controller.clone(),
search_queue.clone(),
&opt,
logs,
analytics.clone(),
)
})
.configure(routes::configure)
.configure(|s| dashboard(s, enable_dashboard));
@@ -204,10 +216,7 @@ enum OnFailure {
KeepDb,
}
pub fn setup_meilisearch(
opt: &Opt,
handle: tokio::runtime::Handle,
) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
let index_scheduler_opt = IndexSchedulerOptions {
version_file_path: opt.db_path.join(VERSION_FILE_NAME),
auth_path: opt.db_path.join("auth"),
@@ -221,11 +230,7 @@ pub fn setup_meilisearch(
task_db_size: opt.max_task_db_size.as_u64() as usize,
index_base_map_size: opt.max_index_size.as_u64() as usize,
enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
indexer_config: Arc::new({
let s3_snapshot_options =
opt.s3_snapshot_options.clone().map(|opt| opt.try_into()).transpose()?;
IndexerConfig { s3_snapshot_options, ..(&opt.indexer_options).try_into()? }
}),
indexer_config: Arc::new((&opt.indexer_options).try_into()?),
autobatching_enabled: true,
cleanup_enabled: !opt.experimental_replication_parameters,
max_number_of_tasks: 1_000_000,
@@ -251,7 +256,6 @@ pub fn setup_meilisearch(
index_scheduler_opt,
OnFailure::RemoveDb,
binary_version, // the db is empty
handle,
)?,
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
@@ -269,7 +273,7 @@ pub fn setup_meilisearch(
bail!("snapshot doesn't exist at {}", snapshot_path.display())
// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
} else {
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
}
} else if let Some(ref path) = opt.import_dump {
let src_path_exists = path.exists();
@@ -280,7 +284,6 @@ pub fn setup_meilisearch(
index_scheduler_opt,
OnFailure::RemoveDb,
binary_version, // the db is empty
handle,
)?;
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
Ok(()) => (index_scheduler, auth_controller),
@@ -301,10 +304,10 @@ pub fn setup_meilisearch(
// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
} else {
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
}
} else {
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version, handle)?
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
};
// We create a loop in a thread that registers snapshotCreation tasks
@@ -335,7 +338,6 @@ fn open_or_create_database_unchecked(
index_scheduler_opt: IndexSchedulerOptions,
on_failure: OnFailure,
version: (u32, u32, u32),
handle: tokio::runtime::Handle,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
// we don't want to create anything in the data.ms yet, thus we
// wrap our two builders in a closure that'll be executed later.
@@ -343,7 +345,7 @@ fn open_or_create_database_unchecked(
let auth_env = open_auth_store_env(&index_scheduler_opt.auth_path).unwrap();
let auth_controller = AuthController::new(auth_env.clone(), &opt.master_key);
let index_scheduler_builder = || -> anyhow::Result<_> {
Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version, Some(handle))?)
Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version)?)
};
match (
@@ -450,7 +452,6 @@ fn open_or_create_database(
index_scheduler_opt: IndexSchedulerOptions,
empty_db: bool,
binary_version: (u32, u32, u32),
handle: tokio::runtime::Handle,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
let version = if !empty_db {
check_version(opt, &index_scheduler_opt, binary_version)?
@@ -458,7 +459,7 @@ fn open_or_create_database(
binary_version
};
open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version, handle)
open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version)
}
fn import_dump(
@@ -526,11 +527,7 @@ fn import_dump(
let indexer_config = if base_config.max_threads.is_none() {
let (thread_pool, _) = default_thread_pool_and_threads();
let _config = IndexerConfig {
thread_pool,
s3_snapshot_options: base_config.s3_snapshot_options.clone(),
..*base_config
};
let _config = IndexerConfig { thread_pool, ..*base_config };
backup_config = _config;
&backup_config
} else {
@@ -678,26 +675,23 @@ fn import_dump(
Ok(index_scheduler_dump.finish()?)
}
pub fn configure_data(config: &mut web::ServiceConfig, services: ServicesData, opt: &Opt) {
let ServicesData {
index_scheduler,
auth,
search_queue,
personalization_service,
logs_route_handle,
logs_stderr_handle,
analytics,
} = services;
pub fn configure_data(
config: &mut web::ServiceConfig,
index_scheduler: Data<IndexScheduler>,
auth: Data<AuthController>,
search_queue: Data<SearchQueue>,
opt: &Opt,
(logs_route, logs_stderr): (LogRouteHandle, LogStderrHandle),
analytics: Data<Analytics>,
) {
let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize;
config
.app_data(index_scheduler)
.app_data(auth)
.app_data(search_queue)
.app_data(analytics)
.app_data(personalization_service)
.app_data(logs_route_handle)
.app_data(logs_stderr_handle)
.app_data(web::Data::new(logs_route))
.app_data(web::Data::new(logs_stderr))
.app_data(web::Data::new(opt.clone()))
.app_data(
web::JsonConfig::default()
@@ -758,14 +752,3 @@ pub fn dashboard(config: &mut web::ServiceConfig, enable_frontend: bool) {
pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
config.service(web::resource("/").route(web::get().to(routes::running)));
}
#[derive(Clone)]
pub struct ServicesData {
pub index_scheduler: Data<IndexScheduler>,
pub auth: Data<AuthController>,
pub search_queue: Data<SearchQueue>,
pub personalization_service: Data<PersonalizationService>,
pub logs_route_handle: Data<LogRouteHandle>,
pub logs_stderr_handle: Data<LogStderrHandle>,
pub analytics: Data<Analytics>,
}

View File

@@ -14,11 +14,10 @@ use index_scheduler::IndexScheduler;
use is_terminal::IsTerminal;
use meilisearch::analytics::Analytics;
use meilisearch::option::LogMode;
use meilisearch::personalization::PersonalizationService;
use meilisearch::search_queue::SearchQueue;
use meilisearch::{
analytics, create_app, setup_meilisearch, LogRouteHandle, LogRouteType, LogStderrHandle,
LogStderrType, Opt, ServicesData, SubscriberForSecondLayer,
LogStderrType, Opt, SubscriberForSecondLayer,
};
use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
@@ -77,10 +76,7 @@ fn on_panic(info: &std::panic::PanicHookInfo) {
#[actix_web::main]
async fn main() -> anyhow::Result<()> {
// won't panic inside of tokio::main
let runtime = tokio::runtime::Handle::current();
try_main(runtime).await.inspect_err(|error| {
try_main().await.inspect_err(|error| {
tracing::error!(%error);
let mut current = error.source();
let mut depth = 0;
@@ -92,7 +88,7 @@ async fn main() -> anyhow::Result<()> {
})
}
async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
async fn try_main() -> anyhow::Result<()> {
let (opt, config_read_from) = Opt::try_build()?;
std::panic::set_hook(Box::new(on_panic));
@@ -126,7 +122,7 @@ async fn try_main(runtime: tokio::runtime::Handle) -> anyhow::Result<()> {
_ => (),
}
let (index_scheduler, auth_controller) = setup_meilisearch(&opt, runtime)?;
let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;
let analytics =
analytics::Analytics::new(&opt, index_scheduler.clone(), auth_controller.clone()).await;
@@ -153,15 +149,8 @@ async fn run_http(
let enable_dashboard = &opt.env == "development";
let opt_clone = opt.clone();
let index_scheduler = Data::from(index_scheduler);
let auth = Data::from(auth_controller);
let auth_controller = Data::from(auth_controller);
let analytics = Data::from(analytics);
// Create personalization service with API key from options
let personalization_service = Data::new(
opt.experimental_personalization_api_key
.clone()
.map(PersonalizationService::cohere)
.unwrap_or_else(PersonalizationService::disabled),
);
let search_queue = SearchQueue::new(
opt.experimental_search_queue_size,
available_parallelism()
@@ -173,25 +162,21 @@ async fn run_http(
usize::from(opt.experimental_drop_search_after) as u64
));
let search_queue = Data::new(search_queue);
let (logs_route_handle, logs_stderr_handle) = logs;
let logs_route_handle = Data::new(logs_route_handle);
let logs_stderr_handle = Data::new(logs_stderr_handle);
let services = ServicesData {
index_scheduler,
auth,
search_queue,
personalization_service,
logs_route_handle,
logs_stderr_handle,
analytics,
};
let http_server =
HttpServer::new(move || create_app(services.clone(), opt.clone(), enable_dashboard))
// Disabling signals allows the server to terminate immediately when a user enters CTRL-C
.disable_signals()
.keep_alive(KeepAlive::Os);
let http_server = HttpServer::new(move || {
create_app(
index_scheduler.clone(),
auth_controller.clone(),
search_queue.clone(),
opt.clone(),
logs.clone(),
analytics.clone(),
enable_dashboard,
)
})
// Disabling signals allows the server to terminate immediately when a user enters CTRL-C
.disable_signals()
.keep_alive(KeepAlive::Os);
if let Some(config) = opt_clone.get_ssl_config()? {
http_server.bind_rustls_0_23(opt_clone.http_addr, config)?.run().await?;

View File

@@ -114,9 +114,4 @@ lazy_static! {
"Meilisearch Task Queue Size Until Stop Registering",
))
.expect("Can't create a metric");
pub static ref MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS: IntGauge = register_int_gauge!(opts!(
"meilisearch_personalized_search_requests",
"Meilisearch number of search requests with personalization"
))
.expect("Can't create a metric");
}

View File

@@ -7,13 +7,12 @@ use std::ops::Deref;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{env, fmt, fs};
use byte_unit::{Byte, ParseError, UnitType};
use clap::Parser;
use meilisearch_types::features::InstanceTogglableFeatures;
use meilisearch_types::milli::update::{IndexerConfig, S3SnapshotOptions};
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::ThreadPoolNoAbortBuilder;
use rustls::server::{ServerSessionMemoryCache, WebPkiClientVerifier};
use rustls::RootCertStore;
@@ -75,22 +74,6 @@ const MEILI_EXPERIMENTAL_EMBEDDING_CACHE_ENTRIES: &str =
const MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION: &str = "MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION";
const MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS: &str =
"MEILI_EXPERIMENTAL_NO_EDITION_2024_FOR_DUMPS";
const MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY: &str =
"MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY";
// Related to S3 snapshots
const MEILI_S3_BUCKET_URL: &str = "MEILI_S3_BUCKET_URL";
const MEILI_S3_BUCKET_REGION: &str = "MEILI_S3_BUCKET_REGION";
const MEILI_S3_BUCKET_NAME: &str = "MEILI_S3_BUCKET_NAME";
const MEILI_S3_SNAPSHOT_PREFIX: &str = "MEILI_S3_SNAPSHOT_PREFIX";
const MEILI_S3_ACCESS_KEY: &str = "MEILI_S3_ACCESS_KEY";
const MEILI_S3_SECRET_KEY: &str = "MEILI_S3_SECRET_KEY";
const MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS: &str = "MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS";
const MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL: &str = "MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL";
const MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS: &str =
"MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS";
const MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE: &str = "MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE";
const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
const DEFAULT_DB_PATH: &str = "./data.ms";
const DEFAULT_HTTP_ADDR: &str = "localhost:7700";
@@ -100,10 +83,6 @@ const DEFAULT_SNAPSHOT_DIR: &str = "snapshots/";
const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400;
const DEFAULT_SNAPSHOT_INTERVAL_SEC_STR: &str = "86400";
const DEFAULT_DUMP_DIR: &str = "dumps/";
const DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS: NonZeroUsize = NonZeroUsize::new(10).unwrap();
const DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL: u32 = 0;
const DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS: u64 = 8 * 3600; // 8 hours
const DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE: Byte = Byte::from_u64(375 * 1024 * 1024); // 375 MiB
const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY";
const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS";
@@ -496,20 +475,10 @@ pub struct Opt {
#[serde(default)]
pub experimental_no_snapshot_compaction: bool,
/// Experimental personalization API key feature.
///
/// Sets the API key for personalization features.
#[clap(long, env = MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY)]
pub experimental_personalization_api_key: Option<String>,
#[serde(flatten)]
#[clap(flatten)]
pub indexer_options: IndexerOpts,
#[serde(flatten)]
#[clap(flatten)]
pub s3_snapshot_options: Option<S3SnapshotOpts>,
/// Set the path to a configuration file that should be used to setup the engine.
/// Format must be TOML.
#[clap(long)]
@@ -611,8 +580,6 @@ impl Opt {
experimental_limit_batched_tasks_total_size,
experimental_embedding_cache_entries,
experimental_no_snapshot_compaction,
experimental_personalization_api_key,
s3_snapshot_options,
} = self;
export_to_env_if_not_present(MEILI_DB_PATH, db_path);
export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
@@ -713,22 +680,7 @@ impl Opt {
MEILI_EXPERIMENTAL_NO_SNAPSHOT_COMPACTION,
experimental_no_snapshot_compaction.to_string(),
);
if let Some(experimental_personalization_api_key) = experimental_personalization_api_key {
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_PERSONALIZATION_API_KEY,
experimental_personalization_api_key,
);
}
indexer_options.export_to_env();
if let Some(s3_snapshot_options) = s3_snapshot_options {
#[cfg(not(unix))]
{
let _ = s3_snapshot_options;
panic!("S3 snapshot options are not supported on Windows");
}
#[cfg(unix)]
s3_snapshot_options.export_to_env();
}
}
pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> {
@@ -897,16 +849,6 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
type Error = anyhow::Error;
fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> {
let IndexerOpts {
max_indexing_memory,
max_indexing_threads,
skip_index_budget,
experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps,
experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing,
} = other;
let thread_pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(other.max_indexing_threads.unwrap_or_else(|| num_cpus::get() / 2))
.build()?;
@@ -914,163 +856,21 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
Ok(Self {
thread_pool,
log_every_n: Some(DEFAULT_LOG_EVERY_N),
max_memory: max_indexing_memory.map(|b| b.as_u64() as usize),
max_threads: max_indexing_threads.0,
max_memory: other.max_indexing_memory.map(|b| b.as_u64() as usize),
max_threads: *other.max_indexing_threads,
max_positions_per_attributes: None,
skip_index_budget: *skip_index_budget,
experimental_no_edition_2024_for_settings: *experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps: *experimental_no_edition_2024_for_dumps,
skip_index_budget: other.skip_index_budget,
experimental_no_edition_2024_for_settings: other
.experimental_no_edition_2024_for_settings,
experimental_no_edition_2024_for_dumps: other.experimental_no_edition_2024_for_dumps,
chunk_compression_type: Default::default(),
chunk_compression_level: Default::default(),
documents_chunk_size: Default::default(),
max_nb_chunks: Default::default(),
experimental_no_edition_2024_for_prefix_post_processing:
*experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing:
*experimental_no_edition_2024_for_facet_post_processing,
s3_snapshot_options: None,
})
}
}
#[derive(Debug, Clone, Parser, Deserialize)]
// This group is a bit tricky but makes it possible to require all listed fields if one of them
// is specified. It lets us keep an Option for the S3SnapshotOpts configuration.
// <https://github.com/clap-rs/clap/issues/5092#issuecomment-2616986075>
#[group(requires_all = ["s3_bucket_url", "s3_bucket_region", "s3_bucket_name", "s3_snapshot_prefix", "s3_access_key", "s3_secret_key"])]
pub struct S3SnapshotOpts {
/// The S3 bucket URL in the format https://s3.<region>.amazonaws.com.
#[clap(long, env = MEILI_S3_BUCKET_URL, required = false)]
#[serde(default)]
pub s3_bucket_url: String,
/// The region in the format us-east-1.
#[clap(long, env = MEILI_S3_BUCKET_REGION, required = false)]
#[serde(default)]
pub s3_bucket_region: String,
/// The bucket name.
#[clap(long, env = MEILI_S3_BUCKET_NAME, required = false)]
#[serde(default)]
pub s3_bucket_name: String,
/// The prefix path under which to put the snapshot; uses forward slashes (/).
#[clap(long, env = MEILI_S3_SNAPSHOT_PREFIX, required = false)]
#[serde(default)]
pub s3_snapshot_prefix: String,
/// The S3 access key.
#[clap(long, env = MEILI_S3_ACCESS_KEY, required = false)]
#[serde(default)]
pub s3_access_key: String,
/// The S3 secret key.
#[clap(long, env = MEILI_S3_SECRET_KEY, required = false)]
#[serde(default)]
pub s3_secret_key: String,
/// The maximum number of parts that can be uploaded in parallel.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS, default_value_t = default_experimental_s3_snapshot_max_in_flight_parts())]
#[serde(default = "default_experimental_s3_snapshot_max_in_flight_parts")]
pub experimental_s3_max_in_flight_parts: NonZeroUsize,
/// The compression level. Defaults to no compression (0).
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL, default_value_t = default_experimental_s3_snapshot_compression_level())]
#[serde(default = "default_experimental_s3_snapshot_compression_level")]
pub experimental_s3_compression_level: u32,
/// The signature duration for the multipart upload.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS, default_value_t = default_experimental_s3_snapshot_signature_duration_seconds())]
#[serde(default = "default_experimental_s3_snapshot_signature_duration_seconds")]
pub experimental_s3_signature_duration_seconds: u64,
/// The size of the multipart parts.
///
/// Must not be less than 10MiB or larger than 8GiB. Yes,
/// twice the boundaries of the AWS S3 multipart upload
/// because we use it a bit differently internally.
///
/// For more information, see <https://github.com/orgs/meilisearch/discussions/869>.
#[clap(long, env = MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE, default_value_t = default_experimental_s3_snapshot_multipart_part_size())]
#[serde(default = "default_experimental_s3_snapshot_multipart_part_size")]
pub experimental_s3_multipart_part_size: Byte,
}
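
A sketch of the clap all-or-nothing group trick described in the comment above, reduced to two fields (names are illustrative): passing any flag of the group makes every listed flag required, while omitting all of them leaves the flattened `Option` as `None`.

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct Opts {
    // `None` unless at least one of the group's flags is passed.
    #[clap(flatten)]
    s3: Option<S3Opts>,
}

#[derive(Debug, Clone, Parser)]
#[group(requires_all = ["bucket_url", "bucket_name"])]
struct S3Opts {
    #[clap(long, required = false)]
    bucket_url: String,
    #[clap(long, required = false)]
    bucket_name: String,
}

fn main() {
    // One flag without the other: the group makes the rest required.
    assert!(Opts::try_parse_from(["app", "--bucket-url", "u"]).is_err());
    // No flags at all: parsing succeeds and `s3` stays `None`.
    assert!(Opts::try_parse_from(["app"]).is_ok());
}
```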
impl S3SnapshotOpts {
/// Exports the values to their corresponding env vars if they are not set.
pub fn export_to_env(self) {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = self;
export_to_env_if_not_present(MEILI_S3_BUCKET_URL, s3_bucket_url);
export_to_env_if_not_present(MEILI_S3_BUCKET_REGION, s3_bucket_region);
export_to_env_if_not_present(MEILI_S3_BUCKET_NAME, s3_bucket_name);
export_to_env_if_not_present(MEILI_S3_SNAPSHOT_PREFIX, s3_snapshot_prefix);
export_to_env_if_not_present(MEILI_S3_ACCESS_KEY, s3_access_key);
export_to_env_if_not_present(MEILI_S3_SECRET_KEY, s3_secret_key);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MAX_IN_FLIGHT_PARTS,
experimental_s3_max_in_flight_parts.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_COMPRESSION_LEVEL,
experimental_s3_compression_level.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_SIGNATURE_DURATION_SECONDS,
experimental_s3_signature_duration_seconds.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_S3_MULTIPART_PART_SIZE,
experimental_s3_multipart_part_size.to_string(),
);
}
}
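
A minimal sketch of the assumed contract of `export_to_env_if_not_present`: config-file values are pushed into the process environment only when the variable is not already set, so explicit environment variables keep precedence.

```rust
use std::env;

// Assumed behavior: set the variable only when the user has not.
fn export_to_env_if_not_present(key: &str, value: impl AsRef<str>) {
    if env::var_os(key).is_none() {
        env::set_var(key, value.as_ref());
    }
}

fn main() {
    env::set_var("MEILI_EXAMPLE_SET", "from-env");
    export_to_env_if_not_present("MEILI_EXAMPLE_SET", "from-config");
    export_to_env_if_not_present("MEILI_EXAMPLE_UNSET", "from-config");
    assert_eq!(env::var("MEILI_EXAMPLE_SET").unwrap(), "from-env");
    assert_eq!(env::var("MEILI_EXAMPLE_UNSET").unwrap(), "from-config");
}
```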
impl TryFrom<S3SnapshotOpts> for S3SnapshotOptions {
type Error = anyhow::Error;
fn try_from(other: S3SnapshotOpts) -> Result<Self, Self::Error> {
let S3SnapshotOpts {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
experimental_s3_max_in_flight_parts,
experimental_s3_compression_level,
experimental_s3_signature_duration_seconds,
experimental_s3_multipart_part_size,
} = other;
Ok(S3SnapshotOptions {
s3_bucket_url,
s3_bucket_region,
s3_bucket_name,
s3_snapshot_prefix,
s3_access_key,
s3_secret_key,
s3_max_in_flight_parts: experimental_s3_max_in_flight_parts,
s3_compression_level: experimental_s3_compression_level,
s3_signature_duration: Duration::from_secs(experimental_s3_signature_duration_seconds),
s3_multipart_part_size: experimental_s3_multipart_part_size.as_u64(),
experimental_no_edition_2024_for_prefix_post_processing: other
.experimental_no_edition_2024_for_prefix_post_processing,
experimental_no_edition_2024_for_facet_post_processing: other
.experimental_no_edition_2024_for_facet_post_processing,
})
}
}
@@ -1289,22 +1089,6 @@ fn default_snapshot_interval_sec() -> &'static str {
DEFAULT_SNAPSHOT_INTERVAL_SEC_STR
}
fn default_experimental_s3_snapshot_max_in_flight_parts() -> NonZeroUsize {
DEFAULT_S3_SNAPSHOT_MAX_IN_FLIGHT_PARTS
}
fn default_experimental_s3_snapshot_compression_level() -> u32 {
DEFAULT_S3_SNAPSHOT_COMPRESSION_LEVEL
}
fn default_experimental_s3_snapshot_signature_duration_seconds() -> u64 {
DEFAULT_S3_SNAPSHOT_SIGNATURE_DURATION_SECONDS
}
fn default_experimental_s3_snapshot_multipart_part_size() -> Byte {
DEFAULT_S3_SNAPSHOT_MULTIPART_PART_SIZE
}
fn default_dump_dir() -> PathBuf {
PathBuf::from(DEFAULT_DUMP_DIR)
}

View File

@@ -1,366 +0,0 @@
use crate::search::{Personalize, SearchResult};
use meilisearch_types::{
error::{Code, ErrorCode, ResponseError},
milli::TimeBudget,
};
use rand::Rng;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::{debug, info, warn};
const COHERE_API_URL: &str = "https://api.cohere.ai/v1/rerank";
const MAX_RETRIES: u32 = 10;
#[derive(Debug, thiserror::Error)]
enum PersonalizationError {
#[error("Personalization service: HTTP request failed: {0}")]
Request(#[from] reqwest::Error),
#[error("Personalization service: Failed to parse response: {0}")]
Parse(String),
#[error("Personalization service: Cohere API error: {0}")]
Api(String),
#[error("Personalization service: Unauthorized: invalid API key")]
Unauthorized,
#[error("Personalization service: Rate limited: too many requests")]
RateLimited,
#[error("Personalization service: Bad request: {0}")]
BadRequest(String),
#[error("Personalization service: Internal server error: {0}")]
InternalServerError(String),
#[error("Personalization service: Network error: {0}")]
Network(String),
#[error("Personalization service: Deadline exceeded")]
DeadlineExceeded,
#[error(transparent)]
FeatureNotEnabled(#[from] index_scheduler::error::FeatureNotEnabledError),
}
impl ErrorCode for PersonalizationError {
fn error_code(&self) -> Code {
match self {
PersonalizationError::FeatureNotEnabled { .. } => Code::FeatureNotEnabled,
PersonalizationError::Unauthorized => Code::RemoteInvalidApiKey,
PersonalizationError::RateLimited => Code::TooManySearchRequests,
PersonalizationError::BadRequest(_) => Code::RemoteBadRequest,
PersonalizationError::InternalServerError(_) => Code::RemoteRemoteError,
PersonalizationError::Network(_) | PersonalizationError::Request(_) => {
Code::RemoteCouldNotSendRequest
}
PersonalizationError::Parse(_) | PersonalizationError::Api(_) => {
Code::RemoteBadResponse
}
PersonalizationError::DeadlineExceeded => Code::Internal, // should not be returned to the client
}
}
}
pub struct CohereService {
client: Client,
api_key: String,
}
impl CohereService {
pub fn new(api_key: String) -> Self {
info!("Personalization service initialized with Cohere API");
let client = Client::builder()
.timeout(Duration::from_secs(30))
.build()
.expect("Failed to create HTTP client");
Self { client, api_key }
}
pub async fn rerank_search_results(
&self,
search_result: SearchResult,
personalize: &Personalize,
query: Option<&str>,
time_budget: TimeBudget,
) -> Result<SearchResult, ResponseError> {
if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}
// Extract user context from personalization
let user_context = personalize.user_context.as_str();
// Build the prompt by merging query and user context
let prompt = match query {
Some(q) => format!("User Context: {user_context}\nQuery: {q}"),
None => format!("User Context: {user_context}"),
};
// Extract documents for reranking
let documents: Vec<String> = search_result
.hits
.iter()
.map(|hit| {
// Convert the document to a string representation for reranking
serde_json::to_string(&hit.document).unwrap_or_else(|_| "{}".to_string())
})
.collect();
if documents.is_empty() {
return Ok(search_result);
}
// Call Cohere's rerank API with retry logic
let reranked_indices =
match self.call_rerank_with_retry(&prompt, &documents, time_budget).await {
Ok(indices) => indices,
Err(PersonalizationError::DeadlineExceeded) => {
// If the deadline is exceeded, return the original search result instead of an error
return Ok(search_result);
}
Err(e) => return Err(e.into()),
};
debug!("Cohere rerank successful, reordering {} results", search_result.hits.len());
// Reorder the hits based on Cohere's reranking
let mut reranked_hits = Vec::new();
for index in reranked_indices.iter() {
if let Some(hit) = search_result.hits.get(*index) {
reranked_hits.push(hit.clone());
}
}
Ok(SearchResult { hits: reranked_hits, ..search_result })
}
async fn call_rerank_with_retry(
&self,
query: &str,
documents: &[String],
time_budget: TimeBudget,
) -> Result<Vec<usize>, PersonalizationError> {
let request_body = CohereRerankRequest {
query: query.to_string(),
documents: documents.to_vec(),
model: "rerank-english-v3.0".to_string(),
};
// Retry loop similar to vector extraction
for attempt in 0..MAX_RETRIES {
let response_result = self.send_rerank_request(&request_body).await;
let retry_duration = match self.handle_response(response_result).await {
Ok(indices) => return Ok(indices),
Err(retry) => {
warn!("Cohere rerank attempt #{} failed: {}", attempt, retry.error);
if time_budget.exceeded() {
warn!("Could not rerank due to deadline");
return Err(PersonalizationError::DeadlineExceeded);
} else {
match retry.into_duration(attempt) {
Ok(d) => d,
Err(error) => return Err(error),
}
}
}
};
// add random jitter of up to the base duration (at most doubling the delay)
let retry_duration = retry_duration
+ rand::thread_rng().gen_range(std::time::Duration::ZERO..retry_duration);
warn!("Retrying after {}ms", retry_duration.as_millis());
tokio::time::sleep(retry_duration).await;
}
// Final attempt without retry
let response_result = self.send_rerank_request(&request_body).await;
match self.handle_response(response_result).await {
Ok(indices) => Ok(indices),
Err(retry) => Err(retry.into_error()),
}
}
async fn send_rerank_request(
&self,
request_body: &CohereRerankRequest,
) -> Result<reqwest::Response, reqwest::Error> {
self.client
.post(COHERE_API_URL)
.header("Authorization", format!("Bearer {}", self.api_key))
.header("Content-Type", "application/json")
.json(request_body)
.send()
.await
}
async fn handle_response(
&self,
response_result: Result<reqwest::Response, reqwest::Error>,
) -> Result<Vec<usize>, Retry> {
let response = match response_result {
Ok(r) => r,
Err(e) if e.is_timeout() => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Request timeout: {}",
e
))));
}
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Network(format!(
"Network error: {}",
e
))));
}
};
let status = response.status();
let status_code = status.as_u16();
if status.is_success() {
let rerank_response: CohereRerankResponse = match response.json().await {
Ok(r) => r,
Err(e) => {
return Err(Retry::retry_later(PersonalizationError::Parse(format!(
"Failed to parse response: {}",
e
))));
}
};
// Extract indices from rerank results
let indices: Vec<usize> =
rerank_response.results.iter().map(|result| result.index as usize).collect();
return Ok(indices);
}
// Handle error status codes
let error_body = response.text().await.unwrap_or_else(|_| "Unknown error".to_string());
let retry = match status_code {
401 => Retry::give_up(PersonalizationError::Unauthorized),
429 => Retry::rate_limited(PersonalizationError::RateLimited),
400 => Retry::give_up(PersonalizationError::BadRequest(error_body)),
500..=599 => Retry::retry_later(PersonalizationError::InternalServerError(format!(
"Status {}: {}",
status_code, error_body
))),
402..=499 => Retry::give_up(PersonalizationError::Api(format!(
"Status {}: {}",
status_code, error_body
))),
_ => Retry::retry_later(PersonalizationError::Api(format!(
"Unexpected status {}: {}",
status_code, error_body
))),
};
Err(retry)
}
}
#[derive(Serialize)]
struct CohereRerankRequest {
query: String,
documents: Vec<String>,
model: String,
}
#[derive(Deserialize)]
struct CohereRerankResponse {
results: Vec<CohereRerankResult>,
}
#[derive(Deserialize)]
struct CohereRerankResult {
index: u32,
}
// Retry strategy similar to vector extraction
struct Retry {
error: PersonalizationError,
strategy: RetryStrategy,
}
enum RetryStrategy {
GiveUp,
Retry,
RetryAfterRateLimit,
}
impl Retry {
fn give_up(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::GiveUp }
}
fn retry_later(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::Retry }
}
fn rate_limited(error: PersonalizationError) -> Self {
Self { error, strategy: RetryStrategy::RetryAfterRateLimit }
}
fn into_duration(self, attempt: u32) -> Result<Duration, PersonalizationError> {
match self.strategy {
RetryStrategy::GiveUp => Err(self.error),
RetryStrategy::Retry => {
// Exponential backoff: 10^attempt milliseconds
Ok(Duration::from_millis((10u64).pow(attempt)))
}
RetryStrategy::RetryAfterRateLimit => {
// Longer backoff for rate limits: 100ms + exponential
Ok(Duration::from_millis(100 + (10u64).pow(attempt)))
}
}
}
fn into_error(self) -> PersonalizationError {
self.error
}
}
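
A worked sketch of the retry arithmetic above: attempt `n` waits 10^n ms (100 + 10^n ms after a rate limit), and the caller then adds random jitter of up to one extra base duration, so the actual sleep lands in [d, 2d).

```rust
use std::time::Duration;

// Base delay before jitter: 10^attempt ms, +100 ms after a rate limit.
fn backoff(attempt: u32, rate_limited: bool) -> Duration {
    let base = 10u64.pow(attempt);
    Duration::from_millis(if rate_limited { 100 + base } else { base })
}

fn main() {
    assert_eq!(backoff(0, false), Duration::from_millis(1));
    assert_eq!(backoff(3, false), Duration::from_millis(1_000));
    assert_eq!(backoff(3, true), Duration::from_millis(1_100));
    // The caller then adds gen_range(ZERO..d), so attempt 3 actually
    // sleeps anywhere in [1s, 2s).
}
```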
pub enum PersonalizationService {
Cohere(CohereService),
Disabled,
}
impl PersonalizationService {
pub fn cohere(api_key: String) -> Self {
// If the API key is empty, consider the personalization service as disabled
if api_key.trim().is_empty() {
Self::disabled()
} else {
Self::Cohere(CohereService::new(api_key))
}
}
pub fn disabled() -> Self {
debug!("Personalization service disabled");
Self::Disabled
}
pub async fn rerank_search_results(
&self,
search_result: SearchResult,
personalize: &Personalize,
query: Option<&str>,
time_budget: TimeBudget,
) -> Result<SearchResult, ResponseError> {
match self {
Self::Cohere(cohere_service) => {
cohere_service
.rerank_search_results(search_result, personalize, query, time_budget)
.await
}
Self::Disabled => Err(PersonalizationError::FeatureNotEnabled(
index_scheduler::error::FeatureNotEnabledError {
disabled_action: "reranking search results",
feature: "personalization",
issue_link: "https://github.com/orgs/meilisearch/discussions/866",
},
)
.into()),
}
}
}
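
A reduced sketch of the constructor behavior above: an empty or whitespace-only API key silently falls back to the disabled variant (types are stand-ins for the real service).

```rust
// Stand-in for PersonalizationService; only the constructor logic matters.
enum Service {
    Cohere(String),
    Disabled,
}

impl Service {
    fn cohere(api_key: String) -> Self {
        // An empty key means "configured but unusable": treat as disabled.
        if api_key.trim().is_empty() {
            Service::Disabled
        } else {
            Service::Cohere(api_key)
        }
    }
}

fn main() {
    assert!(matches!(Service::cohere("   ".into()), Service::Disabled));
    assert!(matches!(Service::cohere("key".into()), Service::Cohere(_)));
}
```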

View File

@@ -45,9 +45,7 @@ use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::payload::Payload;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::enterprise_edition::proxy::{
proxy, task_network_and_check_leader, Body,
};
use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
use crate::routes::indexes::search::fix_sort_query_parameters;
use crate::routes::{
get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT,
@@ -335,16 +333,13 @@ impl Aggregate for DocumentsDeletionAggregator {
pub async fn delete_document(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
path: web::Path<DocumentParam>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
req: HttpRequest,
opt: web::Data<Opt>,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let CustomMetadataQuery { custom_metadata } = params.into_inner();
let DocumentParam { index_uid, document_id } = path.into_inner();
let index_uid = IndexUid::try_from(index_uid)?;
let network = index_scheduler.network();
let task_network = task_network_and_check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -362,23 +357,13 @@ pub async fn delete_document(
};
let uid = get_task_id(&req, &opt)?;
let dry_run = is_dry_run(&req, &opt)?;
let mut task = {
let task = {
let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || {
index_scheduler.register_with_custom_metadata(
task,
uid,
custom_metadata,
dry_run,
task_network,
)
})
.await??
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
};
if let Some(task_network) = task.network.take() {
proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
.await?;
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -693,19 +678,6 @@ pub struct UpdateDocumentsQuery {
#[param(value_type = char, default = ",", example = ";")]
#[deserr(default, try_from(char) = from_char_csv_delimiter -> DeserrQueryParamError<InvalidDocumentCsvDelimiter>, error = DeserrQueryParamError<InvalidDocumentCsvDelimiter>)]
pub csv_delimiter: Option<u8>,
#[param(example = "custom")]
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
pub custom_metadata: Option<String>,
}
#[derive(Deserialize, Debug, Deserr, IntoParams)]
#[deserr(error = DeserrQueryParamError, rename_all = camelCase, deny_unknown_fields)]
#[into_params(parameter_in = Query, rename_all = "camelCase")]
pub struct CustomMetadataQuery {
#[param(example = "custom")]
#[deserr(default, error = DeserrQueryParamError<InvalidIndexCustomMetadata>)]
pub custom_metadata: Option<String>,
}
fn from_char_csv_delimiter(
@@ -847,7 +819,6 @@ pub async fn replace_documents(
body,
IndexDocumentsMethod::ReplaceDocuments,
uid,
params.custom_metadata,
dry_run,
allow_index_creation,
&req,
@@ -950,7 +921,6 @@ pub async fn update_documents(
body,
IndexDocumentsMethod::UpdateDocuments,
uid,
params.custom_metadata,
dry_run,
allow_index_creation,
&req,
@@ -970,14 +940,12 @@ async fn document_addition(
body: Payload,
method: IndexDocumentsMethod,
task_id: Option<TaskId>,
custom_metadata: Option<String>,
dry_run: bool,
allow_index_creation: bool,
req: &HttpRequest,
) -> Result<SummarizedTaskView, MeilisearchHttpError> {
let mime_type = extract_mime_type(req)?;
let network = index_scheduler.network();
let task_network = task_network_and_check_leader(&req, &network)?;
let format = match (
mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())),
@@ -1096,18 +1064,9 @@ async fn document_addition(
index_uid: index_uid.to_string(),
};
// FIXME: not new to this PR, but _any_ error here will cause the payload to unduly persist
let scheduler = index_scheduler.clone();
let mut task = match tokio::task::spawn_blocking(move || {
scheduler.register_with_custom_metadata(
task,
task_id,
custom_metadata,
dry_run,
task_network,
)
})
.await?
let task = match tokio::task::spawn_blocking(move || scheduler.register(task, task_id, dry_run))
.await?
{
Ok(task) => task,
Err(e) => {
@@ -1116,13 +1075,12 @@ async fn document_addition(
}
};
if let Some(task_network) = task.network.take() {
if network.sharding {
if let Some(file) = file {
proxy(
&index_scheduler,
Some(&index_uid),
&index_uid,
req,
task_network,
network,
Body::with_ndjson_payload(file),
&task,
@@ -1172,7 +1130,7 @@ async fn copy_body_to_file(
/// Delete a set of documents based on an array of document ids.
#[utoipa::path(
post,
path = "{indexUid}/documents/delete-batch",
path = "{indexUid}/delete-batch",
tag = "Documents",
security(("Bearer" = ["documents.delete", "documents.*", "*"])),
params(
@@ -1203,17 +1161,13 @@ pub async fn delete_documents_batch(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
body: web::Json<Vec<Value>>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
req: HttpRequest,
opt: web::Data<Opt>,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?body, "Delete documents by batch");
let CustomMetadataQuery { custom_metadata } = params.into_inner();
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let network = index_scheduler.network();
let task_network = task_network_and_check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -1234,31 +1188,13 @@ pub async fn delete_documents_batch(
KindWithContent::DocumentDeletion { index_uid: index_uid.to_string(), documents_ids: ids };
let uid = get_task_id(&req, &opt)?;
let dry_run = is_dry_run(&req, &opt)?;
let mut task = {
let task = {
let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || {
index_scheduler.register_with_custom_metadata(
task,
uid,
custom_metadata,
dry_run,
task_network,
)
})
.await??
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
};
if let Some(task_network) = task.network.take() {
proxy(
&index_scheduler,
Some(&index_uid),
&req,
task_network,
network,
Body::inline(body),
&task,
)
.await?;
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(body), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -1308,20 +1244,16 @@ pub struct DocumentDeletionByFilter {
pub async fn delete_documents_by_filter(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
body: AwebJson<DocumentDeletionByFilter, DeserrJsonError>,
req: HttpRequest,
opt: web::Data<Opt>,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?body, "Delete documents by filter");
let CustomMetadataQuery { custom_metadata } = params.into_inner();
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let index_uid = index_uid.into_inner();
let filter = body.into_inner();
let network = index_scheduler.network();
let task_network = task_network_and_check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -1348,31 +1280,13 @@ pub async fn delete_documents_by_filter(
let uid = get_task_id(&req, &opt)?;
let dry_run = is_dry_run(&req, &opt)?;
let mut task = {
let task = {
let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || {
index_scheduler.register_with_custom_metadata(
task,
uid,
custom_metadata,
dry_run,
task_network,
)
})
.await??
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
};
if let Some(task_network) = task.network.take() {
proxy(
&index_scheduler,
Some(&index_uid),
&req,
task_network,
network,
Body::inline(filter),
&task,
)
.await?;
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(filter), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -1458,41 +1372,38 @@ impl Aggregate for EditDocumentsByFunctionAggregator {
pub async fn edit_documents_by_function(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ALL }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
body: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
params: AwebJson<DocumentEditionByFunction, DeserrJsonError>,
req: HttpRequest,
opt: web::Data<Opt>,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!(parameters = ?body, "Edit documents by function");
let CustomMetadataQuery { custom_metadata } = params.into_inner();
debug!(parameters = ?params, "Edit documents by function");
index_scheduler
.features()
.check_edit_documents_by_function("Using the documents edit route")?;
let network = index_scheduler.network();
let task_network = task_network_and_check_leader(&req, &network)?;
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let index_uid = index_uid.into_inner();
let body = body.into_inner();
let params = params.into_inner();
analytics.publish(
EditDocumentsByFunctionAggregator {
filtered: body.filter.is_some(),
with_context: body.context.is_some(),
filtered: params.filter.is_some(),
with_context: params.context.is_some(),
index_creation: index_scheduler.index(&index_uid).is_err(),
},
&req,
);
let engine = milli::rhai::Engine::new();
if let Err(e) = engine.compile(&body.function) {
if let Err(e) = engine.compile(&params.function) {
return Err(ResponseError::from_msg(e.to_string(), Code::BadRequest));
}
if let Some(ref filter) = body.filter {
if let Some(ref filter) = params.filter {
// we ensure the filter is well formed before enqueuing it
crate::search::parse_filter(
filter,
@@ -1503,8 +1414,8 @@ pub async fn edit_documents_by_function(
}
let task = KindWithContent::DocumentEdition {
index_uid: index_uid.clone(),
filter_expr: body.filter.clone(),
context: match body.context.clone() {
filter_expr: params.filter.clone(),
context: match params.context.clone() {
Some(Value::Object(m)) => Some(m),
None => None,
_ => {
@@ -1514,36 +1425,18 @@ pub async fn edit_documents_by_function(
))
}
},
function: body.function.clone(),
function: params.function.clone(),
};
let uid = get_task_id(&req, &opt)?;
let dry_run = is_dry_run(&req, &opt)?;
let mut task = {
let task = {
let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || {
index_scheduler.register_with_custom_metadata(
task,
uid,
custom_metadata,
dry_run,
task_network,
)
})
.await??
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
};
if let Some(task_network) = task.network.take() {
proxy(
&index_scheduler,
Some(&index_uid),
&req,
task_network,
network,
Body::inline(body),
&task,
)
.await?;
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::Inline(params), &task).await?;
}
let task: SummarizedTaskView = task.into();
@@ -1584,15 +1477,12 @@ pub async fn edit_documents_by_function(
pub async fn clear_all_documents(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: AwebQueryParameter<CustomMetadataQuery, DeserrQueryParamError>,
req: HttpRequest,
opt: web::Data<Opt>,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let network = index_scheduler.network();
let CustomMetadataQuery { custom_metadata } = params.into_inner();
let task_network = task_network_and_check_leader(&req, &network)?;
analytics.publish(
DocumentsDeletionAggregator {
@@ -1608,24 +1498,14 @@ pub async fn clear_all_documents(
let uid = get_task_id(&req, &opt)?;
let dry_run = is_dry_run(&req, &opt)?;
let mut task = {
let task = {
let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || {
index_scheduler.register_with_custom_metadata(
task,
uid,
custom_metadata,
dry_run,
task_network,
)
})
.await??
tokio::task::spawn_blocking(move || index_scheduler.register(task, uid, dry_run)).await??
};
if let Some(task_network) = task.network.take() {
proxy(&index_scheduler, Some(&index_uid), &req, task_network, network, Body::none(), &task)
.await?;
if network.sharding && !dry_run {
proxy(&index_scheduler, &index_uid, &req, network, Body::none(), &task).await?;
}
let task: SummarizedTaskView = task.into();
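
A toy sketch of the `task.network.take()` pattern that recurs in the handlers above: taking the network info out of the task both signals that this node must proxy and guarantees the fan-out happens at most once (types are stand-ins for the scheduler's).

```rust
// Stand-in for the scheduler task; `network` is Some only when the
// scheduler decided this node has remotes to contact.
struct Task {
    uid: u32,
    network: Option<&'static str>,
}

fn main() {
    let mut task = Task { uid: 7, network: Some("remotes-to-contact") };
    if let Some(network) = task.network.take() {
        println!("task {} proxied using {network}", task.uid);
    }
    // The network info was consumed: a second pass cannot proxy again.
    assert!(task.network.is_none());
}
```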

View File

@@ -3,7 +3,6 @@
// Use of this source code is governed by the Business Source License 1.1,
// as found in the LICENSE-EE file or at <https://mariadb.com/bsl11>
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::fs::File;
@@ -11,18 +10,8 @@ use actix_web::http::header::CONTENT_TYPE;
use actix_web::HttpRequest;
use bytes::Bytes;
use index_scheduler::IndexScheduler;
use meilisearch_types::enterprise_edition::network::Remote;
use meilisearch_types::error::ResponseError;
use meilisearch_types::milli::DocumentId;
use meilisearch_types::tasks::enterprise_edition::network::headers::{
PROXY_IMPORT_DOCS_HEADER, PROXY_IMPORT_INDEX_COUNT_HEADER, PROXY_IMPORT_INDEX_HEADER,
PROXY_IMPORT_REMOTE_HEADER, PROXY_IMPORT_TASK_KEY_HEADER, PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
PROXY_ORIGIN_NETWORK_VERSION_HEADER, PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
};
use meilisearch_types::tasks::enterprise_edition::network::{
DbTaskNetwork, ImportData, ImportMetadata, Origin, TaskNetwork,
};
use meilisearch_types::tasks::Task;
use meilisearch_types::tasks::{Origin, RemoteTask, TaskNetwork};
use reqwest::StatusCode;
use serde::de::DeserializeOwned;
use serde_json::Value;
@@ -33,18 +22,13 @@ use crate::routes::indexes::enterprise_edition::proxy::error::{
};
use crate::routes::SummarizedTaskView;
pub enum Body<T, F>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
pub enum Body<T: serde::Serialize> {
NdJsonPayload(File),
Inline(T),
Generated(T, F),
None,
}
impl Body<(), fn(&str, &Remote, &mut ())> {
impl Body<()> {
pub fn with_ndjson_payload(file: File) -> Self {
Self::NdJsonPayload(file)
}
@@ -54,115 +38,7 @@ impl Body<(), fn(&str, &Remote, &mut ())> {
}
}
impl<T> Body<T, fn(&str, &Remote, &mut T)>
where
T: serde::Serialize,
{
pub fn inline(payload: T) -> Self {
Self::Inline(payload)
}
}
impl<T, F> Body<T, F>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
pub fn generated(initial: T, f: F) -> Self {
Self::Generated(initial, f)
}
}
impl<T, F> Body<T, F>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
pub fn into_bytes_iter(
self,
remotes: impl IntoIterator<Item = (String, Remote)>,
) -> Result<
impl Iterator<Item = (Option<Bytes>, (String, Remote))>,
meilisearch_types::milli::Error,
> {
let bytes = match self {
Body::NdJsonPayload(file) => {
Some(Bytes::from_owner(unsafe { memmap2::Mmap::map(&file)? }))
}
Body::Inline(payload) => {
Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
}
Body::None => None,
Body::Generated(mut initial, mut f) => {
return Ok(either::Right(remotes.into_iter().map(move |(name, remote)| {
f(&name, &remote, &mut initial);
let bytes =
Some(Bytes::copy_from_slice(&serde_json::to_vec(&initial).unwrap()));
(bytes, (name, remote))
})));
}
};
Ok(either::Left(std::iter::repeat(bytes).zip(remotes)))
}
}
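
A self-contained sketch of what the `Generated` variant enables above: a closure patches the payload once per remote before serialization, so each node can receive a slightly different body (remote names and payload shape are illustrative).

```rust
fn main() {
    let remotes = vec!["ms-1".to_string(), "ms-2".to_string()];
    // The closure mutates the payload per remote, as Body::Generated does.
    let mut payload = serde_json::json!({ "target": null });
    let bodies: Vec<(String, Vec<u8>)> = remotes
        .into_iter()
        .map(|name| {
            payload["target"] = serde_json::Value::String(name.clone());
            (name, serde_json::to_vec(&payload).unwrap())
        })
        .collect();
    assert!(String::from_utf8_lossy(&bodies[0].1).contains("ms-1"));
    assert!(String::from_utf8_lossy(&bodies[1].1).contains("ms-2"));
}
```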
/// Parses the headers to determine if this task is a duplicate that originates with a remote.
///
/// If not, checks whether this remote is the leader and return `MeilisearchHttpError::NotLeader` if not.
///
/// If there is no leader, returns `Ok(None)`.
///
/// # Errors
///
/// - `MeilisearchHttpError::NotLeader`: if the following are true simultaneously:
/// 1. The task originates with the current node
/// 2. There's a declared `leader`
/// 3. The declared leader is **not** the current node
/// - `MeilisearchHttpError::InvalidHeaderValue`: if only parts of the headers are present, or if they cannot be parsed as a task network.
/// - `MeilisearchHttpError::Inconsistent`
pub fn task_network_and_check_leader(
req: &HttpRequest,
network: &meilisearch_types::enterprise_edition::network::Network,
) -> Result<Option<TaskNetwork>, MeilisearchHttpError> {
match (origin_from_req(req)?, import_data_from_req(req)?, import_metadata_from_req(req)?) {
(Some(network_change), Some(import_from), Some(metadata)) => {
Ok(Some(TaskNetwork::Import { import_from, network_change, metadata }))
}
(Some(origin), None, None) => Ok(Some(TaskNetwork::Origin { origin })),
(None, None, None) => {
match (network.leader.as_deref(), network.local.as_deref()) {
// 1. Always allowed if there is no leader
(None, _) => return Ok(None),
// 2. Allowed if the leader is self
(Some(leader), Some(this)) if leader == this => (),
// 3. Any other change is disallowed
(Some(leader), _) => {
return Err(
MeilisearchHttpError::NotLeader { leader: leader.to_string() }.into()
)
}
}
Ok(Some(TaskNetwork::Remotes {
remote_tasks: Default::default(),
network_version: network.version,
}))
}
// all good cases were matched, so this is always an error
(origin, import_from, metadata) => {
Err(MeilisearchHttpError::InconsistentTaskNetworkHeaders {
is_missing_origin: origin.is_none(),
is_missing_import: import_from.is_none(),
is_missing_import_metadata: metadata.is_none(),
})
}
}
}
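
The leader check above distills to a three-case table; a runnable sketch with the error type simplified to a `String` (`leader`/`this` mirror `network.leader` and `network.local`):

```rust
fn check_leader(leader: Option<&str>, this: Option<&str>) -> Result<(), String> {
    match (leader, this) {
        // 1. Always allowed if there is no leader.
        (None, _) => Ok(()),
        // 2. Allowed if the leader is self.
        (Some(leader), Some(this)) if leader == this => Ok(()),
        // 3. Any other write is refused.
        (Some(leader), _) => Err(format!("not the leader; ask {leader}")),
    }
}

fn main() {
    assert!(check_leader(None, Some("ms-0")).is_ok());
    assert!(check_leader(Some("ms-0"), Some("ms-0")).is_ok());
    assert!(check_leader(Some("ms-0"), Some("ms-1")).is_err());
}
```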
/// Updates the task description and, if necessary, proxies the passed request to the network.
/// If necessary, proxies the passed request to the network and updates the task description.
///
/// This function reads the custom headers from the request to determine if it must proxy the request or if the request
/// has already been proxied.
@@ -172,139 +48,152 @@ pub fn task_network_and_check_leader(
/// with the task ids from the task queues of the remotes.
/// - when the request has already been proxied, the custom headers contain information about the remote that created the initial task.
/// This information is copied to the passed task.
///
/// # Returns
///
/// The updated task. The task is read back from the database to avoid erasing concurrent changes.
pub async fn proxy<T, F>(
pub async fn proxy<T: serde::Serialize>(
index_scheduler: &IndexScheduler,
index_uid: Option<&str>,
index_uid: &str,
req: &HttpRequest,
mut task_network: DbTaskNetwork,
network: meilisearch_types::enterprise_edition::network::Network,
body: Body<T, F>,
body: Body<T>,
task: &meilisearch_types::tasks::Task,
) -> Result<Task, MeilisearchHttpError>
where
T: serde::Serialize,
F: FnMut(&str, &Remote, &mut T),
{
if let DbTaskNetwork::Remotes { remote_tasks, network_version: _ } = &mut task_network {
let this = network
.local
.as_deref()
.expect("inconsistent `network.sharding` and `network.self`")
.to_owned();
let content_type = match &body {
// for file bodies, force x-ndjson
Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
// otherwise get content type from request
_ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
};
let mut in_flight_remote_queries = BTreeMap::new();
let client = reqwest::ClientBuilder::new()
.connect_timeout(std::time::Duration::from_secs(3))
.build()
.unwrap();
let method = from_old_http_method(req.method());
// send payload to all remotes
for (body, (node_name, node)) in body
.into_bytes_iter(network.remotes.into_iter().filter(|(name, _)| name.as_str() != this))
.map_err(|err| {
MeilisearchHttpError::from_milli(err, index_uid.map(ToOwned::to_owned))
})?
{
let client = client.clone();
let api_key = node.write_api_key;
let this = this.clone();
let method = method.clone();
let path_and_query = req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");
in_flight_remote_queries.insert(
node_name,
tokio::spawn({
let url = format!("{}{}", node.url, path_and_query);
let url_encoded_this = urlencoding::encode(&this).into_owned();
let url_encoded_task_uid = task.uid.to_string(); // it's URL-encoded, I promise
let content_type = content_type.map(|b| b.to_owned());
let backoff = backoff::ExponentialBackoffBuilder::new()
.with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
.build();
backoff::future::retry(backoff, move || {
let url = url.clone();
let client = client.clone();
let url_encoded_this = url_encoded_this.clone();
let url_encoded_task_uid = url_encoded_task_uid.clone();
let content_type = content_type.clone();
let body = body.clone();
let api_key = api_key.clone();
let method = method.clone();
async move {
try_proxy(
method,
&url,
content_type.as_deref(),
api_key.as_deref(),
&client,
&url_encoded_this,
&url_encoded_task_uid,
body,
)
.await
}
})
}),
);
) -> Result<(), MeilisearchHttpError> {
match origin_from_req(req)? {
Some(origin) => {
index_scheduler.set_task_network(task.uid, TaskNetwork::Origin { origin })?
}
None => {
let this = network
.local
.as_deref()
.expect("inconsistent `network.sharding` and `network.self`")
.to_owned();
// wait for all in-flight queries to finish and collect their results
for (node_name, handle) in in_flight_remote_queries {
match handle.await {
Ok(Ok(res)) => {
let task_uid = res.task_uid;
let content_type = match &body {
// for file bodies, force x-ndjson
Body::NdJsonPayload(_) => Some(b"application/x-ndjson".as_slice()),
// otherwise get content type from request
_ => req.headers().get(CONTENT_TYPE).map(|h| h.as_bytes()),
};
remote_tasks.insert(node_name, Ok(task_uid).into());
let body = match body {
Body::NdJsonPayload(file) => Some(Bytes::from_owner(unsafe {
memmap2::Mmap::map(&file).map_err(|err| {
MeilisearchHttpError::from_milli(err.into(), Some(index_uid.to_owned()))
})?
})),
Body::Inline(payload) => {
Some(Bytes::copy_from_slice(&serde_json::to_vec(&payload).unwrap()))
}
Ok(Err(error)) => {
remote_tasks.insert(node_name, Err(error.as_response_error()).into());
}
Err(panic) => match panic.try_into_panic() {
Ok(panic) => {
let msg = match panic.downcast_ref::<&'static str>() {
Some(s) => *s,
None => match panic.downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
remote_tasks.insert(
node_name,
Err(ResponseError::from_msg(
msg.to_string(),
meilisearch_types::error::Code::Internal,
))
.into(),
);
}
Err(_) => {
tracing::error!("proxy task was unexpectedly cancelled")
}
},
Body::None => None,
};
let mut in_flight_remote_queries = BTreeMap::new();
let client = reqwest::ClientBuilder::new()
.connect_timeout(std::time::Duration::from_secs(3))
.build()
.unwrap();
let method = from_old_http_method(req.method());
// send payload to all remotes
for (node_name, node) in
network.remotes.into_iter().filter(|(name, _)| name.as_str() != this)
{
let body = body.clone();
let client = client.clone();
let api_key = node.write_api_key;
let this = this.clone();
let method = method.clone();
let path_and_query =
req.uri().path_and_query().map(|paq| paq.as_str()).unwrap_or("/");
in_flight_remote_queries.insert(
node_name,
tokio::spawn({
let url = format!("{}{}", node.url, path_and_query);
let url_encoded_this = urlencoding::encode(&this).into_owned();
let url_encoded_task_uid = task.uid.to_string(); // it's URL-encoded, I promise
let content_type = content_type.map(|b| b.to_owned());
let backoff = backoff::ExponentialBackoffBuilder::new()
.with_max_elapsed_time(Some(std::time::Duration::from_secs(25)))
.build();
backoff::future::retry(backoff, move || {
let url = url.clone();
let client = client.clone();
let url_encoded_this = url_encoded_this.clone();
let url_encoded_task_uid = url_encoded_task_uid.clone();
let content_type = content_type.clone();
let body = body.clone();
let api_key = api_key.clone();
let method = method.clone();
async move {
try_proxy(
method,
&url,
content_type.as_deref(),
api_key.as_deref(),
&client,
&url_encoded_this,
&url_encoded_task_uid,
body,
)
.await
}
})
}),
);
}
// wait for all in-flight queries to finish and collect their results
let mut remote_tasks: BTreeMap<String, RemoteTask> = BTreeMap::new();
for (node_name, handle) in in_flight_remote_queries {
match handle.await {
Ok(Ok(res)) => {
let task_uid = res.task_uid;
remote_tasks.insert(node_name, Ok(task_uid).into());
}
Ok(Err(error)) => {
remote_tasks.insert(node_name, Err(error.as_response_error()).into());
}
Err(panic) => match panic.try_into_panic() {
Ok(panic) => {
let msg = match panic.downcast_ref::<&'static str>() {
Some(s) => *s,
None => match panic.downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
remote_tasks.insert(
node_name,
Err(ResponseError::from_msg(
msg.to_string(),
meilisearch_types::error::Code::Internal,
))
.into(),
);
}
Err(_) => {
tracing::error!("proxy task was unexpectedly cancelled")
}
},
}
}
// edit details to contain the return values from the remotes
index_scheduler.set_task_network(task.uid, TaskNetwork::Remotes { remote_tasks })?;
}
}
Ok(index_scheduler.set_task_network(task.uid, task_network)?)
Ok(())
}
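
A sketch of the panic-message recovery used in the collection loop above: the payload of a panicked task is downcast first as `&'static str`, then as `String`, with a generic fallback, so the remote's entry records a readable error.

```rust
use std::any::Any;

fn panic_message(panic: Box<dyn Any + Send>) -> String {
    match panic.downcast_ref::<&'static str>() {
        Some(s) => (*s).to_string(),
        None => match panic.downcast_ref::<String>() {
            Some(s) => s.clone(),
            None => "Box<dyn Any>".to_string(),
        },
    }
}

fn main() {
    let payload: Box<dyn Any + Send> = Box::new("boom");
    assert_eq!(panic_message(payload), "boom");
    let payload: Box<dyn Any + Send> = Box::new(String::from("kaboom"));
    assert_eq!(panic_message(payload), "kaboom");
}
```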
fn from_old_http_method(method: &actix_http::Method) -> reqwest::Method {
@@ -486,23 +375,25 @@ mod error {
}
}
pub const PROXY_ORIGIN_REMOTE_HEADER: &str = "Meili-Proxy-Origin-Remote";
pub const PROXY_ORIGIN_TASK_UID_HEADER: &str = "Meili-Proxy-Origin-TaskUid";
pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchHttpError> {
let (remote_name, task_uid, network_version) = match (
let (remote_name, task_uid) = match (
req.headers().get(PROXY_ORIGIN_REMOTE_HEADER),
req.headers().get(PROXY_ORIGIN_TASK_UID_HEADER),
req.headers().get(PROXY_ORIGIN_NETWORK_VERSION_HEADER),
) {
(None, None, _) => return Ok(None),
(None, Some(_), _) => {
(None, None) => return Ok(None),
(None, Some(_)) => {
return Err(MeilisearchHttpError::InconsistentOriginHeaders { is_remote_missing: true })
}
(Some(_), None, _) => {
(Some(_), None) => {
return Err(MeilisearchHttpError::InconsistentOriginHeaders {
is_remote_missing: false,
})
}
(Some(remote_name), Some(task_uid), network_version) => {
let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
(Some(remote_name), Some(task_uid)) => (
urlencoding::decode(remote_name.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_REMOTE_HEADER,
msg: format!("while parsing remote name as UTF-8: {err}"),
@@ -511,8 +402,8 @@ pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchH
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_REMOTE_HEADER,
msg: format!("while URL-decoding remote name: {err}"),
})?;
let task_uid = urlencoding::decode(task_uid.to_str().map_err(|err| {
})?,
urlencoding::decode(task_uid.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_TASK_UID_HEADER,
msg: format!("while parsing task UID as UTF-8: {err}"),
@@ -521,182 +412,15 @@ pub fn origin_from_req(req: &HttpRequest) -> Result<Option<Origin>, MeilisearchH
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_TASK_UID_HEADER,
msg: format!("while URL-decoding task UID: {err}"),
})?;
let network_version = match network_version {
Some(network_version) => {
urlencoding::decode(network_version.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
msg: format!("while parsing network version as UTF-8: {err}"),
}
})?)
.map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
msg: format!("while URL-decoding network version: {err}"),
}
})?
}
None => Cow::Borrowed("0"),
};
(remote_name, task_uid, network_version)
}
})?,
),
};
let task_uid: u32 =
let task_uid: usize =
task_uid.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_TASK_UID_HEADER,
msg: format!("while parsing the task UID as an integer: {err}"),
})?;
let network_version: u128 =
network_version.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_ORIGIN_NETWORK_VERSION_HEADER,
msg: format!("while parsing the network version as a u128: {err}"),
})?;
let network_version = uuid::Uuid::from_u128(network_version);
Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid, network_version }))
}
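
A small sketch of the header round-trip this parser assumes: the sending side URL-encodes the remote name and task uid, and the receiving side decodes them and parses the uid back as an integer.

```rust
fn main() {
    // Sending side (see try_proxy above): values go through urlencoding.
    let remote = "ms 0"; // a space must survive the trip through a header
    let encoded = urlencoding::encode(remote).into_owned();
    assert_eq!(encoded, "ms%200");

    // Receiving side: decode, then parse the uid as an integer.
    let decoded = urlencoding::decode(&encoded).unwrap();
    assert_eq!(decoded, remote);
    let task_uid: u32 = "42".parse().unwrap();
    assert_eq!(task_uid, 42);
}
```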
pub fn import_data_from_req(req: &HttpRequest) -> Result<Option<ImportData>, MeilisearchHttpError> {
let (remote_name, index_name, documents) = match (
req.headers().get(PROXY_IMPORT_REMOTE_HEADER),
req.headers().get(PROXY_IMPORT_INDEX_HEADER),
req.headers().get(PROXY_IMPORT_DOCS_HEADER),
) {
(None, None, None) => return Ok(None),
(Some(remote_name), Some(index_name), Some(documents)) => {
let remote_name = urlencoding::decode(remote_name.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_REMOTE_HEADER,
msg: format!("while parsing import remote name as UTF-8: {err}"),
}
})?)
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_REMOTE_HEADER,
msg: format!("while URL-decoding import remote name: {err}"),
})?;
let index_name = urlencoding::decode(index_name.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_INDEX_HEADER,
msg: format!("while parsing import index name as UTF-8: {err}"),
}
})?)
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_INDEX_HEADER,
msg: format!("while URL-decoding import index name: {err}"),
})?;
let documents = urlencoding::decode(documents.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_DOCS_HEADER,
msg: format!("while parsing documents as UTF-8: {err}"),
}
})?)
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_DOCS_HEADER,
msg: format!("while URL-decoding documents: {err}"),
})?;
(remote_name, index_name, documents)
}
// catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some, Some, Some)
(remote_name, index_name, documents) => {
return Err(MeilisearchHttpError::InconsistentImportHeaders {
is_remote_missing: remote_name.is_none(),
is_index_missing: index_name.is_none(),
is_docs_missing: documents.is_none(),
})
}
};
let document_count: u64 =
documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_DOCS_HEADER,
msg: format!("while parsing the documents as an integer: {err}"),
})?;
Ok(Some(ImportData {
remote_name: remote_name.to_string(),
index_name: index_name.to_string(),
document_count,
}))
}
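
The all-or-nothing header matching above (and in `import_metadata_from_req` below) follows one shape: three headers are either all absent, all present, or the mismatch is reported field by field. A generic sketch with stand-in types:

```rust
fn classify<T>(
    a: Option<T>,
    b: Option<T>,
    c: Option<T>,
) -> Result<Option<(T, T, T)>, (bool, bool, bool)> {
    match (a, b, c) {
        (None, None, None) => Ok(None),
        (Some(a), Some(b), Some(c)) => Ok(Some((a, b, c))),
        // Partial sets are inconsistent; report which pieces are missing.
        (a, b, c) => Err((a.is_none(), b.is_none(), c.is_none())),
    }
}

fn main() {
    assert!(classify::<u8>(None, None, None).unwrap().is_none());
    assert!(classify(Some(1), Some(2), Some(3)).unwrap().is_some());
    assert_eq!(classify(Some(1), None, Some(3)).unwrap_err(), (false, true, false));
}
```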
pub fn import_metadata_from_req(
req: &HttpRequest,
) -> Result<Option<ImportMetadata>, MeilisearchHttpError> {
let (index_count, task_key, total_index_documents) = match (
req.headers().get(PROXY_IMPORT_INDEX_COUNT_HEADER),
req.headers().get(PROXY_IMPORT_TASK_KEY_HEADER),
req.headers().get(PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER),
) {
(None, None, None) => return Ok(None),
(Some(index_count), Some(task_key), Some(total_index_documents)) => {
let index_count = urlencoding::decode(index_count.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
msg: format!("while parsing import index count as UTF-8: {err}"),
}
})?)
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
msg: format!("while URL-decoding import index count: {err}"),
})?;
let task_key = urlencoding::decode(task_key.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
msg: format!("while parsing import task key as UTF-8: {err}"),
}
})?)
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
msg: format!("while URL-decoding import task key: {err}"),
})?;
let total_index_documents =
urlencoding::decode(total_index_documents.to_str().map_err(|err| {
MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
msg: format!("while parsing total index documents as UTF-8: {err}"),
}
})?)
.map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
msg: format!("while URL-decoding total index documents: {err}"),
})?;
(index_count, task_key, total_index_documents)
}
// catch-all pattern that has to contain an inconsistency since we already matched (None, None, None) and (Some, Some, Some)
(index_count, task_key, total_index_documents) => {
return Err(MeilisearchHttpError::InconsistentImportMetadataHeaders {
is_index_count_missing: index_count.is_none(),
is_task_key_missing: task_key.is_none(),
is_total_index_documents_missing: total_index_documents.is_none(),
})
}
};
let index_count: u64 =
index_count.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_INDEX_COUNT_HEADER,
msg: format!("while parsing the index count as an integer: {err}"),
})?;
let task_key: DocumentId =
task_key.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_TASK_KEY_HEADER,
msg: format!("while parsing import task key as an integer: {err}"),
})?;
let total_index_documents: u64 =
total_index_documents.parse().map_err(|err| MeilisearchHttpError::InvalidHeaderValue {
header_name: PROXY_IMPORT_TOTAL_INDEX_DOCS_HEADER,
msg: format!("while parsing the total index documents as an integer: {err}"),
})?;
Ok(Some(ImportMetadata { index_count, task_key, total_index_documents }))
Ok(Some(Origin { remote_name: remote_name.into_owned(), task_uid }))
}

View File

@@ -343,7 +343,6 @@ impl From<FacetSearchQuery> for SearchQuery {
hybrid,
ranking_score_threshold,
locales,
personalize: None,
}
}
}

View File

@@ -30,7 +30,7 @@ use crate::Opt;
pub mod compact;
pub mod documents;
pub mod enterprise_edition;
mod enterprise_edition;
pub mod facet_search;
pub mod search;
mod search_analytics;
@@ -41,9 +41,7 @@ mod settings_analytics;
pub mod similar;
mod similar_analytics;
pub use meilisearch_types::tasks::enterprise_edition::network::headers::{
PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER,
};
pub use enterprise_edition::proxy::{PROXY_ORIGIN_REMOTE_HEADER, PROXY_ORIGIN_TASK_UID_HEADER};
#[derive(OpenApi)]
#[openapi(

View File

@@ -24,9 +24,9 @@ use crate::metrics::MEILISEARCH_DEGRADED_SEARCH_REQUESTS;
use crate::routes::indexes::search_analytics::{SearchAggregator, SearchGET, SearchPOST};
use crate::routes::parse_include_metadata_header;
use crate::search::{
add_search_rules, perform_search, HybridQuery, MatchingStrategy, Personalize,
RankingScoreThreshold, RetrieveVectors, SearchKind, SearchParams, SearchQuery, SearchResult,
SemanticRatio, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG,
add_search_rules, perform_search, HybridQuery, MatchingStrategy, RankingScoreThreshold,
RetrieveVectors, SearchKind, SearchParams, SearchQuery, SearchResult, SemanticRatio,
DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG,
DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, DEFAULT_SEMANTIC_RATIO,
};
use crate::search_queue::SearchQueue;
@@ -134,8 +134,6 @@ pub struct SearchQueryGet {
#[deserr(default, error = DeserrQueryParamError<InvalidSearchLocales>)]
#[param(value_type = Vec<Locale>, explode = false)]
pub locales: Option<CS<Locale>>,
#[deserr(default, error = DeserrQueryParamError<InvalidSearchPersonalizeUserContext>)]
pub personalize_user_context: Option<String>,
}
#[derive(Debug, Clone, Copy, PartialEq, deserr::Deserr)]
@@ -207,9 +205,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
));
}
let personalize =
other.personalize_user_context.map(|user_context| Personalize { user_context });
Ok(Self {
q: other.q,
// `media` not supported for `GET`
@@ -239,7 +234,6 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
hybrid,
ranking_score_threshold: other.ranking_score_threshold.map(|o| o.0),
locales: other.locales.map(|o| o.into_iter().collect()),
personalize,
})
}
}
@@ -328,7 +322,6 @@ pub fn fix_sort_query_parameters(sort_query: &str) -> Vec<String> {
pub async fn search_with_url_query(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
search_queue: web::Data<SearchQueue>,
personalization_service: web::Data<crate::personalization::PersonalizationService>,
index_uid: web::Path<String>,
params: AwebQueryParameter<SearchQueryGet, DeserrQueryParamError>,
req: HttpRequest,
@@ -349,16 +342,9 @@ pub async fn search_with_url_query(
let index = index_scheduler.index(&index_uid)?;
// Extract personalization and query string before moving query
let personalize = query.personalize.take();
let search_kind =
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
// Save the query string for personalization if requested
let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
let permit = search_queue.try_get_search_permit().await?;
let include_metadata = parse_include_metadata_header(&req);
@@ -379,24 +365,12 @@ pub async fn search_with_url_query(
.await;
permit.drop().await;
let search_result = search_result?;
if let Ok((search_result, _)) = search_result.as_ref() {
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
}
analytics.publish(aggregate, &req);
let (mut search_result, time_budget) = search_result?;
// Apply personalization if requested
if let Some(personalize) = personalize.as_ref() {
search_result = personalization_service
.rerank_search_results(
search_result,
personalize,
personalize_query.as_deref(),
time_budget,
)
.await?;
}
let search_result = search_result?;
debug!(request_uid = ?request_uid, returns = ?search_result, "Search get");
Ok(HttpResponse::Ok().json(search_result))
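
Both search routes removed here share the same shape: save the query string before the query is moved into the blocking task, run the search, then rerank only when `personalize` was sent. A minimal stand-alone sketch (all types are hypothetical stand-ins for the crate's real ones, and the real flow is async and calls out to the personalization service):

struct SearchResult { hits: Vec<u32> }
struct Personalize { user_context: String }
struct TimeBudget(std::time::Duration);

fn maybe_rerank(
    mut result: SearchResult,
    personalize: Option<&Personalize>,
    saved_query: Option<&str>,
    budget: TimeBudget,
) -> SearchResult {
    if let Some(p) = personalize {
        // placeholder for the service call; only the control flow matters here
        let _ = (&p.user_context, saved_query, budget.0);
        result.hits.reverse(); // stand-in for the service's reordering
    }
    result
}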
@@ -461,7 +435,6 @@ pub async fn search_with_url_query(
pub async fn search_with_post(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
search_queue: web::Data<SearchQueue>,
personalization_service: web::Data<crate::personalization::PersonalizationService>,
index_uid: web::Path<String>,
params: AwebJson<SearchQuery, DeserrJsonError>,
req: HttpRequest,
@@ -482,18 +455,12 @@ pub async fn search_with_post(
let index = index_scheduler.index(&index_uid)?;
// Extract personalization and query string before moving query
let personalize = query.personalize.take();
let search_kind =
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
let include_metadata = parse_include_metadata_header(&req);
// Save the query string for personalization if requested
let personalize_query = personalize.is_some().then(|| query.q.clone()).flatten();
let permit = search_queue.try_get_search_permit().await?;
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
@@ -512,7 +479,7 @@ pub async fn search_with_post(
.await;
permit.drop().await;
let search_result = search_result?;
if let Ok((ref search_result, _)) = search_result {
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
if search_result.degraded {
MEILISEARCH_DEGRADED_SEARCH_REQUESTS.inc();
@@ -520,19 +487,7 @@ pub async fn search_with_post(
}
analytics.publish(aggregate, &req);
let (mut search_result, time_budget) = search_result?;
// Apply personalization if requested
if let Some(personalize) = personalize.as_ref() {
search_result = personalization_service
.rerank_search_results(
search_result,
personalize,
personalize_query.as_deref(),
time_budget,
)
.await?;
}
let search_result = search_result?;
debug!(request_uid = ?request_uid, returns = ?search_result, "Search post");
Ok(HttpResponse::Ok().json(search_result))

View File

@@ -7,7 +7,6 @@ use serde_json::{json, Value};
use crate::aggregate_methods;
use crate::analytics::{Aggregate, AggregateMethod};
use crate::metrics::MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS;
use crate::search::{
SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
@@ -96,9 +95,6 @@ pub struct SearchAggregator<Method: AggregateMethod> {
show_ranking_score_details: bool,
ranking_score_threshold: bool,
// personalization
total_personalized: usize,
marker: std::marker::PhantomData<Method>,
}
@@ -133,7 +129,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
hybrid,
ranking_score_threshold,
locales,
personalize,
} = query;
let mut ret = Self::default();
@@ -209,12 +204,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
ret.locales = locales.iter().copied().collect();
}
// personalization
if personalize.is_some() {
ret.total_personalized = 1;
MEILISEARCH_PERSONALIZED_SEARCH_REQUESTS.inc();
}
ret.highlight_pre_tag = *highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG();
ret.highlight_post_tag = *highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG();
ret.crop_marker = *crop_marker != DEFAULT_CROP_MARKER();
@@ -307,7 +296,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
total_used_negative_operator,
ranking_score_threshold,
mut locales,
total_personalized,
marker: _,
} = *new;
@@ -393,9 +381,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
// locales
self.locales.append(&mut locales);
// personalization
self.total_personalized = self.total_personalized.saturating_add(total_personalized);
self
}
@@ -441,7 +426,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
total_used_negative_operator,
ranking_score_threshold,
locales,
total_personalized,
marker: _,
} = *self;
@@ -515,9 +499,6 @@ impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {
"show_ranking_score_details": show_ranking_score_details,
"ranking_score_threshold": ranking_score_threshold,
},
"personalization": {
"total_personalized": total_personalized,
},
})
}
}
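
The aggregator merge above uses saturating adds so cross-request totals can never overflow; a stripped-down sketch of that pattern (the field name is taken from the hunk, everything else is hypothetical):

#[derive(Default)]
struct SearchAggregatorSketch { total_personalized: usize }

impl SearchAggregatorSketch {
    // merge another per-request aggregator into this one without risking overflow
    fn aggregate(mut self, new: Self) -> Self {
        self.total_personalized = self.total_personalized.saturating_add(new.total_personalized);
        self
    }
}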

View File

@@ -41,9 +41,7 @@ use crate::routes::indexes::IndexView;
use crate::routes::multi_search::SearchResults;
use crate::routes::network::{Network, Remote};
use crate::routes::swap_indexes::SwapIndexesPayload;
use crate::routes::webhooks::{
WebhookResults, WebhookSettings, WebhookWithMetadataRedactedAuthorization,
};
use crate::routes::webhooks::{WebhookResults, WebhookSettings, WebhookWithMetadata};
use crate::search::{
FederatedSearch, FederatedSearchResult, Federation, FederationOptions, MergeFacets,
SearchQueryWithIndex, SearchResultWithIndex, SimilarQuery, SimilarResult,
@@ -105,7 +103,7 @@ mod webhooks;
url = "/",
description = "Local server",
)),
components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadataRedactedAuthorization, meilisearch_types::milli::vector::VectorStoreBackend))
components(schemas(PaginationView<KeyView>, PaginationView<IndexView>, IndexView, DocumentDeletionByFilter, AllBatches, BatchStats, ProgressStepView, ProgressView, BatchView, RuntimeTogglableFeatures, SwapIndexesPayload, DocumentEditionByFunction, MergeFacets, FederationOptions, SearchQueryWithIndex, Federation, FederatedSearch, FederatedSearchResult, SearchResults, SearchResultWithIndex, SimilarQuery, SimilarResult, PaginationView<serde_json::Value>, BrowseQuery, UpdateIndexRequest, IndexUid, IndexCreateRequest, KeyView, Action, CreateApiKey, UpdateStderrLogs, LogMode, GetLogs, IndexStats, Stats, HealthStatus, HealthResponse, VersionResponse, Code, ErrorType, AllTasks, TaskView, Status, DetailsView, ResponseError, Settings<Unchecked>, Settings<Checked>, TypoSettings, MinWordSizeTyposSetting, FacetingSettings, PaginationSettings, SummarizedTaskView, Kind, Network, Remote, FilterableAttributesRule, FilterableAttributesPatterns, AttributePatterns, FilterableAttributesFeatures, FilterFeatures, Export, WebhookSettings, WebhookResults, WebhookWithMetadata, meilisearch_types::milli::vector::VectorStoreBackend))
)]
pub struct MeilisearchApi;
@@ -218,8 +216,6 @@ pub struct SummarizedTaskView {
deserialize_with = "time::serde::rfc3339::deserialize"
)]
enqueued_at: OffsetDateTime,
#[serde(default, skip_serializing_if = "Option::is_none")]
custom_metadata: Option<String>,
}
impl From<Task> for SummarizedTaskView {
@@ -230,7 +226,6 @@ impl From<Task> for SummarizedTaskView {
status: task.status,
kind: task.kind.as_kind(),
enqueued_at: task.enqueued_at,
custom_metadata: task.custom_metadata,
}
}
}

View File

@@ -146,7 +146,6 @@ pub struct SearchResults {
pub async fn multi_search_with_post(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
search_queue: Data<SearchQueue>,
personalization_service: web::Data<crate::personalization::PersonalizationService>,
params: AwebJson<FederatedSearch, DeserrJsonError>,
req: HttpRequest,
analytics: web::Data<Analytics>,
@@ -237,7 +236,7 @@ pub async fn multi_search_with_post(
// changes.
let search_results: Result<_, (ResponseError, usize)> = async {
let mut search_results = Vec::with_capacity(queries.len());
for (query_index, (index_uid, mut query, federation_options)) in queries
for (query_index, (index_uid, query, federation_options)) in queries
.into_iter()
.map(SearchQueryWithIndex::into_index_query_federation)
.enumerate()
@@ -270,13 +269,6 @@ pub async fn multi_search_with_post(
})
.with_index(query_index)?;
// Extract personalization and query string before moving query
let personalize = query.personalize.take();
// Save the query string for personalization if requested
let personalize_query =
personalize.is_some().then(|| query.q.clone()).flatten();
let index_uid_str = index_uid.to_string();
let search_kind = search_kind(
@@ -288,7 +280,7 @@ pub async fn multi_search_with_post(
.with_index(query_index)?;
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
let (mut search_result, time_budget) = tokio::task::spawn_blocking(move || {
let search_result = tokio::task::spawn_blocking(move || {
perform_search(
SearchParams {
index_uid: index_uid_str.clone(),
@@ -303,25 +295,11 @@ pub async fn multi_search_with_post(
)
})
.await
.with_index(query_index)?
.with_index(query_index)?;
// Apply personalization if requested
if let Some(personalize) = personalize.as_ref() {
search_result = personalization_service
.rerank_search_results(
search_result,
personalize,
personalize_query.as_deref(),
time_budget,
)
.await
.with_index(query_index)?;
}
search_results.push(SearchResultWithIndex {
index_uid: index_uid.into_inner(),
result: search_result,
result: search_result.with_index(query_index)?,
});
}
Ok(search_results)

View File

@@ -67,7 +67,6 @@ impl MultiSearchAggregator {
hybrid: _,
ranking_score_threshold: _,
locales: _,
personalize: _,
} in &federated_search.queries
{
if let Some(federation_options) = federation_options {

View File

@@ -9,27 +9,20 @@ use itertools::{EitherOrBoth, Itertools};
use meilisearch_types::deserr::DeserrJsonError;
use meilisearch_types::enterprise_edition::network::{Network as DbNetwork, Remote as DbRemote};
use meilisearch_types::error::deserr_codes::{
InvalidNetworkLeader, InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf,
InvalidNetworkRemotes, InvalidNetworkSearchApiKey, InvalidNetworkSelf, InvalidNetworkSharding,
InvalidNetworkUrl, InvalidNetworkWriteApiKey,
};
use meilisearch_types::error::ResponseError;
use meilisearch_types::keys::actions;
use meilisearch_types::milli::update::Setting;
use meilisearch_types::tasks::enterprise_edition::network::{
NetworkTopologyChange, Origin, DbTaskNetwork,
};
use meilisearch_types::tasks::KindWithContent;
use serde::Serialize;
use tracing::debug;
use utoipa::{OpenApi, ToSchema};
use crate::analytics::{Aggregate, Analytics};
use crate::error::MeilisearchHttpError;
use crate::extractors::authentication::policies::ActionPolicy;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::enterprise_edition::proxy::{proxy, Body};
use crate::routes::SummarizedTaskView;
#[derive(OpenApi)]
#[openapi(
@@ -90,7 +83,7 @@ async fn get_network(
Ok(HttpResponse::Ok().json(network))
}
#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
#[derive(Debug, Deserr, ToSchema, Serialize)]
#[deserr(error = DeserrJsonError<InvalidNetworkRemotes>, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
@@ -113,19 +106,12 @@ pub struct Remote {
pub write_api_key: Setting<String>,
}
#[derive(Clone, Debug, Deserr, ToSchema, Serialize)]
#[derive(Debug, Deserr, ToSchema, Serialize)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub struct Network {
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
"ms-00": {
"url": "http://localhost:7700"
},
"ms-01": {
"url": "http://localhost:7701"
}
}))]
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!("http://localhost:7700"))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
#[serde(default)]
pub remotes: Setting<BTreeMap<String, Option<Remote>>>,
@@ -133,21 +119,10 @@ pub struct Network {
#[serde(default, rename = "self")]
#[deserr(default, rename = "self", error = DeserrJsonError<InvalidNetworkSelf>)]
pub local: Setting<String>,
#[schema(value_type = Option<String>, example = json!("ms-00"))]
#[schema(value_type = Option<bool>, example = json!(true))]
#[serde(default)]
#[deserr(default, error = DeserrJsonError<InvalidNetworkLeader>)]
pub leader: Setting<String>,
#[schema(value_type = Option<BTreeMap<String, Remote>>, example = json!({
"ms-00": {
"url": "http://localhost:7700"
},
"ms-01": {
"url": "http://localhost:7701"
}
}))]
#[deserr(default, error = DeserrJsonError<InvalidNetworkRemotes>)]
#[serde(default)]
pub previous_remotes: Setting<BTreeMap<String, Option<Remote>>>,
#[deserr(default, error = DeserrJsonError<InvalidNetworkSharding>)]
pub sharding: Setting<bool>,
}
impl Remote {
@@ -232,203 +207,29 @@ async fn patch_network(
) -> Result<HttpResponse, ResponseError> {
index_scheduler.features().check_network("Using the /network route")?;
match crate::routes::indexes::enterprise_edition::proxy::origin_from_req(&req)? {
Some(origin) => {
patch_network_with_origin(index_scheduler, new_network, req, origin, analytics).await
}
None => patch_network_without_origin(index_scheduler, new_network, req, analytics).await,
}
}
async fn patch_network_without_origin(
index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
new_network: AwebJson<Network, DeserrJsonError>,
req: HttpRequest,
analytics: Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let new_network = new_network.0;
let old_network = index_scheduler.network();
debug!(parameters = ?new_network, "Patch network");
if !matches!(new_network.previous_remotes, Setting::NotSet) {
return Err(MeilisearchHttpError::UnexpectedNetworkPreviousRemotes.into());
}
let merged_network = merge_networks(old_network.clone(), new_network)?;
index_scheduler.put_network(merged_network.clone())?;
analytics.publish(
PatchNetworkAnalytics {
network_size: merged_network.remotes.len(),
network_has_self: merged_network.local.is_some(),
},
&req,
);
// TODO: spawn task only if necessary
let network_topology_change =
NetworkTopologyChange::new(old_network.clone(), merged_network.clone());
let task = KindWithContent::NetworkTopologyChange(network_topology_change);
let task = {
let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false)).await??
};
let mut proxied_network = Network {
remotes: Setting::Set(to_settings_remotes(&merged_network.remotes)),
local: Setting::NotSet,
leader: Setting::some_or_not_set(merged_network.leader.clone()),
previous_remotes: Setting::Set(to_settings_remotes(&old_network.remotes)),
};
let mut deleted_network = old_network;
let deleted_remotes = &mut deleted_network.remotes;
deleted_remotes.retain(|node, _| !merged_network.remotes.contains_key(node));
// proxy network change to the remaining remotes.
let updated_task = proxy(
&index_scheduler,
None,
&req,
DbTaskNetwork::Remotes {
remote_tasks: Default::default(),
network_version: merged_network.version,
},
merged_network,
Body::generated(proxied_network.clone(), |name, _remote, network| {
network.local = Setting::Set(name.to_string());
}),
&task,
)
.await?;
// unwrap: network was set by `proxy`
let task_network = updated_task.network.unwrap();
proxied_network.previous_remotes = Setting::NotSet;
// proxy network change to the deleted remotes
proxy(
&index_scheduler,
None,
&req,
task_network,
deleted_network,
Body::generated(proxied_network.clone(), |_name, _remote, network| {
network.local = Setting::Reset;
}),
&task,
)
.await?;
let task: SummarizedTaskView = task.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
async fn patch_network_with_origin(
index_scheduler: GuardedData<ActionPolicy<{ actions::NETWORK_UPDATE }>, Data<IndexScheduler>>,
merged_network: AwebJson<Network, DeserrJsonError>,
req: HttpRequest,
origin: Origin,
analytics: Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let merged_network = merged_network.into_inner();
debug!(parameters = ?merged_network, ?origin, "Patch network");
let mut remotes = BTreeMap::new();
let mut old_network = index_scheduler.network();
for (name, remote) in merged_network.remotes.set().into_iter().flat_map(|x| x.into_iter()) {
let Some(remote) = remote else { continue };
let remote = remote.try_into_db_node(&name)?;
remotes.insert(name, remote);
}
let mut previous_remotes = BTreeMap::new();
for (name, remote) in
merged_network.previous_remotes.set().into_iter().flat_map(|x| x.into_iter())
{
let Some(remote) = remote else {
continue;
};
let remote = remote.try_into_db_node(&name)?;
previous_remotes.insert(name, remote);
}
old_network.remotes = previous_remotes;
let new_network = DbNetwork {
local: merged_network.local.set(),
remotes,
leader: merged_network.leader.set(),
version: origin.network_version,
};
index_scheduler.put_network(new_network.clone())?;
analytics.publish(
PatchNetworkAnalytics {
network_size: new_network.remotes.len(),
network_has_self: new_network.local.is_some(),
},
&req,
);
// TODO: spawn task only if necessary
let network_topology_change = NetworkTopologyChange::new(old_network, new_network);
let task = KindWithContent::NetworkTopologyChange(network_topology_change);
let task = {
let index_scheduler = index_scheduler.clone();
tokio::task::spawn_blocking(move || index_scheduler.register(task, None, false)).await??
};
index_scheduler.set_task_network(task.uid, DbTaskNetwork::Origin { origin })?;
let task: SummarizedTaskView = task.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
fn to_settings_remotes(
db_remotes: &BTreeMap<String, DbRemote>,
) -> BTreeMap<String, Option<Remote>> {
db_remotes
.iter()
.map(|(name, remote)| {
(
name.clone(),
Some(Remote {
url: Setting::Set(remote.url.clone()),
search_api_key: Setting::some_or_not_set(remote.search_api_key.clone()),
write_api_key: Setting::some_or_not_set(remote.write_api_key.clone()),
}),
)
})
.collect()
}
fn merge_networks(
old_network: DbNetwork,
new_network: Network,
) -> Result<DbNetwork, ResponseError> {
let merged_self = match new_network.local {
Setting::Set(new_self) => Some(new_self),
Setting::Reset => None,
Setting::NotSet => old_network.local,
};
let merged_leader = match new_network.leader {
Setting::Set(new_leader) => Some(new_leader),
Setting::Reset => None,
Setting::NotSet => old_network.leader,
let merged_sharding = match new_network.sharding {
Setting::Set(new_sharding) => new_sharding,
Setting::Reset => false,
Setting::NotSet => old_network.sharding,
};
match (merged_leader.as_deref(), merged_self.as_deref()) {
// 1. Always allowed if there is no leader
(None, _) => (),
// 2. Allowed if the leader is self
(Some(leader), Some(this)) if leader == this => (),
// 3. Any other change is disallowed
(Some(leader), _) => {
return Err(MeilisearchHttpError::NotLeader { leader: leader.to_string() }.into())
}
if merged_sharding && merged_self.is_none() {
return Err(ResponseError::from_msg(
"`.sharding`: enabling the sharding requires `.self` to be set\n - Hint: Disable `sharding` or set `self` to a value.".into(),
meilisearch_types::error::Code::InvalidNetworkSharding,
));
}
let new_version = uuid::Uuid::now_v7();
let merged_remotes = match new_network.remotes {
Setting::Set(new_remotes) => {
let mut merged_remotes = BTreeMap::new();
@@ -500,11 +301,18 @@ fn merge_networks(
Setting::Reset => BTreeMap::new(),
Setting::NotSet => old_network.remotes,
};
let merged_network = DbNetwork {
local: merged_self,
remotes: merged_remotes,
leader: merged_leader,
version: new_version,
};
Ok(merged_network)
analytics.publish(
PatchNetworkAnalytics {
network_size: merged_remotes.len(),
network_has_self: merged_self.is_some(),
},
&req,
);
let merged_network =
DbNetwork { local: merged_self, remotes: merged_remotes, sharding: merged_sharding };
index_scheduler.put_network(merged_network.clone())?;
debug!(returns = ?merged_network, "Patch network");
Ok(HttpResponse::Ok().json(merged_network))
}
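
All of the field merges in `merge_networks` follow the same three-state rule on `Setting`: `Set` overrides, `Reset` clears, `NotSet` keeps the old value. A self-contained sketch of that rule (the real `Setting` lives in `meilisearch_types::milli::update`):

enum Setting<T> { Set(T), Reset, NotSet }

fn merge_setting<T>(new: Setting<T>, old: Option<T>) -> Option<T> {
    match new {
        Setting::Set(value) => Some(value), // explicit new value wins
        Setting::Reset => None,             // explicit reset clears the field
        Setting::NotSet => old,             // untouched fields keep the old value
    }
}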

View File

@@ -90,7 +90,7 @@ fn deny_immutable_fields_webhook(
#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
#[schema(rename_all = "camelCase")]
pub(super) struct WebhookWithMetadataRedactedAuthorization {
pub(super) struct WebhookWithMetadata {
uuid: Uuid,
is_editable: bool,
#[schema(value_type = WebhookSettings)]
@@ -98,9 +98,8 @@ pub(super) struct WebhookWithMetadataRedactedAuthorization {
webhook: Webhook,
}
impl WebhookWithMetadataRedactedAuthorization {
pub fn from(uuid: Uuid, mut webhook: Webhook) -> Self {
webhook.redact_authorization_header();
impl WebhookWithMetadata {
pub fn from(uuid: Uuid, webhook: Webhook) -> Self {
Self { uuid, is_editable: uuid != Uuid::nil(), webhook }
}
}
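
The rename makes the redaction visible in the type name. Snapshots later in this compare show values like `Bearer a-XXXX...` and `Bearer a-secXXXXXX...`, so the rule appears to keep a short prefix and mask the rest; a hedged sketch (the exact prefix length is an assumption, and the real rule seems to scale with the secret's length):

fn redact_authorization(value: &str) -> String {
    // keep a few leading characters for recognizability, mask the remainder
    let prefix: String = value.chars().take(8).collect();
    format!("{prefix}XXXX...")
}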
@@ -108,7 +107,7 @@ impl WebhookWithMetadataRedactedAuthorization {
#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub(super) struct WebhookResults {
results: Vec<WebhookWithMetadataRedactedAuthorization>,
results: Vec<WebhookWithMetadata>,
}
#[utoipa::path(
@@ -151,7 +150,7 @@ async fn get_webhooks(
let results = webhooks
.webhooks
.into_iter()
.map(|(uuid, webhook)| WebhookWithMetadataRedactedAuthorization::from(uuid, webhook))
.map(|(uuid, webhook)| WebhookWithMetadata::from(uuid, webhook))
.collect::<Vec<_>>();
let results = WebhookResults { results };
@@ -302,7 +301,7 @@ fn check_changed(uuid: Uuid, webhook: &Webhook) -> Result<(), WebhooksError> {
tag = "Webhooks",
security(("Bearer" = ["webhooks.get", "webhooks.*", "*.get", "*"])),
responses(
(status = 200, description = "Webhook found", body = WebhookWithMetadataRedactedAuthorization, content_type = "application/json", example = json!({
(status = 200, description = "Webhook found", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
@@ -325,7 +324,7 @@ async fn get_webhook(
let mut webhooks = index_scheduler.webhooks_view();
let webhook = webhooks.webhooks.remove(&uuid).ok_or(WebhookNotFound(uuid))?;
let webhook = WebhookWithMetadataRedactedAuthorization::from(uuid, webhook);
let webhook = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?webhook, "Get webhook");
Ok(HttpResponse::Ok().json(webhook))
@@ -338,7 +337,7 @@ async fn get_webhook(
request_body = WebhookSettings,
security(("Bearer" = ["webhooks.create", "webhooks.*", "*"])),
responses(
(status = 201, description = "Webhook created successfully", body = WebhookWithMetadataRedactedAuthorization, content_type = "application/json", example = json!({
(status = 201, description = "Webhook created successfully", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
@@ -384,7 +383,7 @@ async fn post_webhook(
analytics.publish(PostWebhooksAnalytics, &req);
let response = WebhookWithMetadataRedactedAuthorization::from(uuid, webhook);
let response = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?response, "Post webhook");
Ok(HttpResponse::Created().json(response))
}
@@ -396,7 +395,7 @@ async fn post_webhook(
request_body = WebhookSettings,
security(("Bearer" = ["webhooks.update", "webhooks.*", "*"])),
responses(
(status = 200, description = "Webhook updated successfully", body = WebhookWithMetadataRedactedAuthorization, content_type = "application/json", example = json!({
(status = 200, description = "Webhook updated successfully", body = WebhookWithMetadata, content_type = "application/json", example = json!({
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"url": "https://your.site/on-tasks-completed",
"headers": {
@@ -436,7 +435,7 @@ async fn patch_webhook(
analytics.publish(PatchWebhooksAnalytics, &req);
let response = WebhookWithMetadataRedactedAuthorization::from(uuid, webhook);
let response = WebhookWithMetadata::from(uuid, webhook);
debug!(returns = ?response, "Patch webhook");
Ok(HttpResponse::Ok().json(response))
}

View File

@@ -601,10 +601,6 @@ impl PartitionedQueries {
.into());
}
if federated_query.has_personalize() {
return Err(MeilisearchHttpError::PersonalizationInFederatedQuery(query_index).into());
}
let (index_uid, query, federation_options) = federated_query.into_index_query_federation();
let federation_options = federation_options.unwrap_or_default();

View File

@@ -59,13 +59,6 @@ pub const DEFAULT_HIGHLIGHT_POST_TAG: fn() -> String = || "</em>".to_string();
pub const DEFAULT_SEMANTIC_RATIO: fn() -> SemanticRatio = || SemanticRatio(0.5);
pub const INCLUDE_METADATA_HEADER: &str = "Meili-Include-Metadata";
#[derive(Clone, Default, PartialEq, Deserr, ToSchema, Debug)]
#[deserr(error = DeserrJsonError<InvalidSearchPersonalize>, rename_all = camelCase, deny_unknown_fields)]
pub struct Personalize {
#[deserr(error = DeserrJsonError<InvalidSearchPersonalizeUserContext>)]
pub user_context: String,
}
#[derive(Clone, Default, PartialEq, Deserr, ToSchema)]
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
pub struct SearchQuery {
@@ -129,8 +122,6 @@ pub struct SearchQuery {
pub ranking_score_threshold: Option<RankingScoreThreshold>,
#[deserr(default, error = DeserrJsonError<InvalidSearchLocales>)]
pub locales: Option<Vec<Locale>>,
#[deserr(default, error = DeserrJsonError<InvalidSearchPersonalize>)]
pub personalize: Option<Personalize>,
}
impl From<SearchParameters> for SearchQuery {
@@ -178,7 +169,6 @@ impl From<SearchParameters> for SearchQuery {
highlight_post_tag: DEFAULT_HIGHLIGHT_POST_TAG(),
crop_marker: DEFAULT_CROP_MARKER(),
locales: None,
personalize: None,
}
}
}
@@ -260,7 +250,6 @@ impl fmt::Debug for SearchQuery {
attributes_to_search_on,
ranking_score_threshold,
locales,
personalize,
} = self;
let mut debug = f.debug_struct("SearchQuery");
@@ -349,10 +338,6 @@ impl fmt::Debug for SearchQuery {
debug.field("locales", &locales);
}
if let Some(personalize) = personalize {
debug.field("personalize", &personalize);
}
debug.finish()
}
}
@@ -558,9 +543,6 @@ pub struct SearchQueryWithIndex {
pub ranking_score_threshold: Option<RankingScoreThreshold>,
#[deserr(default, error = DeserrJsonError<InvalidSearchLocales>)]
pub locales: Option<Vec<Locale>>,
#[deserr(default, error = DeserrJsonError<InvalidSearchPersonalize>)]
#[serde(skip)]
pub personalize: Option<Personalize>,
#[deserr(default)]
pub federation_options: Option<FederationOptions>,
@@ -585,10 +567,6 @@ impl SearchQueryWithIndex {
self.facets.as_deref().filter(|v| !v.is_empty())
}
pub fn has_personalize(&self) -> bool {
self.personalize.is_some()
}
pub fn from_index_query_federation(
index_uid: IndexUid,
query: SearchQuery,
@@ -622,7 +600,6 @@ impl SearchQueryWithIndex {
attributes_to_search_on,
ranking_score_threshold,
locales,
personalize,
} = query;
SearchQueryWithIndex {
@@ -654,7 +631,6 @@ impl SearchQueryWithIndex {
attributes_to_search_on,
ranking_score_threshold,
locales,
personalize,
federation_options,
}
}
@@ -690,7 +666,6 @@ impl SearchQueryWithIndex {
hybrid,
ranking_score_threshold,
locales,
personalize,
} = self;
(
index_uid,
@@ -722,7 +697,6 @@ impl SearchQueryWithIndex {
hybrid,
ranking_score_threshold,
locales,
personalize,
// do not use ..Default::default() here,
// rather add any missing field from `SearchQuery` to `SearchQueryWithIndex`
},
@@ -1175,10 +1149,7 @@ pub struct SearchParams {
pub include_metadata: bool,
}
pub fn perform_search(
params: SearchParams,
index: &Index,
) -> Result<(SearchResult, TimeBudget), ResponseError> {
pub fn perform_search(params: SearchParams, index: &Index) -> Result<SearchResult, ResponseError> {
let SearchParams {
index_uid,
query,
@@ -1197,7 +1168,7 @@ pub fn perform_search(
};
let (search, is_finite_pagination, max_total_hits, offset) =
prepare_search(index, &rtxn, &query, &search_kind, time_budget.clone(), features)?;
prepare_search(index, &rtxn, &query, &search_kind, time_budget, features)?;
let (
milli::SearchResult {
@@ -1255,7 +1226,6 @@ pub fn perform_search(
attributes_to_search_on: _,
filter: _,
distinct: _,
personalize: _,
} = query;
let format = AttributesFormat {
@@ -1321,7 +1291,7 @@ pub fn perform_search(
request_uid: Some(request_uid),
metadata,
};
Ok((result, time_budget))
Ok(result)
}
#[derive(Debug, Clone, Default, Serialize, Deserialize, ToSchema)]
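
The removed signature threads the leftover `TimeBudget` out of `perform_search` so the caller can hand it to the reranker; the restored one returns only the result. A stand-in illustrating just that signature change (names are hypothetical):

struct SearchResultSketch;
struct RemainingBudget { remaining_ms: u64 }

// hypothetical: run the search, then hand back whatever budget is left over
fn perform_search_sketch(budget_ms: u64) -> (SearchResultSketch, RemainingBudget) {
    let spent_ms = 3; // pretend the search itself took 3ms
    (SearchResultSketch, RemainingBudget { remaining_ms: budget_ms.saturating_sub(spent_ms) })
}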

View File

@@ -91,16 +91,7 @@ impl<'a> Index<'a, Owned> {
documents: Value,
primary_key: Option<&str>,
) -> (Value, StatusCode) {
self._add_documents(documents, primary_key, None).await
}
pub async fn add_documents_with_custom_metadata(
&self,
documents: Value,
primary_key: Option<&str>,
custom_metadata: Option<&str>,
) -> (Value, StatusCode) {
self._add_documents(documents, primary_key, custom_metadata).await
self._add_documents(documents, primary_key).await
}
pub async fn raw_add_documents(
@@ -361,25 +352,12 @@ impl<State> Index<'_, State> {
&self,
documents: Value,
primary_key: Option<&str>,
custom_metadata: Option<&str>,
) -> (Value, StatusCode) {
let url = match (primary_key, custom_metadata) {
(Some(key), Some(meta)) => {
format!(
"/indexes/{}/documents?primaryKey={key}&customMetadata={meta}",
urlencode(self.uid.as_ref()),
)
let url = match primary_key {
Some(key) => {
format!("/indexes/{}/documents?primaryKey={}", urlencode(self.uid.as_ref()), key)
}
(None, Some(meta)) => {
format!(
"/indexes/{}/documents?&customMetadata={meta}",
urlencode(self.uid.as_ref()),
)
}
(Some(key), None) => {
format!("/indexes/{}/documents?&primaryKey={key}", urlencode(self.uid.as_ref()),)
}
(None, None) => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
None => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
};
self.service.post_encoded(url, documents, self.encoder).await
}

View File

@@ -241,7 +241,7 @@ pub async fn shared_index_with_documents() -> &'static Index<'static, Shared> {
let server = Server::new_shared();
let index = server._index("SHARED_DOCUMENTS").to_shared();
let documents = DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None, None).await;
let (response, _code) = index._add_documents(documents, None).await;
server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
@@ -284,7 +284,7 @@ pub async fn shared_index_with_score_documents() -> &'static Index<'static, Shar
let server = Server::new_shared();
let index = server._index("SHARED_SCORE_DOCUMENTS").to_shared();
let documents = SCORE_DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None, None).await;
let (response, _code) = index._add_documents(documents, None).await;
server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
@@ -361,7 +361,7 @@ pub async fn shared_index_with_nested_documents() -> &'static Index<'static, Sha
let server = Server::new_shared();
let index = server._index("SHARED_NESTED_DOCUMENTS").to_shared();
let documents = NESTED_DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None, None).await;
let (response, _code) = index._add_documents(documents, None).await;
server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
@@ -508,7 +508,7 @@ pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared
.get_or_init(|| async {
let server = Server::new_shared();
let index = server._index("SHARED_GEO_DOCUMENTS").to_shared();
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None, None).await;
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await;
server.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
@@ -531,7 +531,7 @@ pub async fn shared_index_geojson_documents() -> &'static Index<'static, Shared>
let index = server._index("SHARED_GEOJSON_DOCUMENTS").to_shared();
let countries = include_str!("../documents/geojson/assets/countries.json");
let lille = serde_json::from_str::<serde_json::Value>(countries).unwrap();
let (response, _code) = index._add_documents(Value(lille), Some("name"), None).await;
let (response, _code) = index._add_documents(Value(lille), Some("name")).await;
server.wait_task(response.uid()).await.succeeded();
let (response, _code) =

View File

@@ -49,8 +49,8 @@ impl Server<Owned> {
}
let options = default_settings(dir.path());
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };
Server { service, _dir: Some(dir), _marker: PhantomData }
@@ -65,9 +65,7 @@ impl Server<Owned> {
options.master_key = Some("MASTER_KEY".to_string());
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };
Server { service, _dir: Some(dir), _marker: PhantomData }
@@ -80,9 +78,7 @@ impl Server<Owned> {
}
pub async fn new_with_options(options: Opt) -> Result<Self, anyhow::Error> {
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle)?;
let (index_scheduler, auth) = setup_meilisearch(&options)?;
let service = Service { index_scheduler, auth, options, api_key: None };
Ok(Server { service, _dir: None, _marker: PhantomData })
@@ -221,9 +217,8 @@ impl Server<Shared> {
}
let options = default_settings(dir.path());
let handle = tokio::runtime::Handle::current();
let (index_scheduler, auth) = setup_meilisearch(&options, handle).unwrap();
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, api_key: None, options };
Server { service, _dir: Some(dir), _marker: PhantomData }

View File

@@ -10,9 +10,8 @@ use actix_web::test::TestRequest;
use actix_web::web::Data;
use index_scheduler::IndexScheduler;
use meilisearch::analytics::Analytics;
use meilisearch::personalization::PersonalizationService;
use meilisearch::search_queue::SearchQueue;
use meilisearch::{create_app, Opt, ServicesData, SubscriberForSecondLayer};
use meilisearch::{create_app, Opt, SubscriberForSecondLayer};
use meilisearch_auth::AuthController;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::Layer;
@@ -136,24 +135,14 @@ impl Service {
self.options.experimental_search_queue_size,
NonZeroUsize::new(1).unwrap(),
);
let personalization_service = self
.options
.experimental_personalization_api_key
.clone()
.map(PersonalizationService::cohere)
.unwrap_or_else(PersonalizationService::disabled);
actix_web::test::init_service(create_app(
ServicesData {
index_scheduler: self.index_scheduler.clone().into(),
auth: self.auth.clone().into(),
search_queue: Data::new(search_queue),
personalization_service: Data::new(personalization_service),
logs_route_handle: Data::new(route_layer_handle),
logs_stderr_handle: Data::new(stderr_layer_handle),
analytics: Data::new(Analytics::no_analytics()),
},
self.index_scheduler.clone().into(),
self.auth.clone().into(),
Data::new(search_queue),
self.options.clone(),
(route_layer_handle, stderr_layer_handle),
Data::new(Analytics::no_analytics()),
true,
))
.await

View File

@@ -207,118 +207,3 @@ async fn errors() {
}
"###);
}
#[actix_rt::test]
async fn search_with_personalization_without_enabling_the_feature() {
let server = Server::new().await;
let index = server.unique_index();
// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();
// Try to search with personalization - should return feature_not_enabled error
let (response, code) = index
.search_post(json!({
"q": "movie",
"personalize": {
"userContext": "I love science fiction movies"
}
}))
.await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
{
"message": "reranking search results requires enabling the `personalization` experimental feature. See https://github.com/orgs/meilisearch/discussions/866",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
"###);
}
#[actix_rt::test]
async fn multi_search_with_personalization_without_enabling_the_feature() {
let server = Server::new().await;
let index = server.unique_index();
// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();
// Try to multi-search with personalization - should return feature_not_enabled error
let (response, code) = server
.multi_search(json!({
"queries": [
{
"indexUid": index.uid,
"q": "movie",
"personalize": {
"userContext": "I love science fiction movies"
}
}
]
}))
.await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
{
"message": "Inside `.queries[0]`: reranking search results requires enabling the `personalization` experimental feature. See https://github.com/orgs/meilisearch/discussions/866",
"code": "feature_not_enabled",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
}
"###);
// Try to federated search with personalization - should return feature_not_enabled error
let (response, code) = server
.multi_search(json!({
"federation": {},
"queries": [
{
"indexUid": index.uid,
"q": "movie",
"personalize": {
"userContext": "I love science fiction movies"
}
}
]
}))
.await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
{
"message": "Inside `.queries[0]`: Using `.personalize` is not allowed in federated queries.\n - Hint: remove `personalize` from query #0 or remove `federation` from the request",
"code": "invalid_multi_search_query_personalization",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_multi_search_query_personalization"
}
"###);
}

View File

@@ -8,9 +8,8 @@ use actix_web::http::header::ContentType;
use actix_web::web::Data;
use meili_snap::snapshot;
use meilisearch::analytics::Analytics;
use meilisearch::personalization::PersonalizationService;
use meilisearch::search_queue::SearchQueue;
use meilisearch::{create_app, Opt, ServicesData, SubscriberForSecondLayer};
use meilisearch::{create_app, Opt, SubscriberForSecondLayer};
use tracing::level_filters::LevelFilter;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::Layer;
@@ -51,16 +50,12 @@ async fn basic_test_log_stream_route() {
);
let app = actix_web::test::init_service(create_app(
ServicesData {
index_scheduler: server.service.index_scheduler.clone().into(),
auth: server.service.auth.clone().into(),
search_queue: Data::new(search_queue),
personalization_service: Data::new(PersonalizationService::disabled()),
logs_route_handle: Data::new(route_layer_handle),
logs_stderr_handle: Data::new(stderr_layer_handle),
analytics: Data::new(Analytics::no_analytics()),
},
server.service.index_scheduler.clone().into(),
server.service.auth.clone().into(),
Data::new(search_queue),
server.service.options.clone(),
(route_layer_handle, stderr_layer_handle),
Data::new(Analytics::no_analytics()),
true,
))
.await;

View File

@@ -1,11 +1,7 @@
use meili_snap::*;
use meilisearch::Opt;
use tempfile::TempDir;
use super::test_settings_documents_indexing_swapping_and_search;
use crate::common::{
default_settings, shared_does_not_exists_index, Server, DOCUMENTS, NESTED_DOCUMENTS,
};
use crate::common::{shared_does_not_exists_index, Server, DOCUMENTS, NESTED_DOCUMENTS};
use crate::json;
#[actix_rt::test]
@@ -1324,98 +1320,3 @@ async fn search_with_contains_without_enabling_the_feature() {
}
"#);
}
#[actix_rt::test]
#[ignore]
async fn search_with_personalization_invalid_api_key() {
// Create a server with a fake personalization API key
let dir = TempDir::new().unwrap();
let options = Opt {
experimental_personalization_api_key: Some("fake-api-key-12345".to_string()),
..default_settings(dir.path())
};
let server = Server::new_with_options(options).await.unwrap();
let index = server.unique_index();
// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();
// Try to search with personalization - should return remote_invalid_api_key error
let (response, code) = index
.search_post(json!({
"q": "the",
"personalize": {
"userContext": "I love science fiction movies"
}
}))
.await;
snapshot!(code, @"403 Forbidden");
snapshot!(json_string!(response), @r#"
{
"message": "Personalization service: Unauthorized: invalid API key",
"code": "remote_invalid_api_key",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#remote_invalid_api_key"
}
"#);
}
#[actix_rt::test]
async fn search_with_personalization_no_user_context() {
// Create a server with a fake personalization API key
let dir = TempDir::new().unwrap();
let options = Opt {
experimental_personalization_api_key: Some("fake-api-key-12345".to_string()),
..default_settings(dir.path())
};
let server = Server::new_with_options(options).await.unwrap();
let index = server.unique_index();
// Create the index and add some documents
let (task, _code) = index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index
.add_documents(
json!([
{"id": 1, "title": "The Dark Knight", "genre": "Action"},
{"id": 2, "title": "Inception", "genre": "Sci-Fi"},
{"id": 3, "title": "The Matrix", "genre": "Sci-Fi"}
]),
None,
)
.await;
server.wait_task(task.uid()).await.succeeded();
// Try to search with personalization - should return remote_invalid_api_key error
let (response, code) = index
.search_post(json!({
"q": "the",
"personalize": {}
}))
.await;
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Missing field `userContext` inside `.personalize`",
"code": "invalid_search_personalize",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_personalize"
}
"###);
}

View File

@@ -3141,513 +3141,3 @@ fn fail(override_response_body: Option<&str>) -> ResponseTemplate {
response.set_body_json(json!({"error": "provoked error", "code": "test_error", "link": "https://docs.meilisearch.com/errors#test_error"}))
}
}
#[actix_rt::test]
async fn remote_auto_sharding() {
let ms0 = Server::new().await;
let ms1 = Server::new().await;
let ms2 = Server::new().await;
// enable feature
let (response, code) = ms0.set_features(json!({"network": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["network"]), @"true");
let (response, code) = ms1.set_features(json!({"network": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["network"]), @"true");
let (response, code) = ms2.set_features(json!({"network": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["network"]), @"true");
// set self & sharding
let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
"remotes": {},
"sharding": true
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
"remotes": {},
"sharding": true
}
"###);
let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms2",
"remotes": {},
"sharding": true
}
"###);
// wrap servers
let ms0 = Arc::new(ms0);
let ms1 = Arc::new(ms1);
let ms2 = Arc::new(ms2);
let rms0 = LocalMeili::new(ms0.clone()).await;
let rms1 = LocalMeili::new(ms1.clone()).await;
let rms2 = LocalMeili::new(ms2.clone()).await;
// set network
let network = json!({"remotes": {
"ms0": {
"url": rms0.url()
},
"ms1": {
"url": rms1.url()
},
"ms2": {
"url": rms2.url()
}
}});
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms2.set_network(network.clone()).await;
snapshot!(status_code, @"200 OK");
// add documents
let documents = SCORE_DOCUMENTS.clone();
let documents = documents.as_array().unwrap();
let index0 = ms0.index("test");
let _index1 = ms1.index("test");
let _index2 = ms2.index("test");
let (task, _status_code) = index0.add_documents(json!(documents), None).await;
let t0 = task.uid();
let (t, _) = ms0.get_task(task.uid()).await;
let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
ms0.wait_task(t0).await.succeeded();
ms1.wait_task(t1).await.succeeded();
ms2.wait_task(t2).await.succeeded();
// perform multi-search
let query = "badman returns";
let request = json!({
"federation": {},
"queries": [
{
"q": query,
"indexUid": "test",
"federationOptions": {
"remote": "ms0"
}
},
{
"q": query,
"indexUid": "test",
"federationOptions": {
"remote": "ms1"
}
},
{
"q": query,
"indexUid": "test",
"federationOptions": {
"remote": "ms2"
}
},
]
});
let (response, code) = ms0.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"title": "Batman Returns",
"id": "C",
"_federation": {
"indexUid": "test",
"queriesPosition": 2,
"weightedRankingScore": 0.8317901234567902,
"remote": "ms2"
}
},
{
"title": "Batman the dark knight returns: Part 1",
"id": "A",
"_federation": {
"indexUid": "test",
"queriesPosition": 1,
"weightedRankingScore": 0.7028218694885362,
"remote": "ms1"
}
},
{
"title": "Batman the dark knight returns: Part 2",
"id": "B",
"_federation": {
"indexUid": "test",
"queriesPosition": 1,
"weightedRankingScore": 0.7028218694885362,
"remote": "ms1"
}
},
{
"title": "Badman",
"id": "E",
"_federation": {
"indexUid": "test",
"queriesPosition": 2,
"weightedRankingScore": 0.5,
"remote": "ms2"
}
},
{
"title": "Batman",
"id": "D",
"_federation": {
"indexUid": "test",
"queriesPosition": 0,
"weightedRankingScore": 0.23106060606060605,
"remote": "ms0"
}
}
],
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, code) = ms1.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"title": "Batman Returns",
"id": "C",
"_federation": {
"indexUid": "test",
"queriesPosition": 2,
"weightedRankingScore": 0.8317901234567902,
"remote": "ms2"
}
},
{
"title": "Batman the dark knight returns: Part 1",
"id": "A",
"_federation": {
"indexUid": "test",
"queriesPosition": 1,
"weightedRankingScore": 0.7028218694885362,
"remote": "ms1"
}
},
{
"title": "Batman the dark knight returns: Part 2",
"id": "B",
"_federation": {
"indexUid": "test",
"queriesPosition": 1,
"weightedRankingScore": 0.7028218694885362,
"remote": "ms1"
}
},
{
"title": "Badman",
"id": "E",
"_federation": {
"indexUid": "test",
"queriesPosition": 2,
"weightedRankingScore": 0.5,
"remote": "ms2"
}
},
{
"title": "Batman",
"id": "D",
"_federation": {
"indexUid": "test",
"queriesPosition": 0,
"weightedRankingScore": 0.23106060606060605,
"remote": "ms0"
}
}
],
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
let (response, code) = ms2.multi_search(request.clone()).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]", ".requestUid" => "[uuid]" }), @r###"
{
"hits": [
{
"title": "Batman Returns",
"id": "C",
"_federation": {
"indexUid": "test",
"queriesPosition": 2,
"weightedRankingScore": 0.8317901234567902,
"remote": "ms2"
}
},
{
"title": "Batman the dark knight returns: Part 1",
"id": "A",
"_federation": {
"indexUid": "test",
"queriesPosition": 1,
"weightedRankingScore": 0.7028218694885362,
"remote": "ms1"
}
},
{
"title": "Batman the dark knight returns: Part 2",
"id": "B",
"_federation": {
"indexUid": "test",
"queriesPosition": 1,
"weightedRankingScore": 0.7028218694885362,
"remote": "ms1"
}
},
{
"title": "Badman",
"id": "E",
"_federation": {
"indexUid": "test",
"queriesPosition": 2,
"weightedRankingScore": 0.5,
"remote": "ms2"
}
},
{
"title": "Batman",
"id": "D",
"_federation": {
"indexUid": "test",
"queriesPosition": 0,
"weightedRankingScore": 0.23106060606060605,
"remote": "ms0"
}
}
],
"processingTimeMs": "[time]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 5,
"requestUid": "[uuid]",
"remoteErrors": {}
}
"###);
}
#[actix_rt::test]
async fn remote_auto_sharding_with_custom_metadata() {
let ms0 = Server::new().await;
let ms1 = Server::new().await;
let ms2 = Server::new().await;
// enable feature
let (response, code) = ms0.set_features(json!({"network": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["network"]), @"true");
let (response, code) = ms1.set_features(json!({"network": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["network"]), @"true");
let (response, code) = ms2.set_features(json!({"network": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["network"]), @"true");
// set self & sharding
let (response, code) = ms0.set_network(json!({"self": "ms0", "sharding": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms0",
"remotes": {},
"sharding": true
}
"###);
let (response, code) = ms1.set_network(json!({"self": "ms1", "sharding": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms1",
"remotes": {},
"sharding": true
}
"###);
let (response, code) = ms2.set_network(json!({"self": "ms2", "sharding": true})).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(response), @r###"
{
"self": "ms2",
"remotes": {},
"sharding": true
}
"###);
// wrap servers
let ms0 = Arc::new(ms0);
let ms1 = Arc::new(ms1);
let ms2 = Arc::new(ms2);
let rms0 = LocalMeili::new(ms0.clone()).await;
let rms1 = LocalMeili::new(ms1.clone()).await;
let rms2 = LocalMeili::new(ms2.clone()).await;
// set network
let network = json!({"remotes": {
"ms0": {
"url": rms0.url()
},
"ms1": {
"url": rms1.url()
},
"ms2": {
"url": rms2.url()
}
}});
println!("{}", serde_json::to_string_pretty(&network).unwrap());
let (_response, status_code) = ms0.set_network(network.clone()).await;
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms1.set_network(network.clone()).await;
snapshot!(status_code, @"200 OK");
let (_response, status_code) = ms2.set_network(network.clone()).await;
snapshot!(status_code, @"200 OK");
// add documents
let documents = SCORE_DOCUMENTS.clone();
let documents = documents.as_array().unwrap();
let index0 = ms0.index("test");
let _index1 = ms1.index("test");
let _index2 = ms2.index("test");
let (task, _status_code) = index0
.add_documents_with_custom_metadata(
json!(documents),
None,
Some("remote_auto_sharding_with_custom_metadata"),
)
.await;
let t0 = task.uid();
let (t, _) = ms0.get_task(task.uid()).await;
let t1 = t["network"]["remote_tasks"]["ms1"]["taskUid"].as_u64().unwrap();
let t2 = t["network"]["remote_tasks"]["ms2"]["taskUid"].as_u64().unwrap();
let t = ms0.wait_task(t0).await.succeeded();
snapshot!(t, @r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 5,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]",
"network": {
"remote_tasks": {
"ms1": {
"taskUid": 0,
"error": null
},
"ms2": {
"taskUid": 0,
"error": null
}
}
},
"customMetadata": "remote_auto_sharding_with_custom_metadata"
}
"###);
let t = ms1.wait_task(t1).await.succeeded();
snapshot!(t, @r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 5,
"indexedDocuments": 2
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]",
"network": {
"origin": {
"remoteName": "ms0",
"taskUid": 0
}
},
"customMetadata": "remote_auto_sharding_with_custom_metadata"
}
"###);
let t = ms2.wait_task(t2).await.succeeded();
snapshot!(t, @r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 5,
"indexedDocuments": 2
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]",
"network": {
"origin": {
"remoteName": "ms0",
"taskUid": 0
}
},
"customMetadata": "remote_auto_sharding_with_custom_metadata"
}
"###);
}
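
The snapshots above show every node receiving all 5 documents (`receivedDocuments: 5`) but indexing only its shard (1/2/2 across ms0/ms1/ms2). A hypothetical sketch of hash-based routing that would produce such a split (the real sharding logic lives elsewhere in the workspace and need not use this hash):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// route a document to exactly one remote by hashing its primary key
fn shard_for(doc_id: &str, remotes: &[&str]) -> usize {
    let mut hasher = DefaultHasher::new();
    doc_id.hash(&mut hasher);
    (hasher.finish() % remotes.len() as u64) as usize
}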

View File

@@ -82,7 +82,7 @@ async fn cli_only() {
let (webhooks, code) = server.get_webhooks().await;
snapshot!(code, @"200 OK");
snapshot!(webhooks, @r###"
snapshot!(webhooks, @r#"
{
"results": [
{
@@ -90,12 +90,12 @@ async fn cli_only() {
"isEditable": false,
"url": "https://example-cli.com/",
"headers": {
"Authorization": "Bearer a-XXXX..."
"Authorization": "Bearer a-secret-token"
}
}
]
}
"###);
"#);
}
#[actix_web::test]
@@ -233,7 +233,7 @@ async fn cli_with_dumps() {
let (webhooks, code) = server.get_webhooks().await;
snapshot!(code, @"200 OK");
snapshot!(webhooks, @r###"
snapshot!(webhooks, @r#"
{
"results": [
{
@@ -241,7 +241,7 @@ async fn cli_with_dumps() {
"isEditable": false,
"url": "http://defined-in-test-cli.com/",
"headers": {
"Authorization": "Bearer a-secXXXXXX..."
"Authorization": "Bearer a-secret-token-defined-in-test-cli"
}
},
{
@@ -255,7 +255,7 @@ async fn cli_with_dumps() {
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "XXX..."
"authorization": "TOKEN"
}
},
{
@@ -266,7 +266,7 @@ async fn cli_with_dumps() {
}
]
}
"###);
"#);
}
#[actix_web::test]
@@ -367,30 +367,30 @@ async fn post_get_delete() {
}))
.await;
snapshot!(code, @"201 Created");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r###"
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "XXX..."
"authorization": "TOKEN"
}
}
"###);
"#);
let uuid = value.get("uuid").unwrap().as_str().unwrap();
let (value, code) = server.get_webhook(uuid).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r###"
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "XXX..."
"authorization": "TOKEN"
}
}
"###);
"#);
let (_value, code) = server.delete_webhook(uuid).await;
snapshot!(code, @"204 No Content");
@@ -430,31 +430,31 @@ async fn create_and_patch() {
let (value, code) =
server.patch_webhook(&uuid, json!({ "headers": { "authorization": "TOKEN" } })).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r###"
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "XXX..."
"authorization": "TOKEN"
}
}
"###);
"#);
let (value, code) =
server.patch_webhook(&uuid, json!({ "headers": { "authorization2": "TOKEN" } })).await;
snapshot!(code, @"200 OK");
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r###"
snapshot!(json_string!(value, { ".uuid" => "[uuid]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "https://example.com/hook",
"headers": {
"authorization": "XXX...",
"authorization": "TOKEN",
"authorization2": "TOKEN"
}
}
"###);
"#);
let (value, code) =
server.patch_webhook(&uuid, json!({ "headers": { "authorization": null } })).await;
@@ -656,119 +656,3 @@ async fn forbidden_fields() {
}
"#);
}
#[actix_web::test]
async fn receive_custom_metadata() {
let WebhookHandle { server_handle: handle1, url: url1, receiver: mut receiver1 } =
create_webhook_server().await;
let WebhookHandle { server_handle: handle2, url: url2, receiver: mut receiver2 } =
create_webhook_server().await;
let WebhookHandle { server_handle: handle3, url: url3, receiver: mut receiver3 } =
create_webhook_server().await;
let db_path = tempfile::tempdir().unwrap();
let server = Server::new_with_options(Opt {
task_webhook_url: Some(Url::parse(&url3).unwrap()),
..default_settings(db_path.path())
})
.await
.unwrap();
for url in [url1, url2] {
let (value, code) = server.create_webhook(json!({ "url": url })).await;
snapshot!(code, @"201 Created");
snapshot!(json_string!(value, { ".uuid" => "[uuid]", ".url" => "[ignored]" }), @r#"
{
"uuid": "[uuid]",
"isEditable": true,
"url": "[ignored]",
"headers": {}
}
"#);
}
let index = server.index("tamo");
let (response, code) = index
.add_documents_with_custom_metadata(
json!({ "id": 1, "doggo": "bone" }),
None,
Some("test_meta"),
)
.await;
snapshot!(response, @r###"
{
"taskUid": 0,
"indexUid": "tamo",
"status": "enqueued",
"type": "documentAdditionOrUpdate",
"enqueuedAt": "[date]",
"customMetadata": "test_meta"
}
"###);
snapshot!(code, @"202 Accepted");
let mut count1 = 0;
let mut count2 = 0;
let mut count3 = 0;
while count1 == 0 || count2 == 0 || count3 == 0 {
tokio::select! {
msg = receiver1.recv() => {
if let Some(msg) = msg {
count1 += 1;
check_metadata(msg);
}
},
msg = receiver2.recv() => {
if let Some(msg) = msg {
count2 += 1;
check_metadata(msg);
}
},
msg = receiver3.recv() => {
if let Some(msg) = msg {
count3 += 1;
check_metadata(msg);
}
},
}
}
assert_eq!(count1, 1);
assert_eq!(count2, 1);
assert_eq!(count3, 1);
handle1.abort();
handle2.abort();
handle3.abort();
}
fn check_metadata(msg: Vec<u8>) {
let msg = String::from_utf8(msg).unwrap();
let tasks = msg.split('\n');
for task in tasks {
if task.is_empty() {
continue;
}
let task: serde_json::Value = serde_json::from_str(task).unwrap();
snapshot!(common::Value(task), @r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "tamo",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]",
"customMetadata": "test_meta"
}
"###);
}
}
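
`check_metadata` relies on the webhook payload being newline-delimited JSON, one completed task per line. A standalone sketch of that parsing step (the function name is hypothetical):

use serde_json::Value;

// Split an ndjson webhook body into one JSON value per task,
// skipping empty lines such as the trailing newline.
fn parse_webhook_payload(body: &[u8]) -> Vec<Value> {
    String::from_utf8_lossy(body)
        .lines()
        .filter(|line| !line.is_empty())
        .map(|line| serde_json::from_str(line).expect("each line is a JSON task"))
        .collect()
}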

View File

@@ -43,7 +43,7 @@ async fn version_too_old() {
std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.26.0");
snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.24.0");
}
#[actix_rt::test]
@@ -58,7 +58,7 @@ async fn version_requires_downgrade() {
std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
snapshot!(err, @"Database version 1.26.1 is higher than the Meilisearch version 1.26.0. Downgrade is not supported");
snapshot!(err, @"Database version 1.24.1 is higher than the Meilisearch version 1.24.0. Downgrade is not supported");
}
#[actix_rt::test]

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -8,7 +8,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"progress": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"stats": {
"totalNbTasks": 1,

View File

@@ -12,7 +12,7 @@ source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.26.0"
"upgradeTo": "v1.24.0"
},
"error": null,
"duration": "[duration]",

View File

@@ -68,7 +68,7 @@ fn convert_update_files(db_path: &Path) -> anyhow::Result<()> {
for uuid in file_store.all_uuids().context("while retrieving uuids from file store")? {
let uuid = uuid.context("while retrieving uuid from file store")?;
let update_file_path = file_store.update_path(uuid);
let update_file_path = file_store.get_update_path(uuid);
let update_file = file_store
.get_update(uuid)
.with_context(|| format!("while getting update file for uuid {uuid:?}"))?;

View File

@@ -18,7 +18,7 @@ bincode = "1.3.3"
bstr = "1.12.0"
bytemuck = { version = "1.23.1", features = ["extern_crate_alloc"] }
byteorder = "1.5.0"
charabia = { version = "0.9.8", default-features = false }
charabia = { version = "0.9.7", default-features = false }
cellulite = "0.3.1-nested-rtxns-2"
concat-arrays = "0.1.2"
convert_case = "0.8.0"
@@ -34,7 +34,7 @@ grenad = { version = "0.5.0", default-features = false, features = [
"rayon",
"tempfile",
] }
heed = { version = "0.22.1-nested-rtxns-6", default-features = false, features = [
heed = { version = "0.22.1-nested-rtxns", default-features = false, features = [
"serde-json",
"serde-bincode",
] }
@@ -74,13 +74,12 @@ csv = "1.3.1"
candle-core = { version = "0.9.1" }
candle-transformers = { version = "0.9.1" }
candle-nn = { version = "0.9.1" }
tokenizers = { version = "0.22.1", default-features = false, features = [
tokenizers = { git = "https://github.com/huggingface/tokenizers.git", tag = "v0.15.2", version = "0.15.2", default-features = false, features = [
"onig",
] }
hf-hub = { git = "https://github.com/dureuill/hf-hub.git", branch = "rust_tls", default-features = false, features = [
"online",
] }
safetensors = "0.6.2"
tiktoken-rs = "0.7.0"
liquid = "0.26.11"
rhai = { version = "1.22.2", features = [
@@ -101,6 +100,7 @@ bumpalo = "3.18.1"
bumparaw-collections = "0.1.4"
steppe = { version = "0.4", default-features = false }
thread_local = "1.1.9"
allocator-api2 = "0.3.0"
rustc-hash = "2.1.1"
enum-iterator = "2.1.0"
bbqueue = { git = "https://github.com/meilisearch/bbqueue" }

View File

@@ -425,10 +425,6 @@ impl Index {
self.env.info().map_size
}
pub fn try_clone_inner_file(&self) -> heed::Result<File> {
self.env.try_clone_inner_file()
}
pub fn copy_to_file(&self, file: &mut File, option: CompactionOption) -> Result<()> {
self.env.copy_to_file(file, option).map_err(Into::into)
}

View File

@@ -1173,7 +1173,6 @@ pub fn extract_embeddings_from_fragments<R: io::Read + io::Seek>(
request_threads,
&doc_alloc,
embedder_stats,
false,
on_embed,
);
@@ -1193,12 +1192,12 @@ pub fn extract_embeddings_from_fragments<R: io::Read + io::Seek>(
Metadata { docid, external_docid: "", extractor_id },
value,
unused_vectors_distribution,
)?;
);
}
}
// send last chunk
let on_embed = session.drain(unused_vectors_distribution)?;
let on_embed = session.drain(unused_vectors_distribution);
on_embed.finish()
}
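
The `?` disappears from `request_embedding` and `drain` because embedding errors no longer abort the batch: the session now absorbs them. A simplified sketch of the fallback it applies (see the session changes at the end of this diff), with `embed` standing in for the real embedder call:

// On a batch failure, retry each input individually and log per-document
// failures instead of returning an error; failed documents simply get no
// embedding.
fn embed_with_fallback<I>(
    inputs: &[I],
    docids: &[&str],
    embed: impl Fn(&[I]) -> Result<Vec<Vec<f32>>, String>,
) -> Vec<Option<Vec<f32>>> {
    match embed(inputs) {
        Ok(embeddings) => embeddings.into_iter().map(Some).collect(),
        Err(error) => {
            eprintln!("error embedding batch of documents, retrying one by one: {error}");
            inputs
                .iter()
                .zip(docids)
                .map(|(input, docid)| match embed(std::slice::from_ref(input)) {
                    Ok(mut embeddings) => embeddings.pop(),
                    Err(err) => {
                        eprintln!("error embedding document {docid}: {err}");
                        None
                    }
                })
                .collect()
        }
    }
}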

View File

@@ -1,6 +1,3 @@
use std::num::NonZeroUsize;
use std::time::Duration;
use grenad::CompressionType;
use super::GrenadParameters;
@@ -23,7 +20,6 @@ pub struct IndexerConfig {
pub experimental_no_edition_2024_for_dumps: bool,
pub experimental_no_edition_2024_for_prefix_post_processing: bool,
pub experimental_no_edition_2024_for_facet_post_processing: bool,
pub s3_snapshot_options: Option<S3SnapshotOptions>,
}
impl IndexerConfig {
@@ -41,20 +37,6 @@ impl IndexerConfig {
}
}
#[derive(Debug, Clone)]
pub struct S3SnapshotOptions {
pub s3_bucket_url: String,
pub s3_bucket_region: String,
pub s3_bucket_name: String,
pub s3_snapshot_prefix: String,
pub s3_access_key: String,
pub s3_secret_key: String,
pub s3_max_in_flight_parts: NonZeroUsize,
pub s3_compression_level: u32,
pub s3_signature_duration: Duration,
pub s3_multipart_part_size: u64,
}
/// By default use only 1 thread for indexing in tests
#[cfg(test)]
pub fn default_thread_pool_and_threads() -> (ThreadPoolNoAbort, Option<usize>) {
@@ -94,7 +76,6 @@ impl Default for IndexerConfig {
experimental_no_edition_2024_for_dumps: false,
experimental_no_edition_2024_for_prefix_post_processing: false,
experimental_no_edition_2024_for_facet_post_processing: false,
s3_snapshot_options: None,
}
}
}

View File

@@ -5,7 +5,7 @@ pub use self::concurrent_available_ids::ConcurrentAvailableIds;
pub use self::facet::bulk::FacetsUpdateBulk;
pub use self::facet::incremental::FacetsUpdateIncrementalInner;
pub use self::index_documents::{request_threads, *};
pub use self::indexer_config::{default_thread_pool_and_threads, IndexerConfig, S3SnapshotOptions};
pub use self::indexer_config::{default_thread_pool_and_threads, IndexerConfig};
pub use self::new::ChannelCongestion;
pub use self::settings::{validate_embedding_settings, Setting, Settings};
pub use self::update_step::UpdateIndexingStep;

View File

@@ -35,7 +35,6 @@ pub struct EmbeddingExtractor<'a, 'b> {
possible_embedding_mistakes: PossibleEmbeddingMistakes,
embedder_stats: &'a EmbedderStats,
threads: &'a ThreadPoolNoAbort,
failure_modes: EmbedderFailureModes,
}
impl<'a, 'b> EmbeddingExtractor<'a, 'b> {
@@ -47,15 +46,7 @@ impl<'a, 'b> EmbeddingExtractor<'a, 'b> {
threads: &'a ThreadPoolNoAbort,
) -> Self {
let possible_embedding_mistakes = PossibleEmbeddingMistakes::new(field_distribution);
let failure_modes = EmbedderFailureModes::from_env();
Self {
embedders,
sender,
threads,
possible_embedding_mistakes,
embedder_stats,
failure_modes,
}
Self { embedders, sender, threads, possible_embedding_mistakes, embedder_stats }
}
}
@@ -100,7 +91,6 @@ impl<'extractor> Extractor<'extractor> for EmbeddingExtractor<'_, '_> {
self.threads,
self.sender,
&context.doc_alloc,
self.failure_modes,
))
}
@@ -277,7 +267,6 @@ pub struct SettingsChangeEmbeddingExtractor<'a, 'b, SD> {
sender: EmbeddingSender<'a, 'b>,
possible_embedding_mistakes: PossibleEmbeddingMistakes,
threads: &'a ThreadPoolNoAbort,
failure_modes: EmbedderFailureModes,
}
impl<'a, 'b, SD: SettingsDelta> SettingsChangeEmbeddingExtractor<'a, 'b, SD> {
@@ -290,16 +279,7 @@ impl<'a, 'b, SD: SettingsDelta> SettingsChangeEmbeddingExtractor<'a, 'b, SD> {
threads: &'a ThreadPoolNoAbort,
) -> Self {
let possible_embedding_mistakes = PossibleEmbeddingMistakes::new(field_distribution);
let failure_modes = EmbedderFailureModes::from_env();
Self {
settings_delta,
embedder_stats,
sender,
threads,
possible_embedding_mistakes,
failure_modes,
}
Self { settings_delta, embedder_stats, sender, threads, possible_embedding_mistakes }
}
}
@@ -356,7 +336,6 @@ impl<'extractor, SD: SettingsDelta + Sync> SettingsChangeExtractor<'extractor>
self.threads,
self.sender,
&context.doc_alloc,
self.failure_modes,
),
reindex_action,
));
@@ -560,7 +539,6 @@ struct Chunks<'a, 'b, 'extractor> {
enum ChunkType<'a, 'b> {
DocumentTemplate {
document_template: &'a Prompt,
ignore_document_template_failures: bool,
session: EmbedSession<'a, OnEmbeddingDocumentUpdates<'a, 'b>, &'a str>,
},
Fragments {
@@ -581,7 +559,6 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
threads: &'a ThreadPoolNoAbort,
sender: EmbeddingSender<'a, 'b>,
doc_alloc: &'a Bump,
failure_modes: EmbedderFailureModes,
) -> Self {
let embedder = &runtime.embedder;
let dimensions = embedder.dimensions();
@@ -590,14 +567,12 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
let kind = if fragments.is_empty() {
ChunkType::DocumentTemplate {
document_template: &runtime.document_template,
ignore_document_template_failures: failure_modes.ignore_document_template_failures,
session: EmbedSession::new(
&runtime.embedder,
embedder_name,
threads,
doc_alloc,
embedder_stats,
failure_modes.ignore_embedder_failures,
OnEmbeddingDocumentUpdates {
embedder_id: embedder_info.embedder_id,
sender,
@@ -614,7 +589,6 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
threads,
doc_alloc,
embedder_stats,
failure_modes.ignore_embedder_failures,
OnEmbeddingDocumentUpdates {
embedder_id: embedder_info.embedder_id,
sender,
@@ -710,7 +684,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
metadata,
input,
unused_vectors_distribution,
)?;
);
}
ExtractorDiff::Unchanged => { /* nothing to do */ }
}
@@ -719,11 +693,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
},
)?;
}
ChunkType::DocumentTemplate {
document_template,
ignore_document_template_failures,
session,
} => {
ChunkType::DocumentTemplate { document_template, session } => {
let doc_alloc = session.doc_alloc();
let old_embedder = settings_delta.old_embedders().get(session.embedder_name());
@@ -732,7 +702,6 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
} else {
old_embedder.as_ref().map(|old_embedder| &old_embedder.document_template)
};
let extractor =
DocumentTemplateExtractor::new(document_template, doc_alloc, fields_ids_map);
let old_extractor = old_document_template.map(|old_document_template| {
@@ -741,15 +710,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
let metadata =
Metadata { docid, external_docid, extractor_id: extractor.extractor_id() };
let extractor_diff = if *ignore_document_template_failures {
let extractor = extractor.ignore_errors();
let old_extractor = old_extractor.map(DocumentTemplateExtractor::ignore_errors);
extractor.diff_settings(document, &external_docid, old_extractor.as_ref())?
} else {
extractor.diff_settings(document, &external_docid, old_extractor.as_ref())?
};
match extractor_diff {
match extractor.diff_settings(document, &external_docid, old_extractor.as_ref())? {
ExtractorDiff::Removed => {
if old_is_user_provided || full_reindex {
session.on_embed_mut().clear_vectors(docid);
@@ -763,7 +724,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
if old_is_user_provided || full_reindex {
session.on_embed_mut().clear_vectors(docid);
}
session.request_embedding(metadata, input, unused_vectors_distribution)?;
session.request_embedding(metadata, input, unused_vectors_distribution);
}
ExtractorDiff::Unchanged => { /* do nothing */ }
}
@@ -797,45 +758,27 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
new_must_regenerate,
);
match &mut self.kind {
ChunkType::DocumentTemplate {
document_template,
ignore_document_template_failures,
session,
} => {
ChunkType::DocumentTemplate { document_template, session } => {
let doc_alloc = session.doc_alloc();
let ex = DocumentTemplateExtractor::new(
document_template,
doc_alloc,
new_fields_ids_map,
);
)
.ignore_errors();
if *ignore_document_template_failures {
update_autogenerated(
docid,
external_docid,
[ex.ignore_errors()],
old_document,
new_document,
&external_docid,
old_must_regenerate,
old_is_user_provided,
session,
unused_vectors_distribution,
)
} else {
update_autogenerated(
docid,
external_docid,
[ex],
old_document,
new_document,
&external_docid,
old_must_regenerate,
old_is_user_provided,
session,
unused_vectors_distribution,
)
}?
update_autogenerated(
docid,
external_docid,
[ex],
old_document,
new_document,
&external_docid,
old_must_regenerate,
old_is_user_provided,
session,
unused_vectors_distribution,
)?
}
ChunkType::Fragments { fragments, session } => {
let doc_alloc = session.doc_alloc();
@@ -902,38 +845,23 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
);
match &mut self.kind {
ChunkType::DocumentTemplate {
document_template,
ignore_document_template_failures,
session,
} => {
ChunkType::DocumentTemplate { document_template, session } => {
let doc_alloc = session.doc_alloc();
let ex = DocumentTemplateExtractor::new(
document_template,
doc_alloc,
new_fields_ids_map,
);
if *ignore_document_template_failures {
insert_autogenerated(
docid,
external_docid,
[ex.ignore_errors()],
new_document,
&external_docid,
session,
unused_vectors_distribution,
)?;
} else {
insert_autogenerated(
docid,
external_docid,
[ex],
new_document,
&external_docid,
session,
unused_vectors_distribution,
)?;
}
insert_autogenerated(
docid,
external_docid,
[ex],
new_document,
&external_docid,
session,
unused_vectors_distribution,
)?;
}
ChunkType::Fragments { fragments, session } => {
let doc_alloc = session.doc_alloc();
@@ -957,15 +885,11 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
pub fn drain(self, unused_vectors_distribution: &UnusedVectorsDistributionBump) -> Result<()> {
match self.kind {
ChunkType::DocumentTemplate {
document_template: _,
ignore_document_template_failures: _,
session,
} => {
session.drain(unused_vectors_distribution)?;
ChunkType::DocumentTemplate { document_template: _, session } => {
session.drain(unused_vectors_distribution);
}
ChunkType::Fragments { fragments: _, session } => {
session.drain(unused_vectors_distribution)?;
session.drain(unused_vectors_distribution);
}
}
Ok(())
@@ -973,11 +897,9 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
pub fn embedder_name(&self) -> &'a str {
match &self.kind {
ChunkType::DocumentTemplate {
document_template: _,
ignore_document_template_failures: _,
session,
} => session.embedder_name(),
ChunkType::DocumentTemplate { document_template: _, session } => {
session.embedder_name()
}
ChunkType::Fragments { fragments: _, session } => session.embedder_name(),
}
}
@@ -1046,11 +968,7 @@ impl<'a, 'b, 'extractor> Chunks<'a, 'b, 'extractor> {
}
}
match &mut self.kind {
ChunkType::DocumentTemplate {
document_template: _,
ignore_document_template_failures: _,
session,
} => {
ChunkType::DocumentTemplate { document_template: _, session } => {
session.on_embed_mut().process_embeddings(
Metadata { docid, external_docid, extractor_id: 0 },
embeddings,
@@ -1119,7 +1037,7 @@ where
Metadata { docid, external_docid, extractor_id: extractor.extractor_id() };
if let Some(new_rendered) = new_rendered {
session.request_embedding(metadata, new_rendered, unused_vectors_distribution)?
session.request_embedding(metadata, new_rendered, unused_vectors_distribution)
} else {
// remove any existing embedding
OnEmbed::process_embedding_response(
@@ -1155,47 +1073,9 @@ where
Metadata { docid, external_docid, extractor_id: extractor.extractor_id() },
new_rendered,
unused_vectors_distribution,
)?;
);
}
}
Ok(())
}
#[derive(Clone, Copy, PartialEq, Eq, Default)]
struct EmbedderFailureModes {
pub ignore_document_template_failures: bool,
pub ignore_embedder_failures: bool,
}
impl EmbedderFailureModes {
fn from_env() -> Self {
match std::env::var("MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES") {
Ok(failure_modes) => Self::parse_from_str(
&failure_modes,
"`MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES`",
),
Err(std::env::VarError::NotPresent) => Self::default(),
Err(std::env::VarError::NotUnicode(_)) => panic!(
"`MEILI_EXPERIMENTAL_CONFIG_EMBEDDER_FAILURE_MODES` contains a non-unicode value"
),
}
}
fn parse_from_str(failure_modes: &str, provenance: &'static str) -> Self {
let Self { mut ignore_document_template_failures, mut ignore_embedder_failures } =
Default::default();
for segment in failure_modes.split(',') {
let segment = segment.trim();
match segment {
"ignore_document_template_failures" => {
ignore_document_template_failures = true;
}
"ignore_embedder_failures" => ignore_embedder_failures = true,
"" => continue,
segment => panic!("Unrecognized segment value for {provenance}: {segment}"),
}
}
Self { ignore_document_template_failures, ignore_embedder_failures }
}
}
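
With the `EmbedderFailureModes` env flag gone, the document-template extractor is always wrapped with `ignore_errors()`. A hypothetical illustration of what that wrapper amounts to (names and types are stand-ins, not the real extractor API):

// A failing template render is logged and produces no embedding input for
// the document, instead of failing the whole indexing task.
fn render_or_skip(
    render: impl Fn(&str) -> Result<String, String>,
    document: &str,
    docid: &str,
) -> Option<String> {
    match render(document) {
        Ok(rendered) => Some(rendered),
        Err(error) => {
            eprintln!("ignoring rendering error for document {docid}: {error}");
            None
        }
    }
}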

View File

@@ -5,36 +5,18 @@
use std::hash::{BuildHasher as _, BuildHasherDefault};
pub struct Shards(pub Vec<Shard>);
pub struct Shard {
pub is_own: bool,
pub name: String,
pub struct Shards {
pub own: Vec<String>,
pub others: Vec<String>,
}
impl Shards {
pub fn from_remotes_local<'a>(
remotes: impl IntoIterator<Item = &'a str>,
local: Option<&str>,
) -> Self {
Shards(
remotes
.into_iter()
.map(|name| Shard { is_own: Some(name) == local, name: name.to_owned() })
.collect(),
)
}
pub fn must_process(&self, docid: &str) -> bool {
self.processing_shard(docid).map(|shard| shard.is_own).unwrap_or_default()
}
pub fn processing_shard<'a>(&'a self, docid: &str) -> Option<&'a Shard> {
let hasher = BuildHasherDefault::<twox_hash::XxHash3_64>::new();
let to_hash = |shard: &'a Shard| (shard, hasher.hash_one((&shard.name, docid)));
let to_hash = |shard: &String| hasher.hash_one((shard, docid));
let shard =
self.0.iter().map(to_hash).max_by_key(|(_, hash)| *hash).map(|(shard, _)| shard);
shard
let max_hash = self.others.iter().map(to_hash).max().unwrap_or_default();
self.own.iter().map(to_hash).any(|hash| hash > max_hash)
}
}
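
This is rendezvous (highest-random-weight) hashing: every instance hashes each shard name together with the docid, and the shard with the highest hash wins. A self-contained sketch of the new check, assuming the twox_hash crate used above:

use std::hash::{BuildHasher as _, BuildHasherDefault};

// A document must be processed locally iff one of the local shard names
// out-hashes every remote shard name for that docid. Ties go to the
// remotes, which is negligible with a 64-bit hash.
fn must_process(own: &[String], others: &[String], docid: &str) -> bool {
    let hasher = BuildHasherDefault::<twox_hash::XxHash3_64>::new();
    let to_hash = |shard: &String| hasher.hash_one((shard, docid));
    let max_other = others.iter().map(to_hash).max().unwrap_or_default();
    own.iter().map(to_hash).any(|hash| hash > max_other)
}

Because all instances rank the same shard names with the same hash function, each docid is claimed by exactly one instance.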

View File

@@ -4,7 +4,7 @@ use std::io::{BufReader, BufWriter, Read, Seek, Write};
use std::iter;
use hashbrown::HashMap;
use heed::types::{Bytes, DecodeIgnore, Str};
use heed::types::{Bytes, DecodeIgnore};
use heed::{BytesDecode, Database, Error, RoTxn, RwTxn};
use rayon::iter::{IndexedParallelIterator as _, IntoParallelIterator, ParallelIterator as _};
use roaring::MultiOps;
@@ -16,29 +16,22 @@ use crate::heed_codec::StrBEU16Codec;
use crate::update::GrenadParameters;
use crate::{CboRoaringBitmapCodec, Index, Prefix, Result};
struct WordPrefixDocids<'i> {
index: &'i Index,
struct WordPrefixDocids {
database: Database<Bytes, CboRoaringBitmapCodec>,
prefix_database: Database<Bytes, CboRoaringBitmapCodec>,
max_memory_by_thread: Option<usize>,
/// Do not use an experimental LMDB feature to read uncommitted data in parallel.
no_experimental_post_processing: bool,
}
impl<'i> WordPrefixDocids<'i> {
impl WordPrefixDocids {
fn new(
index: &'i Index,
database: Database<Bytes, CboRoaringBitmapCodec>,
prefix_database: Database<Bytes, CboRoaringBitmapCodec>,
grenad_parameters: &GrenadParameters,
) -> WordPrefixDocids<'i> {
) -> WordPrefixDocids {
WordPrefixDocids {
index,
database,
prefix_database,
max_memory_by_thread: grenad_parameters.max_memory_by_thread(),
no_experimental_post_processing: grenad_parameters
.experimental_no_edition_2024_for_prefix_post_processing,
}
}
@@ -49,77 +42,7 @@ impl<'i> WordPrefixDocids<'i> {
prefix_to_delete: &BTreeSet<Prefix>,
) -> Result<()> {
delete_prefixes(wtxn, &self.prefix_database, prefix_to_delete)?;
if self.no_experimental_post_processing {
self.recompute_modified_prefixes(wtxn, prefix_to_compute)
} else {
self.recompute_modified_prefixes_no_frozen(wtxn, prefix_to_compute)
}
}
#[tracing::instrument(level = "trace", skip_all, target = "indexing::prefix")]
fn recompute_modified_prefixes_no_frozen(
&self,
wtxn: &mut RwTxn,
prefix_to_compute: &BTreeSet<Prefix>,
) -> Result<()> {
let thread_count = rayon::current_num_threads();
let rtxns = iter::repeat_with(|| self.index.env.nested_read_txn(wtxn))
.take(thread_count)
.collect::<heed::Result<Vec<_>>>()?;
let outputs = rtxns
.into_par_iter()
.enumerate()
.map(|(thread_id, rtxn)| {
// `indexes` represents the offsets at which the prefix computations were stored in `file`.
let mut indexes = Vec::new();
let mut file = BufWriter::new(spooled_tempfile(
self.max_memory_by_thread.unwrap_or(usize::MAX),
));
let mut buffer = Vec::new();
for (prefix_index, prefix) in prefix_to_compute.iter().enumerate() {
// Is prefix for another thread?
if prefix_index % thread_count != thread_id {
continue;
}
let output = self
.database
.prefix_iter(&rtxn, prefix.as_bytes())?
.remap_types::<Str, CboRoaringBitmapCodec>()
.map(|result| result.map(|(_word, bitmap)| bitmap))
.union()?;
buffer.clear();
CboRoaringBitmapCodec::serialize_into_vec(&output, &mut buffer);
indexes.push(PrefixEntry { prefix, serialized_length: buffer.len() });
file.write_all(&buffer)?;
}
Ok((indexes, file))
})
.collect::<Result<Vec<_>>>()?;
// We iterate over all the collected and serialized bitmaps through
// the files and entries to eventually put them in the final database.
let mut buffer = Vec::new();
for (index, file) in outputs {
let mut file = file.into_inner().map_err(|e| e.into_error())?;
file.rewind()?;
let mut file = BufReader::new(file);
for PrefixEntry { prefix, serialized_length } in index {
buffer.resize(serialized_length, 0);
file.read_exact(&mut buffer)?;
self.prefix_database.remap_data_type::<Bytes>().put(
wtxn,
prefix.as_bytes(),
&buffer,
)?;
}
}
Ok(())
self.recompute_modified_prefixes(wtxn, prefix_to_compute)
}
#[tracing::instrument(level = "trace", skip_all, target = "indexing::prefix")]
@@ -540,7 +463,6 @@ pub fn compute_word_prefix_docids(
grenad_parameters: &GrenadParameters,
) -> Result<()> {
WordPrefixDocids::new(
index,
index.word_docids.remap_key_type(),
index.word_prefix_docids.remap_key_type(),
grenad_parameters,
@@ -557,7 +479,6 @@ pub fn compute_exact_word_prefix_docids(
grenad_parameters: &GrenadParameters,
) -> Result<()> {
WordPrefixDocids::new(
index,
index.exact_word_docids.remap_key_type(),
index.exact_word_prefix_docids.remap_key_type(),
grenad_parameters,
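
Both the removed parallel path and the retained sequential one compute the same thing per prefix: the union of the docid bitmaps of every word starting with that prefix. A sketch of that core step on an in-memory map instead of LMDB, assuming roaring's MultiOps (as imported above):

use std::collections::BTreeMap;

use roaring::{MultiOps, RoaringBitmap};

// Union the docid bitmaps of all words sharing `prefix`; the result is what
// gets stored in the prefix database under that prefix.
fn prefix_docids(words: &BTreeMap<String, RoaringBitmap>, prefix: &str) -> RoaringBitmap {
    words
        .range(prefix.to_string()..)
        .take_while(|(word, _)| word.starts_with(prefix))
        .map(|(_, bitmap)| bitmap.clone())
        .union()
}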

View File

@@ -1631,11 +1631,8 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
// Update index settings
let embedding_config_updates = self.update_embedding_configs()?;
self.update_user_defined_searchable_attributes()?;
let mut new_inner_settings =
InnerIndexSettings::from_index(self.index, self.wtxn, None)?;
new_inner_settings.recompute_searchables(self.wtxn, self.index)?;
let new_inner_settings = InnerIndexSettings::from_index(self.index, self.wtxn, None)?;
let primary_key_id = self
.index

View File

@@ -42,8 +42,6 @@ const UPGRADE_FUNCTIONS: &[&dyn UpgradeIndex] = &[
&ToTargetNoOp { target: (1, 22, 0) },
&ToTargetNoOp { target: (1, 23, 0) },
&ToTargetNoOp { target: (1, 24, 0) },
&ToTargetNoOp { target: (1, 25, 0) },
&ToTargetNoOp { target: (1, 26, 0) },
// This is the last upgrade function, it will be called when the index is up to date.
// any other upgrade function should be added before this one.
&ToCurrentNoOp {},
@@ -79,8 +77,6 @@ const fn start(from: (u32, u32, u32)) -> Option<usize> {
(1, 22, _) => function_index!(12),
(1, 23, _) => function_index!(13),
(1, 24, _) => function_index!(14),
(1, 25, _) => function_index!(15),
(1, 26, _) => function_index!(16),
// We deliberately don't add a placeholder with (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) here,
// to force manual consideration of dumpless upgrades.
(_major, _minor, _patch) => return None,
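
The table and the `start` match must stay in lockstep: the on-disk version selects an offset, and every upgrade function from that offset to the end of the table runs in order. A hypothetical, condensed illustration of that dispatch (the real code maps each version explicitly through `function_index!`):

// Map a database version to the list of upgrade targets still to apply;
// `None` means the version is unknown and dumpless upgrade must bail out.
fn upgrade_steps(from: (u32, u32, u32)) -> Option<&'static [(u32, u32, u32)]> {
    const TARGETS: &[(u32, u32, u32)] = &[(1, 22, 0), (1, 23, 0), (1, 24, 0)];
    let start = match from {
        (1, 22, _) => 0,
        (1, 23, _) => 1,
        (1, 24, _) => 2,
        _ => return None,
    };
    Some(&TARGETS[start..])
}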

View File

@@ -1,11 +1,9 @@
use candle_core::Tensor;
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config as BertConfig, DTYPE};
use candle_transformers::models::modernbert::{Config as ModernConfig, ModernBert};
use candle_transformers::models::bert::{BertModel, Config, DTYPE};
// FIXME: currently we'll be using the hub to retrieve the model; in the future we might want to embed it into Meilisearch itself
use hf_hub::api::sync::Api;
use hf_hub::{Repo, RepoType};
use safetensors::SafeTensors;
use tokenizers::{PaddingParams, Tokenizer};
use super::EmbeddingCache;
@@ -86,21 +84,14 @@ impl Default for EmbedderOptions {
}
}
enum ModelKind {
Bert(BertModel),
Modern(ModernBert),
}
/// Perform embedding of documents and queries
pub struct Embedder {
model: ModelKind,
model: BertModel,
tokenizer: Tokenizer,
options: EmbedderOptions,
dimensions: usize,
pooling: Pooling,
cache: EmbeddingCache,
device: candle_core::Device,
max_len: usize,
}
impl std::fmt::Debug for Embedder {
@@ -110,60 +101,10 @@ impl std::fmt::Debug for Embedder {
.field("tokenizer", &self.tokenizer)
.field("options", &self.options)
.field("pooling", &self.pooling)
.field("device", &self.device)
.field("max_len", &self.max_len)
.finish()
}
}
// some models do not have the "model." prefix in their safetensors weights
fn change_tensor_names(
weights_path: &std::path::Path,
) -> Result<std::path::PathBuf, NewEmbedderError> {
let data = std::fs::read(weights_path)
.map_err(|e| NewEmbedderError::safetensor_weight(candle_core::Error::Io(e)))?;
let tensors = SafeTensors::deserialize(&data)
.map_err(|e| NewEmbedderError::safetensor_weight(candle_core::Error::Msg(e.to_string())))?;
let names = tensors.names();
let has_model_prefix = names.iter().any(|n| n.starts_with("model."));
if has_model_prefix {
return Ok(weights_path.to_path_buf());
}
let fixed_path = weights_path.with_extension("fixed.safetensors");
if fixed_path.exists() {
return Ok(fixed_path);
}
let mut new_tensors = vec![];
for name in names {
let tensor_view = tensors.tensor(name).map_err(|e| {
NewEmbedderError::safetensor_weight(candle_core::Error::Msg(e.to_string()))
})?;
let new_name = format!("model.{}", name);
let data_offset = tensor_view.data();
let shape = tensor_view.shape();
let dtype = tensor_view.dtype();
new_tensors.push((new_name, shape.to_vec(), dtype, data_offset));
}
use safetensors::tensor::TensorView;
let views = new_tensors.iter().map(|(name, shape, dtype, data)| {
(name.as_str(), TensorView::new(*dtype, shape.clone(), data).unwrap())
});
safetensors::serialize_to_file(views, None, &fixed_path)
.map_err(|e| NewEmbedderError::safetensor_weight(candle_core::Error::Msg(e.to_string())))?;
Ok(fixed_path)
}
#[derive(Clone, Copy, serde::Deserialize)]
struct PoolingConfig {
#[serde(default)]
@@ -279,42 +220,19 @@ impl Embedder {
(config, tokenizer, weights, source, pooling)
};
let config_str = std::fs::read_to_string(&config_filename)
let config = std::fs::read_to_string(&config_filename)
.map_err(|inner| NewEmbedderError::open_config(config_filename.clone(), inner))?;
let cfg_val: serde_json::Value = match serde_json::from_str(&config_str) {
Ok(v) => v,
Err(inner) => {
return Err(NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
));
}
};
let model_type = cfg_val.get("model_type").and_then(|v| v.as_str()).unwrap_or_default();
let arch_arr = cfg_val.get("architectures").and_then(|v| v.as_array());
let has_arch = |needle: &str| {
model_type.eq_ignore_ascii_case(needle)
|| arch_arr.is_some_and(|arr| {
arr.iter().filter_map(|v| v.as_str()).any(|s| s.to_lowercase().contains(needle))
})
};
let is_modern = has_arch("modernbert");
tracing::debug!(is_modern, model_type, "detected HF architecture");
let config: Config = serde_json::from_str(&config).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config,
config_filename,
inner,
)
})?;
let mut tokenizer = Tokenizer::from_file(&tokenizer_filename)
.map_err(|inner| NewEmbedderError::open_tokenizer(tokenizer_filename, inner))?;
let weights_filename = if is_modern && weight_source == WeightSource::Safetensors {
change_tensor_names(&weights_filename)?
} else {
weights_filename
};
let vb = match weight_source {
WeightSource::Pytorch => VarBuilder::from_pth(&weights_filename, DTYPE, &device)
.map_err(NewEmbedderError::pytorch_weight)?,
@@ -326,31 +244,7 @@ impl Embedder {
tracing::debug!(model = options.model, weight=?weight_source, pooling=?pooling, "model config");
// max length from config, fallback to 512
let max_len =
cfg_val.get("max_position_embeddings").and_then(|v| v.as_u64()).unwrap_or(512) as usize;
let model = if is_modern {
let config: ModernConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
)
})?;
ModelKind::Modern(ModernBert::load(vb, &config).map_err(NewEmbedderError::load_model)?)
} else {
let config: BertConfig = serde_json::from_str(&config_str).map_err(|inner| {
NewEmbedderError::deserialize_config(
options.model.clone(),
config_str.clone(),
config_filename.clone(),
inner,
)
})?;
ModelKind::Bert(BertModel::load(vb, &config).map_err(NewEmbedderError::load_model)?)
};
let model = BertModel::load(vb, &config).map_err(NewEmbedderError::load_model)?;
if let Some(pp) = tokenizer.get_padding_mut() {
pp.strategy = tokenizers::PaddingStrategy::BatchLongest
@@ -369,8 +263,6 @@ impl Embedder {
dimensions: 0,
pooling,
cache: EmbeddingCache::new(cache_cap),
device,
max_len,
};
let embeddings = this
@@ -429,29 +321,15 @@ impl Embedder {
pub fn embed_one(&self, text: &str) -> std::result::Result<Embedding, EmbedError> {
let tokens = self.tokenizer.encode(text, true).map_err(EmbedError::tokenize)?;
let token_ids = tokens.get_ids();
let token_ids = if token_ids.len() > 512 { &token_ids[..512] } else { token_ids };
let token_ids =
if token_ids.len() > self.max_len { &token_ids[..self.max_len] } else { token_ids };
let token_ids = Tensor::new(token_ids, &self.device).map_err(EmbedError::tensor_shape)?;
let token_ids = Tensor::new(token_ids, &self.model.device).map_err(EmbedError::tensor_shape)?;
let token_ids = Tensor::stack(&[token_ids], 0).map_err(EmbedError::tensor_shape)?;
let embeddings = match &self.model {
ModelKind::Bert(model) => {
let token_type_ids = token_ids.zeros_like().map_err(EmbedError::tensor_shape)?;
model
.forward(&token_ids, &token_type_ids, None)
.map_err(EmbedError::model_forward)?
}
ModelKind::Modern(model) => {
let mut mask_vec = tokens.get_attention_mask().to_vec();
if mask_vec.len() > self.max_len {
mask_vec.truncate(self.max_len);
}
let mask = Tensor::new(mask_vec.as_slice(), &self.device)
.map_err(EmbedError::tensor_shape)?;
let mask = Tensor::stack(&[mask], 0).map_err(EmbedError::tensor_shape)?;
model.forward(&token_ids, &mask).map_err(EmbedError::model_forward)?
}
};
let token_type_ids = token_ids.zeros_like().map_err(EmbedError::tensor_shape)?;
let embeddings = self
.model
.forward(&token_ids, &token_type_ids, None)
.map_err(EmbedError::model_forward)?;
let embedding = Self::pooling(embeddings, self.pooling)?;
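
After the revert, the whole inference path fits in a few lines. A condensed sketch, assuming a loaded BertModel and Tokenizer as constructed earlier in this file, with mean pooling standing in for the configurable `Self::pooling` step:

use candle_core::Tensor;
use candle_transformers::models::bert::BertModel;
use tokenizers::Tokenizer;

// Tokenize, truncate to the fixed 512-token window, run the encoder with
// zeroed token-type ids, and pool to a single vector.
fn embed_one(model: &BertModel, tokenizer: &Tokenizer, text: &str) -> candle_core::Result<Vec<f32>> {
    let tokens =
        tokenizer.encode(text, true).map_err(|e| candle_core::Error::Msg(e.to_string()))?;
    let token_ids = tokens.get_ids();
    let token_ids = if token_ids.len() > 512 { &token_ids[..512] } else { token_ids };
    let token_ids = Tensor::new(token_ids, &model.device)?;
    let token_ids = Tensor::stack(&[token_ids], 0)?;
    let token_type_ids = token_ids.zeros_like()?;
    let embeddings = model.forward(&token_ids, &token_type_ids, None)?;
    embeddings.mean(1)?.squeeze(0)?.to_vec1::<f32>()
}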

View File

@@ -91,7 +91,6 @@ struct EmbedderData {
request: RequestData,
response: Response,
configuration_source: ConfigurationSource,
max_retry_duration: std::time::Duration,
}
#[derive(Debug)]
@@ -183,15 +182,10 @@ impl Embedder {
) -> Result<Self, NewEmbedderError> {
let bearer = options.api_key.as_deref().map(|api_key| format!("Bearer {api_key}"));
let timeout = std::env::var("MEILI_EXPERIMENTAL_REST_EMBEDDER_TIMEOUT_SECONDS")
.ok()
.map(|p| p.parse().unwrap())
.unwrap_or(30);
let client = ureq::AgentBuilder::new()
.max_idle_connections(REQUEST_PARALLELISM * 2)
.max_idle_connections_per_host(REQUEST_PARALLELISM * 2)
.timeout(std::time::Duration::from_secs(timeout))
.timeout(std::time::Duration::from_secs(30))
.build();
let request = RequestData::new(
@@ -202,14 +196,6 @@ impl Embedder {
let response = Response::new(options.response, &request)?;
let max_retry_duration =
std::env::var("MEILI_EXPERIMENTAL_REST_EMBEDDER_MAX_RETRY_DURATION_SECONDS")
.ok()
.map(|p| p.parse().unwrap())
.unwrap_or(60);
let max_retry_duration = std::time::Duration::from_secs(max_retry_duration);
let data = EmbedderData {
client,
bearer,
@@ -218,7 +204,6 @@ impl Embedder {
response,
configuration_source,
headers: options.headers,
max_retry_duration,
};
let dimensions = if let Some(dimensions) = options.dimensions {
@@ -472,7 +457,7 @@ where
}
}?;
let retry_duration = retry_duration.min(data.max_retry_duration); // don't wait more than the max duration
let retry_duration = retry_duration.min(std::time::Duration::from_secs(60)); // don't wait more than a minute
// randomly increase the wait, up to double the retry duration
let retry_duration = retry_duration
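
With the env-configurable cap removed, the backoff is clamped to a fixed minute and then jittered. A small sketch of that clamping (using rand::random as a stand-in for the real randomness source):

use std::time::Duration;

// Cap the backoff at one minute, then add random jitter so the final wait
// lands anywhere between the capped duration and double it.
fn clamp_retry(retry_duration: Duration) -> Duration {
    let retry_duration = retry_duration.min(Duration::from_secs(60));
    retry_duration + retry_duration.mul_f32(rand::random::<f32>())
}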

View File

@@ -550,9 +550,9 @@ pub struct DeserializePoolingConfig {
#[derive(Debug, thiserror::Error)]
#[error("model `{model_name}` appears to be unsupported{}\n - inner error: {inner}",
if architectures.is_empty() {
"\n - Note: only models with architecture \"BertModel\" or \"ModernBert\" are supported.".to_string()
"\n - Note: only models with architecture \"BertModel\" are supported.".to_string()
} else {
format!("\n - Note: model has declared architectures `{architectures:?}`, only models with architecture `\"BertModel\"` or `\"ModernBert\"` are supported.")
format!("\n - Note: model has declared architectures `{architectures:?}`, only models with architecture `\"BertModel\"` are supported.")
})]
pub struct UnsupportedModel {
pub model_name: String,

View File

@@ -5,7 +5,7 @@ use serde_json::Value;
use super::error::EmbedError;
use super::{Embedder, Embedding};
use crate::progress::EmbedderStats;
use crate::{DocumentId, Result, ThreadPoolNoAbort};
use crate::{DocumentId, ThreadPoolNoAbort};
type ExtractorId = u8;
#[derive(Clone, Copy)]
@@ -44,7 +44,6 @@ pub struct EmbedSession<'doc, C, I> {
embedder_name: &'doc str,
embedder_stats: &'doc EmbedderStats,
ignore_embedding_failures: bool,
on_embed: C,
}
@@ -88,7 +87,6 @@ impl<'doc, C: OnEmbed<'doc>, I: Input> EmbedSession<'doc, C, I> {
threads: &'doc ThreadPoolNoAbort,
doc_alloc: &'doc Bump,
embedder_stats: &'doc EmbedderStats,
ignore_embedding_failures: bool,
on_embed: C,
) -> Self {
let capacity = embedder.prompt_count_in_chunk_hint() * embedder.chunk_count_hint();
@@ -101,7 +99,6 @@ impl<'doc, C: OnEmbed<'doc>, I: Input> EmbedSession<'doc, C, I> {
threads,
embedder_name,
embedder_stats,
ignore_embedding_failures,
on_embed,
}
}
@@ -111,32 +108,28 @@ impl<'doc, C: OnEmbed<'doc>, I: Input> EmbedSession<'doc, C, I> {
metadata: Metadata<'doc>,
rendered: I,
unused_vectors_distribution: &C::ErrorMetadata,
) -> Result<()> {
) {
if self.inputs.len() < self.inputs.capacity() {
self.inputs.push(rendered);
self.metadata.push(metadata);
return Ok(());
return;
}
self.embed_chunks(unused_vectors_distribution)
}
pub fn drain(mut self, unused_vectors_distribution: &C::ErrorMetadata) -> Result<C> {
self.embed_chunks(unused_vectors_distribution)?;
Ok(self.on_embed)
pub fn drain(mut self, unused_vectors_distribution: &C::ErrorMetadata) -> C {
self.embed_chunks(unused_vectors_distribution);
self.on_embed
}
#[allow(clippy::too_many_arguments)]
fn embed_chunks(&mut self, unused_vectors_distribution: &C::ErrorMetadata) -> Result<()> {
fn embed_chunks(&mut self, _unused_vectors_distribution: &C::ErrorMetadata) {
if self.inputs.is_empty() {
return Ok(());
return;
}
let res = match I::embed_ref(
self.inputs.as_slice(),
self.embedder,
self.threads,
self.embedder_stats,
) {
match I::embed_ref(self.inputs.as_slice(), self.embedder, self.threads, self.embedder_stats)
{
Ok(embeddings) => {
for (metadata, embedding) in self.metadata.iter().copied().zip(embeddings) {
self.on_embed.process_embedding_response(EmbeddingResponse {
@@ -144,36 +137,37 @@ impl<'doc, C: OnEmbed<'doc>, I: Input> EmbedSession<'doc, C, I> {
embedding: Some(embedding),
});
}
Ok(())
}
Err(error) => {
// send metadata to the error processing.
let doc_alloc = self.metadata.bump();
let metadata = std::mem::replace(
&mut self.metadata,
BVec::with_capacity_in(self.inputs.capacity(), doc_alloc),
tracing::warn!(
%error,
"error embedding batch of documents, retrying one by one"
);
Err(self.on_embed.process_embedding_error(
error,
self.embedder_name,
unused_vectors_distribution,
metadata,
))
// retry with one call per input
for (metadata, input) in self.metadata.iter().copied().zip(self.inputs.chunks(1)) {
match I::embed_ref(input, self.embedder, self.threads, self.embedder_stats) {
Ok(mut embeddings) => {
let Some(embedding) = embeddings.pop() else {
continue;
};
self.on_embed.process_embedding_response(EmbeddingResponse {
metadata,
embedding: Some(embedding),
})
}
Err(err) => {
tracing::warn!(
docid = metadata.external_docid,
%err,
"error embedding document"
);
}
}
}
}
};
self.inputs.clear();
self.metadata.clear();
if self.ignore_embedding_failures {
if let Err(err) = res {
tracing::warn!(
%err,
"ignored error embedding batch of documents due to failure policy"
);
}
Ok(())
} else {
res
}
}
pub(crate) fn embedder_name(&self) -> &'doc str {