Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-07-18 04:11:07 +00:00)
Compare commits: check-wind...embedding- (2713 commits)

.github/ISSUE_TEMPLATE/sprint_issue.md (vendored), 14 changed lines

@@ -22,6 +22,20 @@ Related product discussion:

<!---If necessary, create a list with technical/product steps-->

### Are you modifying a database?

- [ ] If not, add the `no db change` label to your PR, and you're good to merge.
- [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining what to do.

### Reminders when modifying the API

- [ ] Update the openAPI file with utoipa:
  - [ ] If a new module has been introduced, create a new structure deriving [the OpenAPI proc-macro](https://docs.rs/utoipa/latest/utoipa/derive.OpenApi.html) and nest it in the main [openAPI structure](https://github.com/meilisearch/meilisearch/blob/f2185438eed60fa32d25b15480c5ee064f6fba4a/crates/meilisearch/src/routes/mod.rs#L64-L78).
  - [ ] If a new route has been introduced, add the [path decorator](https://docs.rs/utoipa/latest/utoipa/attr.path.html) to it and add the route at the top of the file in its openAPI structure.
  - [ ] If a structure which is deserialized or serialized in the API has been introduced or modified, it must derive the [`schema`](https://docs.rs/utoipa/latest/utoipa/macro.schema.html) or the [`IntoParams`](https://docs.rs/utoipa/latest/utoipa/derive.IntoParams.html) proc-macro. If it's a **new** structure, you must also add it to the big list of structures [in the main `OpenApi` structure](https://github.com/meilisearch/meilisearch/blob/f2185438eed60fa32d25b15480c5ee064f6fba4a/crates/meilisearch/src/routes/mod.rs#L88).
  - [ ] Once everything is done, start Meilisearch with the swagger flag: `cargo run --features swagger`, open `http://localhost:7700/scalar` in your browser, and ensure everything works as expected.
  - For more info, refer to [this presentation](https://pitch.com/v/generating-the-openapi-file-jrn3nh).

### Reminders when modifying the Setting API

<!--- Special steps to remind when adding a new index setting -->
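
The utoipa checklist above is easier to follow with a concrete picture of what each derive and decorator looks like. The sketch below is a minimal, hypothetical illustration, not code from the Meilisearch routes: every type, route path, and module name is invented, and it only assumes the `utoipa` and `serde` crates.

```rust
// Hypothetical names throughout; this only illustrates the checklist steps.
use serde::{Deserialize, Serialize};
use utoipa::{IntoParams, OpenApi, ToSchema};

/// Query parameters deserialized from the URL derive `IntoParams`.
#[derive(Deserialize, IntoParams)]
struct DemoQuery {
    /// Maximum number of hits to return.
    limit: Option<usize>,
}

/// Structures serialized in API responses derive `ToSchema` (the `schema` macro).
#[derive(Serialize, ToSchema)]
struct DemoResponse {
    hits: Vec<String>,
}

/// A new route gets the `#[utoipa::path]` decorator so it appears in the spec.
#[utoipa::path(
    get,
    path = "/demo",
    params(DemoQuery),
    responses((status = 200, description = "List demo hits", body = DemoResponse))
)]
async fn get_demo() {
    // The real handler body lives in the route module.
}

/// Per-module OpenAPI structure, declared at the top of the module's file.
#[derive(OpenApi)]
#[openapi(paths(get_demo), components(schemas(DemoResponse)))]
struct DemoApi;

/// The main OpenAPI structure nests the new module's structure.
#[derive(OpenApi)]
#[openapi(nest((path = "/demo", api = DemoApi)))]
struct MainApi;

fn main() {
    // Print the merged specification as pretty JSON.
    println!("{}", MainApi::openapi().to_pretty_json().unwrap());
}
```

Running the sketch prints the merged spec as JSON; in Meilisearch itself the equivalent check is the last checklist item, starting the server with `cargo run --features swagger` and opening `http://localhost:7700/scalar`.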

.github/workflows/bench-manual.yml (vendored), 41 changed lines

@@ -1,30 +1,27 @@
 name: Bench (manual)

 on:
   workflow_dispatch:
     inputs:
       workload:
-        description: 'The path to the workloads to execute (workloads/...)'
+        description: "The path to the workloads to execute (workloads/...)"
         required: true
-        default: 'workloads/movies.json'
+        default: "workloads/movies.json"

 env:
   WORKLOAD_NAME: ${{ github.event.inputs.workload }}

 jobs:
   benchmarks:
     name: Run and upload benchmarks
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
       - uses: actions/checkout@v3
-      - uses: actions-rs/toolchain@v1
+      - uses: dtolnay/rust-toolchain@1.85
         with:
           profile: minimal
-          toolchain: stable
-          override: true

       - name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
         run: |
           cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}

.github/workflows/bench-pr.yml (vendored), 109 changed lines

@@ -1,53 +1,82 @@
 name: Bench (PR)
 on:
   issue_comment:
     types: [created]

 permissions:
   issues: write

 env:
   GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

 jobs:
   run-benchmarks-on-comment:
     if: startsWith(github.event.comment.body, '/bench')
     name: Run and upload benchmarks
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
+      - name: Check permissions
+        id: permission
+        env:
+          PR_AUTHOR: ${{github.event.issue.user.login }}
+          COMMENT_AUTHOR: ${{github.event.comment.user.login }}
+          REPOSITORY: ${{github.repository}}
+          PR_ID: ${{github.event.issue.number}}
+        run: |
+          PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
+          if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
+          then
+            echo "::notice title=Authentication success::PR author authenticated"
+          else
+            echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
+            exit 1
+          fi
+          if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
+          then
+            echo "::notice title=Authentication success::Comment author authenticated"
+          else
+            echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
+            exit 1
+          fi
+          if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
+          then
+            echo "::notice title=Authentication success::PR started from main repository"
+          else
+            echo "::error title=Authentication error::PR started from a fork"
+            exit 1
+          fi
+
       - name: Check for Command
         id: command
         uses: xt0rted/slash-command-action@v2
         with:
           command: bench
           reaction-type: "rocket"
           repo-token: ${{ env.GH_TOKEN }}

-      - uses: xt0rted/pull-request-comment-branch@v2
+      - uses: xt0rted/pull-request-comment-branch@v3
         id: comment-branch
         with:
           repo_token: ${{ env.GH_TOKEN }}

       - uses: actions/checkout@v3
         if: success()
         with:
           fetch-depth: 0 # fetch full history to be able to get main commit sha
           ref: ${{ steps.comment-branch.outputs.head_ref }}

-      - uses: actions-rs/toolchain@v1
+      - uses: dtolnay/rust-toolchain@1.85
         with:
           profile: minimal
-          toolchain: stable
-          override: true

       - name: Run benchmarks on PR ${{ github.event.issue.id }}
         run: |
           cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" \
             --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" \
             --reason "[Comment](${{ github.event.comment.html_url }}) on [#${{ github.event.issue.number }}](${{ github.event.issue.html_url }})" \
             -- ${{ steps.command.outputs.command-arguments }} > benchlinks.txt

       - name: Send comment in PR
         run: |
           gh pr comment ${{github.event.issue.number}} --body-file benchlinks.txt

.github/workflows/bench-push-indexing.yml (vendored), 35 changed lines

@@ -1,25 +1,22 @@
 name: Indexing bench (push)

 on:
   push:
     branches:
       - main

 jobs:
   benchmarks:
     name: Run and upload benchmarks
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
       - uses: actions/checkout@v3
-      - uses: actions-rs/toolchain@v1
+      - uses: dtolnay/rust-toolchain@1.85
         with:
           profile: minimal
-          toolchain: stable
-          override: true

       # Run benchmarks
       - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
         run: |
           cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json
12  .github/workflows/benchmarks-manual.yml  vendored
@@ -4,9 +4,9 @@ on:
workflow_dispatch:
inputs:
dataset_name:
description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
description: "The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)"
required: false
default: 'search_songs'
default: "search_songs"

env:
BENCH_NAME: ${{ github.event.inputs.dataset_name }}

@@ -18,11 +18,9 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name

@@ -45,7 +43,7 @@ jobs:
# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cd crates/benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files

@@ -69,7 +67,7 @@ jobs:
out_dir: critcmp_results

# Helper
- name: 'README: compare with another benchmark'
- name: "README: compare with another benchmark"
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'
39  .github/workflows/benchmarks-pr.yml  vendored
@@ -13,11 +13,40 @@ jobs:
runs-on: benchmarks
timeout-minutes: 4320 # 72h
steps:
- uses: actions-rs/toolchain@v1
- name: Check permissions
id: permission
env:
PR_AUTHOR: ${{github.event.issue.user.login }}
COMMENT_AUTHOR: ${{github.event.comment.user.login }}
REPOSITORY: ${{github.repository}}
PR_ID: ${{github.event.issue.number}}
run: |
PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
then
echo "::notice title=Authentication success::PR author authenticated"
else
echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
exit 1
fi
if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
then
echo "::notice title=Authentication success::Comment author authenticated"
else
echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
exit 1
fi
if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
then
echo "::notice title=Authentication success::PR started from main repository"
else
echo "::error title=Authentication error::PR started from a fork"
exit 1
fi

- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true

- name: Check for Command
id: command

@@ -27,7 +56,7 @@ jobs:
reaction-type: "eyes"
repo-token: ${{ env.GH_TOKEN }}

- uses: xt0rted/pull-request-comment-branch@v2
- uses: xt0rted/pull-request-comment-branch@v3
id: comment-branch
with:
repo_token: ${{ env.GH_TOKEN }}

@@ -59,7 +88,7 @@ jobs:
# Run benchmarks
- name: Run benchmarks - Dataset ${{ steps.command.outputs.command-arguments }} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cd crates/benchmarks
cargo bench --bench ${{ steps.command.outputs.command-arguments }} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files
@@ -16,11 +16,9 @@
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name

@@ -43,7 +41,7 @@ jobs:
# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cd crates/benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files

@@ -71,7 +69,7 @@ jobs:
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
- name: "README: compare with another benchmark"
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'

@@ -15,11 +15,9 @@
runs-on: benchmarks
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name

@@ -42,7 +40,7 @@ jobs:
# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cd crates/benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files

@@ -70,7 +68,7 @@ jobs:
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
- name: "README: compare with another benchmark"
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'

@@ -15,11 +15,9 @@
runs-on: benchmarks
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name

@@ -42,7 +40,7 @@ jobs:
# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cd crates/benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files

@@ -70,7 +68,7 @@ jobs:
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
- name: "README: compare with another benchmark"
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'

@@ -15,11 +15,9 @@
runs-on: benchmarks
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name

@@ -42,7 +40,7 @@ jobs:
# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cd crates/benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files

@@ -70,7 +68,7 @@ jobs:
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
- name: "README: compare with another benchmark"
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'
100  .github/workflows/check-valid-milestone.yml  vendored  Normal file
@@ -0,0 +1,100 @@
name: PR Milestone Check

on:
pull_request:
types: [opened, reopened, edited, synchronize, milestoned, demilestoned]
branches:
- "main"
- "release-v*.*.*"

jobs:
check-milestone:
name: Check PR Milestone
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v3

- name: Validate PR milestone
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
// Get PR number directly from the event payload
const prNumber = context.payload.pull_request.number;

// Get PR details
const { data: prData } = await github.rest.pulls.get({
owner: 'meilisearch',
repo: 'meilisearch',
pull_number: prNumber
});

// Get base branch name
const baseBranch = prData.base.ref;
console.log(`Base branch: ${baseBranch}`);

// Get PR milestone
const prMilestone = prData.milestone;
if (!prMilestone) {
core.setFailed('PR must have a milestone assigned');
return;
}
console.log(`PR milestone: ${prMilestone.title}`);

// Validate milestone format: vx.y.z
const milestoneRegex = /^v\d+\.\d+\.\d+$/;
if (!milestoneRegex.test(prMilestone.title)) {
core.setFailed(`Milestone "${prMilestone.title}" does not follow the required format vx.y.z`);
return;
}

// For main branch PRs, check if the milestone is the highest one
if (baseBranch === 'main') {
// Get all milestones
const { data: milestones } = await github.rest.issues.listMilestones({
owner: 'meilisearch',
repo: 'meilisearch',
state: 'open',
sort: 'due_on',
direction: 'desc'
});

// Sort milestones by version number (vx.y.z)
const sortedMilestones = milestones
.filter(m => milestoneRegex.test(m.title))
.sort((a, b) => {
const versionA = a.title.substring(1).split('.').map(Number);
const versionB = b.title.substring(1).split('.').map(Number);

// Compare major version
if (versionA[0] !== versionB[0]) return versionB[0] - versionA[0];
// Compare minor version
if (versionA[1] !== versionB[1]) return versionB[1] - versionA[1];
// Compare patch version
return versionB[2] - versionA[2];
});

if (sortedMilestones.length === 0) {
core.setFailed('No valid milestones found in the repository. Please create at least one milestone with the format vx.y.z');
return;
}

const highestMilestone = sortedMilestones[0];
console.log(`Highest milestone: ${highestMilestone.title}`);

if (prMilestone.title !== highestMilestone.title) {
core.setFailed(`PRs targeting the main branch must use the highest milestone (${highestMilestone.title}), but this PR uses ${prMilestone.title}`);
return;
}
} else {
// For release branches, the milestone should match the branch version
const branchVersion = baseBranch.substring(8); // remove 'release-'
if (prMilestone.title !== branchVersion) {
core.setFailed(`PRs targeting release branch "${baseBranch}" must use the matching milestone "${branchVersion}", but this PR uses "${prMilestone.title}"`);
return;
}
}

console.log('PR milestone validation passed!');
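For a quick local look at what this check will see, the milestone of a PR can be read with the same GitHub API endpoint the script above queries. This is only an illustrative sketch; the PR number is a placeholder:

```sh
# Hypothetical local check: print the milestone title of PR #1234
# (the workflow above reads the same field via actions/github-script).
gh api /repos/meilisearch/meilisearch/pulls/1234 --jq '.milestone.title'
```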
57  .github/workflows/db-change-comments.yml  vendored  Normal file
@@ -0,0 +1,57 @@
name: Comment when db change labels are added

on:
pull_request:
types: [labeled]

env:
MESSAGE: |
### Hello, I'm a bot 🤖

You are receiving this message because you declared that this PR makes changes to the Meilisearch database.
Depending on the nature of the change, additional actions might be required on your part. The following sections detail the additional actions depending on the nature of the change; please copy the relevant section into the description of your PR, and make sure to perform the required actions.

Thank you for contributing to Meilisearch :heart:

## This PR makes forward-compatible changes

*Forward-compatible changes are changes to the database such that databases created in an older version of Meilisearch are still valid in the new version of Meilisearch. They usually represent additive changes, like adding a new optional attribute or setting.*

- [ ] Detail the changes to the DB format and why they are forward compatible
- [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.


## This PR makes breaking changes

*Breaking changes are changes to the database such that databases created in an older version of Meilisearch need changes to remain valid in the new version of Meilisearch. This typically happens when the way to store the data changed (change of database, new required key, etc). This can also happen due to breaking changes in the API of an experimental feature. ⚠️ This kind of change is more difficult to achieve safely, so proceed with caution and test dumpless upgrade right before merging the PR.*

- [ ] Detail the changes to the DB format,
- [ ] which are compatible, and why
- [ ] which are not compatible, why, and how they will be fixed up in the upgrade
- [ ] /!\ Ensure all the read operations still work!
- If the change happened in milli, you may need to check the version of the database before doing any read operation
- If the change happened in the index-scheduler, make sure the new code can immediately read the old database
- If the change happened in the meilisearch-auth database, reach out to the team; we don't know yet how to handle these changes
- [ ] Write the code to go from the old database to the new one
- If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
- If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
- [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected


jobs:
add-comment:
runs-on: ubuntu-latest
if: github.event.label.name == 'db change'
steps:
- name: Add comment
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const message = process.env.MESSAGE;
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: message
})
28  .github/workflows/db-change-missing.yml  vendored  Normal file
@@ -0,0 +1,28 @@
name: Check db change labels

on:
pull_request:
types: [opened, synchronize, reopened, labeled, unlabeled]

jobs:
check-labels:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Check db change labels
id: check_labels
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
URL=/repos/meilisearch/meilisearch/pulls/${{ github.event.pull_request.number }}/labels
echo ${{ github.event.pull_request.number }}
echo $URL
LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
echo "Labels: $LABELS"
if [[ ! "$LABELS" =~ "db change" && ! "$LABELS" =~ "no db change" ]]; then
echo "::error::Pull request must contain either the 'db change' or 'no db change' label."
exit 1
else
echo "The label is set"
fi
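As a sketch of how a contributor could satisfy this check from the command line (the PR number is a placeholder, and an authenticated `gh` CLI is assumed):

```sh
# Add one of the two labels the workflow looks for; pick the one that applies.
gh pr edit 1234 --add-label "no db change"
# or, when the PR does touch the database format:
gh pr edit 1234 --add-label "db change"
```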
40  .github/workflows/flaky-tests.yml  vendored
@@ -1,4 +1,5 @@
name: Look for flaky tests

on:
workflow_dispatch:
schedule:

@@ -8,25 +9,22 @@ jobs:
flaky:
runs-on: ubuntu-latest
container:
# Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
image: ubuntu:18.04
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- uses: actions/checkout@v3
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Install cargo-flaky
run: cargo install cargo-flaky
- name: Run cargo flaky in the dumps
run: cd dump; cargo flaky -i 100 --release
- name: Run cargo flaky in the index-scheduler
run: cd index-scheduler; cargo flaky -i 100 --release
- name: Run cargo flaky in the auth
run: cd meilisearch-auth; cargo flaky -i 100 --release
- name: Run cargo flaky in meilisearch
run: cd meilisearch; cargo flaky -i 100 --release
- uses: actions/checkout@v3
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.85
- name: Install cargo-flaky
run: cargo install cargo-flaky
- name: Run cargo flaky in the dumps
run: cd crates/dump; cargo flaky -i 100 --release
- name: Run cargo flaky in the index-scheduler
run: cd crates/index-scheduler; cargo flaky -i 100 --release
- name: Run cargo flaky in the auth
run: cd crates/meilisearch-auth; cargo flaky -i 100 --release
- name: Run cargo flaky in meilisearch
run: cd crates/meilisearch; cargo flaky -i 100 --release
4  .github/workflows/fuzzer-indexing.yml  vendored
@@ -12,11 +12,9 @@ jobs:
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true

# Run benchmarks
- name: Run the fuzzer
46  .github/workflows/milestone-workflow.yml  vendored
@@ -5,6 +5,7 @@ name: Milestone's workflow
# For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
# - the roadmap issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/roadmap-issue.md
# - the changelog issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/changelog-issue.md
# - update the ruleset to add the current release version to the list of allowed versions and be able to use the merge queue.

# For each Milestone closed
# - the `release_version` label is created

@@ -21,10 +22,9 @@ env:
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

jobs:

# -----------------
# MILESTONE CREATED
# -----------------
# -----------------
# MILESTONE CREATED
# -----------------

get-release-version:
if: github.event.action == 'created'

@@ -148,9 +148,41 @@ jobs:
--body-file $ISSUE_TEMPLATE \
--milestone $MILESTONE_VERSION

# ----------------
# MILESTONE CLOSED
# ----------------
update-ruleset:
runs-on: ubuntu-latest
if: github.event.action == 'created'
steps:
- uses: actions/checkout@v3
- name: Install jq
run: |
sudo apt-get update
sudo apt-get install -y jq
- name: Update ruleset
env:
# gh api repos/meilisearch/meilisearch/rulesets --jq '.[] | {name: .name, id: .id}'
RULESET_ID: 4253297
BRANCH_NAME: ${{ github.event.inputs.branch_name }}
run: |
echo "RULESET_ID: ${{ env.RULESET_ID }}"
echo "BRANCH_NAME: ${{ env.BRANCH_NAME }}"

# Get current ruleset conditions
CONDITIONS=$(gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} --jq '{ conditions: .conditions }')

# Update the conditions by appending the milestone version
UPDATED_CONDITIONS=$(echo $CONDITIONS | jq '.conditions.ref_name.include += ["refs/heads/release-'${{ env.MILESTONE_VERSION }}'"]')

# Update the ruleset from stdin (-)
echo $UPDATED_CONDITIONS |
gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} \
--method PUT \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
--input -

# ----------------
# MILESTONE CLOSED
# ----------------

create-release-label:
if: github.event.action == 'closed'
45  .github/workflows/publish-apt-brew-pkg.yml  vendored
@@ -18,31 +18,28 @@ jobs:
runs-on: ubuntu-latest
needs: check-version
container:
# Use ubuntu-18.04 to compile with glibc 2.27
image: ubuntu:18.04
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Install cargo-deb
run: cargo install cargo-deb
- uses: actions/checkout@v3
- name: Build deb package
run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
- name: Upload debian pkg to release
uses: svenstaro/upload-release-action@2.7.0
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/debian/meilisearch.deb
asset_name: meilisearch.deb
tag: ${{ github.ref }}
- name: Upload debian pkg to apt repository
run: curl -F package=@target/debian/meilisearch.deb https://${{ secrets.GEMFURY_PUSH_TOKEN }}@push.fury.io/meilisearch/
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.85
- name: Install cargo-deb
run: cargo install cargo-deb
- uses: actions/checkout@v3
- name: Build deb package
run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
- name: Upload debian pkg to release
uses: svenstaro/upload-release-action@2.11.1
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/debian/meilisearch.deb
asset_name: meilisearch.deb
tag: ${{ github.ref }}
- name: Upload debian pkg to apt repository
run: curl -F package=@target/debian/meilisearch.deb https://${{ secrets.GEMFURY_PUSH_TOKEN }}@push.fury.io/meilisearch/

homebrew:
name: Bump Homebrew formula
99  .github/workflows/publish-binaries.yml  vendored
@@ -3,7 +3,7 @@ name: Publish binaries to GitHub release
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *' # Every day at 2:00am
- cron: "0 2 * * *" # Every day at 2:00am
release:
types: [published]

@@ -37,29 +37,26 @@ jobs:
runs-on: ubuntu-latest
needs: check-version
container:
# Use ubuntu-18.04 to compile with glibc 2.27
image: ubuntu:18.04
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- uses: actions/checkout@v3
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.7.0
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/release/meilisearch
asset_name: meilisearch-linux-amd64
tag: ${{ github.ref }}
- uses: actions/checkout@v3
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: dtolnay/rust-toolchain@1.85
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.1
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/release/meilisearch
asset_name: meilisearch-linux-amd64
tag: ${{ github.ref }}

publish-macos-windows:
name: Publish binary for ${{ matrix.os }}

@@ -68,35 +65,32 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [macos-12, windows-2022]
os: [macos-13, windows-2022]
include:
- os: macos-12
- os: macos-13
artifact_name: meilisearch
asset_name: meilisearch-macos-amd64
- os: windows-2022
artifact_name: meilisearch.exe
asset_name: meilisearch-windows-amd64.exe
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.7.0
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/release/${{ matrix.artifact_name }}
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@1.85
- name: Build
run: cargo build --release --locked
# No need to upload binaries for dry run (cron)
- name: Upload binaries to release
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.11.1
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/release/${{ matrix.artifact_name }}
asset_name: ${{ matrix.asset_name }}
tag: ${{ github.ref }}

publish-macos-apple-silicon:
name: Publish binary for macOS silicon
runs-on: macos-12
runs-on: macos-13
needs: check-version
strategy:
matrix:

@@ -107,12 +101,10 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v3
- name: Installing Rust toolchain
uses: actions-rs/toolchain@v1
uses: dtolnay/rust-toolchain@1.85
with:
toolchain: stable
profile: minimal
target: ${{ matrix.target }}
override: true
- name: Cargo build
uses: actions-rs/cargo@v1
with:

@@ -121,7 +113,7 @@ jobs:
- name: Upload the binary to release
# No need to upload binaries for dry run (cron)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.7.0
uses: svenstaro/upload-release-action@2.11.1
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch

@@ -132,9 +124,11 @@ jobs:
name: Publish binary for aarch64
runs-on: ubuntu-latest
needs: check-version
env:
DEBIAN_FRONTEND: noninteractive
container:
# Use ubuntu-18.04 to compile with glibc 2.27
image: ubuntu:18.04
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
strategy:
matrix:
include:

@@ -154,12 +148,10 @@ jobs:
add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update -y && apt-get install -y docker-ce
- name: Installing Rust toolchain
uses: actions-rs/toolchain@v1
uses: dtolnay/rust-toolchain@1.85
with:
toolchain: stable
profile: minimal
target: ${{ matrix.target }}
override: true
- name: Configure target aarch64 GNU
## Environment variable is not passed using env:
## LD gold won't work with MUSL

@@ -170,6 +162,9 @@ jobs:
echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
- name: Install a default toolchain that will be used to build cargo cross
run: |
rustup default stable
- name: Cargo build
uses: actions-rs/cargo@v1
with:

@@ -183,7 +178,7 @@ jobs:
- name: Upload the binary to release
# No need to upload binaries for dry run (cron)
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@2.7.0
uses: svenstaro/upload-release-action@2.11.1
with:
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
file: target/${{ matrix.target }}/release/meilisearch
22  .github/workflows/publish-docker-images.yml  vendored
@@ -80,10 +80,11 @@ jobs:
type=ref,event=tag
type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=semver,pattern=v{{major}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}

- name: Build and push
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
push: true
platforms: linux/amd64,linux/arm64

@@ -103,3 +104,22 @@ jobs:
repository: meilisearch/meilisearch-cloud
event-type: cloud-docker-build
client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'

# Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
# - name: 'Setup jq'
#   uses: dcarbone/install-jq-action
# - name: Send deployment to Swarmia
#   if: github.event_name == 'push' && success()
#   run: |
#     JSON_STRING=$( jq --null-input --compact-output \
#       --arg version "${{ github.ref_name }}" \
#       --arg appName "meilisearch" \
#       --arg environment "production" \
#       --arg commitSha "${{ github.sha }}" \
#       --arg repositoryFullName "${{ github.repository }}" \
#       '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )

#   curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
#     -H "Content-Type: application/json" \
#       -d "$JSON_STRING" \
#       https://hook.swarmia.com/deployments
30  .github/workflows/sdks-tests.yml  vendored
@@ -22,7 +22,7 @@ jobs:
outputs:
docker-image: ${{ steps.define-image.outputs.docker-image }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Define the Docker image we need to use
id: define-image
run: |

@@ -46,13 +46,13 @@ jobs:
MEILISEARCH_VERSION: ${{ needs.define-docker-image.outputs.docker-image }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-dotnet
- name: Setup .NET Core
uses: actions/setup-dotnet@v4
with:
dotnet-version: "6.0.x"
dotnet-version: "8.0.x"
- name: Install dependencies
run: dotnet restore
- name: Build

@@ -75,7 +75,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-dart
- uses: dart-lang/setup-dart@v1

@@ -103,7 +103,7 @@ jobs:
uses: actions/setup-go@v5
with:
go-version: stable
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-go
- name: Get dependencies

@@ -129,7 +129,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-java
- name: Set up Java

@@ -156,7 +156,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-js
- name: Setup node

@@ -191,7 +191,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-php
- name: Install PHP

@@ -220,7 +220,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-python
- name: Set up Python

@@ -245,7 +245,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-ruby
- name: Set up Ruby 3

@@ -270,7 +270,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-rust
- name: Build

@@ -291,7 +291,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-swift
- name: Run tests

@@ -314,7 +314,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-js-plugins
- name: Setup node

@@ -345,7 +345,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-rails
- name: Set up Ruby 3

@@ -369,7 +369,7 @@ jobs:
ports:
- '7700:7700'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: meilisearch/meilisearch-symfony
- name: Install PHP
109  .github/workflows/test-suite.yml  vendored
@@ -4,13 +4,9 @@ on:
workflow_dispatch:
schedule:
# Everyday at 5:00am
- cron: '0 5 * * *'
- cron: "0 5 * * *"
pull_request:
push:
# trying and staging branches are for Bors config
branches:
- trying
- staging
merge_group:

env:
CARGO_TERM_COLOR: always

@@ -19,11 +15,11 @@ env:

jobs:
test-linux:
name: Tests on ubuntu-18.04
name: Tests on ubuntu-22.04
runs-on: ubuntu-latest
container:
# Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
image: ubuntu:18.04
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- uses: actions/checkout@v3
- name: Install needed dependencies

@@ -31,12 +27,9 @@ jobs:
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- name: Setup test with Rust stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
uses: dtolnay/rust-toolchain@1.85
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.7.1
uses: Swatinem/rust-cache@v2.8.0
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:

@@ -54,15 +47,12 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [macos-12, windows-2022]
os: [macos-13, windows-2022]
steps:
- uses: actions/checkout@v3
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.7.1
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
uses: Swatinem/rust-cache@v2.8.0
- uses: dtolnay/rust-toolchain@1.85
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:

@@ -78,8 +68,8 @@ jobs:
name: Tests almost all features
runs-on: ubuntu-latest
container:
# Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
image: ubuntu:18.04
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v3

@@ -87,22 +77,51 @@ jobs:
run: |
apt-get update
apt-get install --assume-yes build-essential curl
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: dtolnay/rust-toolchain@1.85
- name: Run cargo build with almost all features
run: |
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda)"
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
- name: Run cargo test with almost all features
run: |
cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda)"
cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"

ollama-ubuntu:
name: Test with Ollama
runs-on: ubuntu-latest
env:
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
steps:
- uses: actions/checkout@v3
- name: Install Ollama
run: |
curl -fsSL https://ollama.com/install.sh | sudo -E sh
- name: Start serving
run: |
# Run it in the background, there is no way to daemonise at the moment
ollama serve &

# A short pause is required before the HTTP port is opened
sleep 5

# This endpoint blocks until ready
time curl -i http://localhost:11434

- name: Pull nomic-embed-text & all-minilm
run: |
ollama pull nomic-embed-text
ollama pull all-minilm

- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
args: --locked --release --all --features test-ollama ollama

test-disabled-tokenization:
name: Test disabled tokenization
runs-on: ubuntu-latest
container:
image: ubuntu:18.04
image: ubuntu:22.04
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v3

@@ -110,13 +129,10 @@ jobs:
run: |
apt-get update
apt-get install --assume-yes build-essential curl
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: dtolnay/rust-toolchain@1.85
- name: Run cargo tree without default features and check lindera is not present
run: |
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -vqz lindera; then
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
echo "lindera has been found in the sources and it shouldn't"
exit 1
fi

@@ -129,20 +145,17 @@ jobs:
name: Run tests in debug
runs-on: ubuntu-latest
container:
# Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
image: ubuntu:18.04
# Use ubuntu-22.04 to compile with glibc 2.35
image: ubuntu:22.04
steps:
- uses: actions/checkout@v3
- name: Install needed dependencies
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: dtolnay/rust-toolchain@1.85
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.7.1
uses: Swatinem/rust-cache@v2.8.0
- name: Run tests in debug
uses: actions-rs/cargo@v1
with:

@@ -154,14 +167,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: 1.75.0
override: true
components: clippy
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.7.1
uses: Swatinem/rust-cache@v2.8.0
- name: Run cargo clippy
uses: actions-rs/cargo@v1
with:

@@ -173,18 +184,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: nightly
toolchain: nightly-2024-07-09
override: true
components: rustfmt
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.7.1
uses: Swatinem/rust-cache@v2.8.0
- name: Run cargo fmt
# Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
# Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
# we are going to create an empty file where rustfmt expects it.
run: |
echo -ne "\n" > benchmarks/benches/datasets_paths.rs
echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs
cargo fmt --all -- --check
@@ -4,7 +4,7 @@ on:
workflow_dispatch:
inputs:
new_version:
description: 'The new version (vX.Y.Z)'
description: "The new version (vX.Y.Z)"
required: true

env:

@@ -18,11 +18,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@1.85
with:
profile: minimal
toolchain: stable
override: true
- name: Install sd
run: cargo install sd
- name: Update Cargo.toml file
15  .gitignore  vendored
@@ -5,18 +5,27 @@
**/*.json_lines
**/*.rs.bk
/*.mdb
/query-history.txt
/data.ms
/snapshots
/dumps
/bench
/_xtask_benchmark.ms
/benchmarks
.DS_Store

# Snapshots
## ... large
*.full.snap
## ... unreviewed
## ... unreviewed
*.snap.new
## ... pending
*.pending-snap

# Tmp files
.tmp*

# Database snapshot
crates/meilisearch/db.snapshot

# Fuzzcheck data for the facet indexing fuzz test
milli/fuzz/update::facet::incremental::fuzz::fuzz/
crates/milli/fuzz/update::facet::incremental::fuzz::fuzz/
@@ -48,6 +48,27 @@ cargo xtask bench --no-dashboard -- workloads/my_workload_1.json workloads/my_wo

For processing the results, look at [Looking at benchmark results/Without dashboard](#without-dashboard).

#### Sending a workload by hand

Sometimes you want to visualize the metrics of a workload that comes from a custom report.
It is not easy to trick the benchboard into thinking that your report is legitimate, but here are the commands you can run to upload your Firefox report to a running benchboard.

```bash
# Name this hostname whatever you want
echo '{ "hostname": "the-best-place" }' | xh PUT 'http://127.0.0.1:9001/api/v1/machine'

# You'll receive a UUID from this command that we will call $invocation_uuid
echo '{ "commit": { "sha1": "1234567", "commit_date": "2024-09-05 12:00:12.0 +00:00:00", "message": "A cool message" }, "machine_hostname": "the-best-place", "max_workloads": 1 }' | xh PUT 'http://127.0.0.1:9001/api/v1/invocation'

# Just use the UUID from the previous command
# and you'll receive another UUID that we will call $workload_uuid
echo '{ "invocation_uuid": "$invocation_uuid", "name": "toto", "max_runs": 1 }' | xh PUT 'http://127.0.0.1:9001/api/v1/workload'

# And now use your $workload_uuid and the content of your Firefox report
# but don't forget to convert your Firefox report from JSONLines into an object
echo '{ "workload_uuid": "$workload_uuid", "data": $REPORT_JSON_DATA }' | xh PUT 'http://127.0.0.1:9001/api/v1/run'
```

### In CI

We have dedicated runners to run workloads on CI. Currently, there are three ways of running the CI:

@@ -109,6 +130,12 @@ They are JSON files with the following structure (comments are not actually supp
"run_count": 3,
// List of arguments to add to the Meilisearch command line.
"extra_cli_args": ["--max-indexing-threads=1"],
// An expression that can be parsed as a comma-separated list of targets and levels
// as described in [tracing_subscriber's documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/targets/struct.Targets.html#examples).
// The expression is used to filter the spans that are measured for profiling purposes.
// Optional, defaults to "indexing::=trace" (for indexing workloads); another common value is
// "search::=trace"
"target": "indexing::=trace",
// List of named assets that can be used in the commands.
"assets": {
// name of the asset.
@@ -52,6 +52,28 @@ cargo test

This command will be triggered for each PR as a requirement for merging it.

#### Faster build

You can set the `LINDERA_CACHE` environment variable to speed up your successive builds by up to 2 minutes.
It'll store some built artifacts in the directory of your choice.

We recommend using the `$HOME/.cache/meili/lindera` directory:
```sh
export LINDERA_CACHE=$HOME/.cache/meili/lindera
```

You can set the `MILLI_BENCH_DATASETS_PATH` environment variable to further speed up your builds.
It'll store some big files used for the benchmarks in the directory of your choice.

We recommend using the `$HOME/.cache/meili/benches` directory:
```sh
export MILLI_BENCH_DATASETS_PATH=$HOME/.cache/meili/benches
```

Furthermore, you can improve incremental compilation by setting the `MEILI_NO_VERGEN` environment variable.
Setting this variable will prevent the Meilisearch binary from being rebuilt each time the directory that hosts the Meilisearch repository changes.
Do not enable this environment variable for production builds (as it will break the `version` route, among other things).
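A minimal sketch of how this could look in a development shell; the exact value assigned is an assumption, since the paragraph above only says the variable needs to be set:

```sh
# Development builds only: skip vergen so the binary is not rebuilt on every repository change.
# Do NOT set this for production builds (it breaks the `version` route, among other things).
export MEILI_NO_VERGEN=true
```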
#### Snapshot-based tests

We are using [insta](https://insta.rs) to perform snapshot-based testing.

@@ -63,7 +85,7 @@ Furthermore, we provide some macros on top of insta, notably a way to use snapsh

To effectively debug snapshot-based hashes, we recommend you export the `MEILI_TEST_FULL_SNAPS` environment variable so that snapshots are fully created locally:

```
```sh
export MEILI_TEST_FULL_SNAPS=true # add this to your .bashrc, .zshrc, ...
```
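If a test run leaves unreviewed `*.snap.new` or `*.pending-snap` files behind (see the `.gitignore` entries earlier in this changeset), they can be reviewed interactively with insta's optional `cargo-insta` helper; this is a suggested workflow, not something the contributing guide itself prescribes:

```sh
# One-time install of insta's review tool (optional helper).
cargo install cargo-insta
# Interactively accept or reject pending snapshot changes.
cargo insta review
```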
@@ -81,6 +103,11 @@ Meilisearch follows the [cargo xtask](https://github.com/matklad/cargo-xtask) wo

Run `cargo xtask --help` from the root of the repository to find out what is available.
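For instance, the commands below combine `cargo xtask --help` with two invocations that appear elsewhere in this changeset (the workload glob is taken from the push benchmark workflow):

```sh
# List every xtask command available in this workspace.
cargo xtask --help
# Used by the test suite to enumerate cargo features:
cargo xtask list-features --exclude-feature cuda,test-ollama
# Used by the benchmark workflows to run all workloads:
cargo xtask bench --no-dashboard -- workloads/*.json
```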
#### Update the openAPI file if the API changed

To update the openAPI file in the code, see [sprint_issue.md](https://github.com/meilisearch/meilisearch/blob/main/.github/ISSUE_TEMPLATE/sprint_issue.md#reminders-when-modifying-the-api).
If you want to update the openAPI file on the [open-api repository](https://github.com/meilisearch/open-api), see [update-openapi-issue.md](https://github.com/meilisearch/engine-team/blob/main/issue-templates/update-openapi-issue.md).

### Logging

Meilisearch uses [`tracing`](https://lib.rs/crates/tracing) for logging purposes. Tracing logs are structured and can be displayed as JSON to the end user, so prefer passing arguments as fields rather than interpolating them in the message.

@@ -131,7 +158,7 @@ Some notes on GitHub PRs:
- The PR title should be accurate and descriptive of the changes.
- [Convert your PR as a draft](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/changing-the-stage-of-a-pull-request) if your changes are a work in progress: no one will review it until you pass your PR as ready for review.<br>
The draft PRs are recommended when you want to show that you are working on something and make your work visible.
- The branch related to the PR must be **up-to-date with `main`** before merging. Fortunately, this project uses [Bors](https://github.com/bors-ng/bors-ng) to automatically enforce this requirement without the PR author having to rebase manually.
- The branch related to the PR must be **up-to-date with `main`** before merging. Fortunately, this project uses [GitHub Merge Queues](https://github.blog/news-insights/product-news/github-merge-queue-is-generally-available/) to automatically enforce this requirement without the PR author having to rebase manually.

## Release Process (for internal team only)

@@ -139,8 +166,7 @@ Meilisearch tools follow the [Semantic Versioning Convention](https://semver.org

### Automation to rebase and Merge the PRs

This project integrates a bot that helps us manage pull requests merging.<br>
_[Read more about this](https://github.com/meilisearch/integration-guides/blob/main/resources/bors.md)._
This project uses GitHub Merge Queues, which help us manage pull request merging.

### How to Publish a new Release
4692  Cargo.lock  generated
File diff suppressed because it is too large
65  Cargo.toml
@@ -1,28 +1,28 @@
[workspace]
resolver = "2"
members = [
"meilisearch",
"meilitool",
"meilisearch-types",
"meilisearch-auth",
"meili-snap",
"index-scheduler",
"dump",
"file-store",
"permissive-json-pointer",
"milli",
"filter-parser",
"flatten-serde-json",
"json-depth-checker",
"benchmarks",
"fuzzers",
"tracing-trace",
"xtask",
"build-info",
"crates/meilisearch",
"crates/meilitool",
"crates/meilisearch-types",
"crates/meilisearch-auth",
"crates/meili-snap",
"crates/index-scheduler",
"crates/dump",
"crates/file-store",
"crates/permissive-json-pointer",
"crates/milli",
"crates/filter-parser",
"crates/flatten-serde-json",
"crates/json-depth-checker",
"crates/benchmarks",
"crates/fuzzers",
"crates/tracing-trace",
"crates/xtask",
"crates/build-info",
]

[workspace.package]
version = "1.8.0"
version = "1.16.0"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",

@@ -36,6 +36,12 @@ license = "MIT"
[profile.release]
codegen-units = 1

# We now compile heed without the NDEBUG define for better performance.
# However, we still enable debug assertions for a better detection of
# disk corruption on the cloud or in OSS.
[profile.release.package.heed]
debug-assertions = true

[profile.dev.package.flate2]
opt-level = 3

@@ -43,24 +49,3 @@ opt-level = 3
opt-level = 3
[profile.dev.package.roaring]
opt-level = 3

[profile.dev.package.lindera-ipadic-builder]
opt-level = 3
[profile.dev.package.encoding]
opt-level = 3
[profile.dev.package.yada]
opt-level = 3

[profile.release.package.lindera-ipadic-builder]
opt-level = 3
[profile.release.package.encoding]
opt-level = 3
[profile.release.package.yada]
opt-level = 3

[profile.bench.package.lindera-ipadic-builder]
opt-level = 3
[profile.bench.package.encoding]
opt-level = 3
[profile.bench.package.yada]
opt-level = 3
10  Dockerfile
@@ -1,7 +1,7 @@
# Compile
FROM rust:1.75.0-alpine3.18 AS compiler
FROM rust:1.85-alpine3.20 AS compiler

RUN apk add -q --update-cache --no-cache build-base openssl-dev
RUN apk add -q --no-cache build-base openssl-dev

WORKDIR /

@@ -20,13 +20,13 @@ RUN set -eux; \
cargo build --release -p meilisearch -p meilitool

# Run
FROM alpine:3.16
FROM alpine:3.20
LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch"

ENV MEILI_HTTP_ADDR 0.0.0.0:7700
ENV MEILI_SERVER_PROVIDER docker

RUN apk update --quiet \
&& apk add -q --no-cache libgcc tini curl
RUN apk add -q --no-cache libgcc tini curl

# add meilisearch and meilitool to the `/bin` so you can run it from anywhere
# and it's easy to find.
2  LICENSE
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2019-2024 Meili SAS
Copyright (c) 2019-2025 Meili SAS

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
42  README.md
@@ -20,12 +20,12 @@
<p align="center">
  <a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
  <a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
  <a href="https://ms-bors.herokuapp.com/repositories/52"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
  <a href="https://github.com/meilisearch/meilisearch/queue"><img alt="Merge Queues enabled" src="https://img.shields.io/badge/Merge_Queues-enabled-%2357cf60?logo=github"></a>
</p>

<p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>

Meilisearch helps you shape a delightful search experience in a snap, offering features that work out-of-the-box to speed up your workflow.
[Meilisearch](https://www.meilisearch.com?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=intro) helps you shape a delightful search experience in a snap, offering features that work out of the box to speed up your workflow.

<p align="center" name="demo">
  <a href="https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demo-gif#gh-light-mode-only" target="_blank">
@@ -36,34 +36,42 @@ Meilisearch helps you shape a delightful search experience in a snap, offering f
  </a>
</p>

🔥 [**Try it!**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demo-link) 🔥
## 🖥 Examples

- [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
- [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
- [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
- [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.

See the list of all our example apps in our [demos repository](https://github.com/meilisearch/demos).

## ✨ Features

- **Search-as-you-type:** find search results in less than 50 milliseconds
- **[Typo tolerance](https://www.meilisearch.com/docs/learn/configuration/typo_tolerance?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** get relevant matches even when queries contain typos and misspellings
- **Hybrid search:** Combine the best of both [semantic](https://www.meilisearch.com/docs/learn/experimental/vector_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features) & full-text search to get the most relevant results
- **Search-as-you-type:** Find & display results in less than 50 milliseconds to provide an intuitive experience
- **[Typo tolerance](https://www.meilisearch.com/docs/learn/relevancy/typo_tolerance_settings?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** get relevant matches even when queries contain typos and misspellings
- **[Filtering](https://www.meilisearch.com/docs/learn/fine_tuning_results/filtering?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features) and [faceted search](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** enhance your users' search experience with custom filters and build a faceted search interface in a few lines of code
- **[Sorting](https://www.meilisearch.com/docs/learn/fine_tuning_results/sorting?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** sort results based on price, date, or pretty much anything else your users need
- **[Synonym support](https://www.meilisearch.com/docs/learn/configuration/synonyms?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** configure synonyms to include more relevant content in your search results
- **[Synonym support](https://www.meilisearch.com/docs/learn/relevancy/synonyms?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** configure synonyms to include more relevant content in your search results
- **[Geosearch](https://www.meilisearch.com/docs/learn/fine_tuning_results/geosearch?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** filter and sort documents based on geographic data
- **[Extensive language support](https://www.meilisearch.com/docs/learn/what_is_meilisearch/language?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** search datasets in any language, with optimized support for Chinese, Japanese, Hebrew, and languages using the Latin alphabet
- **[Security management](https://www.meilisearch.com/docs/learn/security/master_api_keys?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** control which users can access what data with API keys that allow fine-grained permissions handling
- **[Multi-Tenancy](https://www.meilisearch.com/docs/learn/security/tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** personalize search results for any number of application tenants
- **[Multi-Tenancy](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** personalize search results for any number of application tenants
- **Highly Customizable:** customize Meilisearch to your specific needs or use our out-of-the-box and hassle-free presets
- **[RESTful API](https://www.meilisearch.com/docs/reference/api/overview?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** integrate Meilisearch in your technical stack with our plugins and SDKs
- **AI-ready:** works out of the box with [langchain](https://www.meilisearch.com/with/langchain) and the [model context protocol](https://github.com/meilisearch/meilisearch-mcp)
- **Easy to install, deploy, and maintain**

## 📖 Documentation

You can consult Meilisearch's documentation at [https://www.meilisearch.com/docs](https://www.meilisearch.com/docs/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=docs).
You can consult Meilisearch's documentation at [meilisearch.com/docs](https://www.meilisearch.com/docs/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=docs).

## 🚀 Getting started

For basic instructions on how to set up Meilisearch, add documents to an index, and search for documents, take a look at our [Quick Start](https://www.meilisearch.com/docs/learn/getting_started/quick_start?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=get-started) guide.
For basic instructions on how to set up Meilisearch, add documents to an index, and search for documents, take a look at our [documentation](https://www.meilisearch.com/docs?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=get-started) guide.

## ⚡ Supercharge your Meilisearch experience
## 🌍 Supercharge your Meilisearch experience

Say goodbye to server deployment and manual updates with [Meilisearch Cloud](https://www.meilisearch.com/cloud?utm_campaign=oss&utm_source=github&utm_medium=meilisearch). No credit card required.
Say goodbye to server deployment and manual updates with [Meilisearch Cloud](https://www.meilisearch.com/cloud?utm_campaign=oss&utm_source=github&utm_medium=meilisearch). Additional features include analytics & monitoring in many regions around the world. No credit card is required.

## 🧰 SDKs & integration tools

@@ -83,15 +91,15 @@ Finally, for more in-depth information, refer to our articles explaining fundame

## 📊 Telemetry

Meilisearch collects **anonymized** data from users to help us improve our product. You can [deactivate this](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) whenever you want.
Meilisearch collects **anonymized** user data to help us improve our product. You can [deactivate this](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) whenever you want.

To request deletion of collected data, please write to us at [privacy@meilisearch.com](mailto:privacy@meilisearch.com). Don't forget to include your `Instance UID` in the message, as this helps us quickly find and delete your data.
To request deletion of collected data, please write to us at [privacy@meilisearch.com](mailto:privacy@meilisearch.com). Remember to include your `Instance UID` in the message, as this helps us quickly find and delete your data.

If you want to know more about the kind of data we collect and what we use it for, check the [telemetry section](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) of our documentation.

## 📫 Get in touch!

Meilisearch is a search engine created by [Meili](https://www.welcometothejungle.com/en/companies/meilisearch), a software development company based in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)

🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.

@@ -105,11 +113,11 @@ Thank you for your support!

## 👩‍💻 Contributing

Meilisearch is, and will always be, open-source! If you want to contribute to the project, please take a look at [our contribution guidelines](CONTRIBUTING.md).
Meilisearch is, and will always be, open-source! If you want to contribute to the project, please look at [our contribution guidelines](CONTRIBUTING.md).

## 📦 Versioning

Meilisearch releases and their associated binaries are available [in this GitHub page](https://github.com/meilisearch/meilisearch/releases).
Meilisearch releases and their associated binaries are available on the project's [releases page](https://github.com/meilisearch/meilisearch/releases).

The binaries are versioned following [SemVer conventions](https://semver.org/). To know more, read our [versioning policy](https://github.com/meilisearch/engine-team/blob/main/resources/versioning-policy.md).
@ -1403,6 +1403,398 @@
|
||||
"title": "Number of tasks by indexes",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 15,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 51
|
||||
},
|
||||
"id": 29,
|
||||
"interval": "5s",
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "right",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.4",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "meilisearch_task_queue_latency_seconds{instance=\"$instance\", job=\"$job\"}",
|
||||
"interval": "",
|
||||
"legendFormat": "{{value}} ",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Task queue latency",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 15,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 51
|
||||
},
|
||||
"id": 29,
|
||||
"interval": "5s",
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "right",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.4",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "meilisearch_task_queue_used_size{instance=\"$instance\", job=\"$job\"}",
|
||||
"interval": "",
|
||||
"legendFormat": "{{value}} ",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Task queue used size in bytes",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 15,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 51
|
||||
},
|
||||
"id": 29,
|
||||
"interval": "5s",
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "right",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.4",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "meilisearch_task_queue_size_until_stop_registering{instance=\"$instance\", job=\"$job\"}",
|
||||
"interval": "",
|
||||
"legendFormat": "{{value}} ",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Task queue available size until it stop receiving tasks.",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 15,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 51
|
||||
},
|
||||
"id": 29,
|
||||
"interval": "5s",
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "right",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.4",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "meilisearch_task_queue_max_size{instance=\"$instance\", job=\"$job\"}",
|
||||
"interval": "",
|
||||
"legendFormat": "{{value}} ",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Task queue maximum possible size",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": {
|
||||
|
BIN  assets/ph-banner.png (new binary file, 578 KiB, not shown)
@ -1,256 +0,0 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::fs::{create_dir_all, remove_dir_all, File};
|
||||
use std::io::{self, BufRead, BufReader, Cursor, Read, Seek};
|
||||
use std::num::ParseFloatError;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
|
||||
use criterion::BenchmarkId;
|
||||
use milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
|
||||
use milli::heed::EnvOpenOptions;
|
||||
use milli::update::{
|
||||
IndexDocuments, IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig, Settings,
|
||||
};
|
||||
use milli::{Criterion, Filter, Index, Object, TermsMatchingStrategy};
|
||||
use serde_json::Value;
|
||||
|
||||
pub struct Conf<'a> {
|
||||
/// where we are going to create our database.mmdb directory
|
||||
/// each benchmark will first try to delete it and then recreate it
|
||||
pub database_name: &'a str,
|
||||
/// the dataset to be used, it must be an uncompressed csv
|
||||
pub dataset: &'a str,
|
||||
/// The format of the dataset
|
||||
pub dataset_format: &'a str,
|
||||
pub group_name: &'a str,
|
||||
pub queries: &'a [&'a str],
|
||||
/// here you can change which criterion are used and in which order.
|
||||
/// - if you specify something all the base configuration will be thrown out
|
||||
/// - if you don't specify anything (None) the default configuration will be kept
|
||||
pub criterion: Option<&'a [&'a str]>,
|
||||
/// the last chance to configure your database as you want
|
||||
pub configure: fn(&mut Settings),
|
||||
pub filter: Option<&'a str>,
|
||||
pub sort: Option<Vec<&'a str>>,
|
||||
/// enable or disable the optional words on the query
|
||||
pub optional_words: bool,
|
||||
/// primary key, if there is None we'll auto-generate docids for every documents
|
||||
pub primary_key: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl Conf<'_> {
|
||||
pub const BASE: Self = Conf {
|
||||
database_name: "benches.mmdb",
|
||||
dataset_format: "csv",
|
||||
dataset: "",
|
||||
group_name: "",
|
||||
queries: &[],
|
||||
criterion: None,
|
||||
configure: |_| (),
|
||||
filter: None,
|
||||
sort: None,
|
||||
optional_words: true,
|
||||
primary_key: None,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn base_setup(conf: &Conf) -> Index {
|
||||
match remove_dir_all(conf.database_name) {
|
||||
Ok(_) => (),
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => (),
|
||||
Err(e) => panic!("{}", e),
|
||||
}
|
||||
create_dir_all(conf.database_name).unwrap();
|
||||
|
||||
let mut options = EnvOpenOptions::new();
|
||||
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
|
||||
options.max_readers(10);
|
||||
let index = Index::new(options, conf.database_name).unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let mut builder = Settings::new(&mut wtxn, &index, &config);
|
||||
|
||||
if let Some(primary_key) = conf.primary_key {
|
||||
builder.set_primary_key(primary_key.to_string());
|
||||
}
|
||||
|
||||
if let Some(criterion) = conf.criterion {
|
||||
builder.reset_filterable_fields();
|
||||
builder.reset_criteria();
|
||||
builder.reset_stop_words();
|
||||
|
||||
let criterion = criterion.iter().map(|s| Criterion::from_str(s).unwrap()).collect();
|
||||
builder.set_criteria(criterion);
|
||||
}
|
||||
|
||||
(conf.configure)(&mut builder);
|
||||
|
||||
builder.execute(|_| (), || false).unwrap();
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let indexing_config = IndexDocumentsConfig {
|
||||
autogenerate_docids: conf.primary_key.is_none(),
|
||||
update_method: IndexDocumentsMethod::ReplaceDocuments,
|
||||
..Default::default()
|
||||
};
|
||||
let builder =
|
||||
IndexDocuments::new(&mut wtxn, &index, &config, indexing_config, |_| (), || false).unwrap();
|
||||
let documents = documents_from(conf.dataset, conf.dataset_format);
|
||||
let (builder, user_error) = builder.add_documents(documents).unwrap();
|
||||
user_error.unwrap();
|
||||
builder.execute().unwrap();
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
index
|
||||
}
|
||||
|
||||
pub fn run_benches(c: &mut criterion::Criterion, confs: &[Conf]) {
|
||||
for conf in confs {
|
||||
let index = base_setup(conf);
|
||||
|
||||
let file_name = Path::new(conf.dataset).file_name().and_then(|f| f.to_str()).unwrap();
|
||||
let name = format!("{}: {}", file_name, conf.group_name);
|
||||
let mut group = c.benchmark_group(&name);
|
||||
|
||||
for &query in conf.queries {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(query), &query, |b, &query| {
|
||||
b.iter(|| {
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let mut search = index.search(&rtxn);
|
||||
search.query(query).terms_matching_strategy(TermsMatchingStrategy::default());
|
||||
if let Some(filter) = conf.filter {
|
||||
let filter = Filter::from_str(filter).unwrap().unwrap();
|
||||
search.filter(filter);
|
||||
}
|
||||
if let Some(sort) = &conf.sort {
|
||||
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
|
||||
search.sort_criteria(sort);
|
||||
}
|
||||
let _ids = search.execute().unwrap();
|
||||
});
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
|
||||
index.prepare_for_closing().wait();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn documents_from(filename: &str, filetype: &str) -> DocumentsBatchReader<impl BufRead + Seek> {
|
||||
let reader = File::open(filename)
|
||||
.unwrap_or_else(|_| panic!("could not find the dataset in: {}", filename));
|
||||
let reader = BufReader::new(reader);
|
||||
let documents = match filetype {
|
||||
"csv" => documents_from_csv(reader).unwrap(),
|
||||
"json" => documents_from_json(reader).unwrap(),
|
||||
"jsonl" => documents_from_jsonl(reader).unwrap(),
|
||||
otherwise => panic!("invalid update format {:?}", otherwise),
|
||||
};
|
||||
DocumentsBatchReader::from_reader(Cursor::new(documents)).unwrap()
|
||||
}
|
||||
|
||||
fn documents_from_jsonl(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
|
||||
for result in serde_json::Deserializer::from_reader(reader).into_iter::<Object>() {
|
||||
let object = result?;
|
||||
documents.append_json_object(&object)?;
|
||||
}
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_json(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
|
||||
documents.append_json_array(reader)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_csv(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let csv = csv::Reader::from_reader(reader);
|
||||
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
documents.append_csv(csv)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
enum AllowedType {
|
||||
String,
|
||||
Number,
|
||||
}
|
||||
|
||||
fn parse_csv_header(header: &str) -> (String, AllowedType) {
|
||||
// if there are several separators we only split on the last one.
|
||||
match header.rsplit_once(':') {
|
||||
Some((field_name, field_type)) => match field_type {
|
||||
"string" => (field_name.to_string(), AllowedType::String),
|
||||
"number" => (field_name.to_string(), AllowedType::Number),
|
||||
// we may return an error in this case.
|
||||
_otherwise => (header.to_string(), AllowedType::String),
|
||||
},
|
||||
None => (header.to_string(), AllowedType::String),
|
||||
}
|
||||
}
|
||||
|
||||
struct CSVDocumentDeserializer<R>
|
||||
where
|
||||
R: Read,
|
||||
{
|
||||
documents: csv::StringRecordsIntoIter<R>,
|
||||
headers: Vec<(String, AllowedType)>,
|
||||
}
|
||||
|
||||
impl<R: Read> CSVDocumentDeserializer<R> {
|
||||
fn from_reader(reader: R) -> io::Result<Self> {
|
||||
let mut records = csv::Reader::from_reader(reader);
|
||||
|
||||
let headers = records.headers()?.into_iter().map(parse_csv_header).collect();
|
||||
|
||||
Ok(Self { documents: records.into_records(), headers })
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: Read> Iterator for CSVDocumentDeserializer<R> {
|
||||
type Item = anyhow::Result<Object>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let csv_document = self.documents.next()?;
|
||||
|
||||
match csv_document {
|
||||
Ok(csv_document) => {
|
||||
let mut document = Object::new();
|
||||
|
||||
for ((field_name, field_type), value) in
|
||||
self.headers.iter().zip(csv_document.into_iter())
|
||||
{
|
||||
let parsed_value: Result<Value, ParseFloatError> = match field_type {
|
||||
AllowedType::Number => {
|
||||
value.parse::<f64>().map(Value::from).map_err(Into::into)
|
||||
}
|
||||
AllowedType::String => Ok(Value::String(value.to_string())),
|
||||
};
|
||||
|
||||
match parsed_value {
|
||||
Ok(value) => drop(document.insert(field_name.to_string(), value)),
|
||||
Err(_e) => {
|
||||
return Some(Err(anyhow::anyhow!(
|
||||
"Value '{}' is not a valid number",
|
||||
value
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(Ok(document))
|
||||
}
|
||||
Err(e) => Some(Err(anyhow::anyhow!("Error parsing csv document: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
11  bors.toml
@@ -1,11 +0,0 @@
status = [
    'Tests on ubuntu-18.04',
    'Tests on macos-12',
    'Tests on windows-2022',
    'Run Clippy',
    'Run Rustfmt',
    'Run tests in debug',
]
pr_status = ['Milestone Check']
# 3 hours timeout
timeout-sec = 10800
@@ -11,24 +11,27 @@ edition.workspace = true
license.workspace = true

[dependencies]
anyhow = "1.0.79"
csv = "1.3.0"
anyhow = "1.0.98"
bumpalo = "3.18.1"
csv = "1.3.1"
memmap2 = "0.9.5"
milli = { path = "../milli" }
mimalloc = { version = "0.1.39", default-features = false }
serde_json = { version = "1.0.111", features = ["preserve_order"] }
mimalloc = { version = "0.1.47", default-features = false }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tempfile = "3.20.0"

[dev-dependencies]
criterion = { version = "0.5.1", features = ["html_reports"] }
criterion = { version = "0.6.0", features = ["html_reports"] }
rand = "0.8.5"
rand_chacha = "0.3.1"
roaring = "0.10.2"
roaring = "0.10.12"

[build-dependencies]
anyhow = "1.0.79"
bytes = "1.5.0"
convert_case = "0.6.0"
flate2 = "1.0.28"
reqwest = { version = "0.11.23", features = ["blocking", "rustls-tls"], default-features = false }
anyhow = "1.0.98"
bytes = "1.10.1"
convert_case = "0.8.0"
flate2 = "1.1.2"
reqwest = { version = "0.12.20", features = ["blocking", "rustls-tls"], default-features = false }

[features]
default = ["milli/all-tokenizations"]
@@ -48,3 +51,8 @@ harness = false
[[bench]]
name = "indexing"
harness = false

[[bench]]
name = "sort"
harness = false
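The new `[[bench]]` entry with `harness = false` above means the `sort` benchmark supplies its own Criterion entry point. A minimal, self-contained sketch of that shape (the function name and the measured closure are illustrative only; the real benches live under `crates/benchmarks/benches/`):

```rust
use criterion::{criterion_group, criterion_main, Criterion};

// Illustrative benchmark body: sums a small range instead of querying an index.
fn bench_example(c: &mut Criterion) {
    c.bench_function("example", |b| b.iter(|| (0..1_000u64).sum::<u64>()));
}

criterion_group!(benches, bench_example);
criterion_main!(benches);
```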
2062  crates/benchmarks/benches/indexing.rs (new file): diff suppressed because it is too large
@@ -3,8 +3,10 @@ mod utils;

use criterion::{criterion_group, criterion_main};
use milli::update::Settings;
use milli::FilterableAttributesRule;
use utils::Conf;

#[cfg(not(windows))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

@@ -20,8 +22,10 @@ fn base_conf(builder: &mut Settings) {
        ["name", "alternatenames", "elevation"].iter().map(|s| s.to_string()).collect();
    builder.set_searchable_fields(searchable_fields);

    let filterable_fields =
        ["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
    let filterable_fields = ["_geo", "population", "elevation"]
        .iter()
        .map(|s| FilterableAttributesRule::Field(s.to_string()))
        .collect();
    builder.set_filterable_fields(filterable_fields);

    let sortable_fields =
@@ -3,8 +3,10 @@ mod utils;

use criterion::{criterion_group, criterion_main};
use milli::update::Settings;
use milli::FilterableAttributesRule;
use utils::Conf;

#[cfg(not(windows))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

@@ -21,7 +23,7 @@ fn base_conf(builder: &mut Settings) {

    let faceted_fields = ["released-timestamp", "duration-float", "genre", "country", "artist"]
        .iter()
        .map(|s| s.to_string())
        .map(|s| FilterableAttributesRule::Field(s.to_string()))
        .collect();
    builder.set_filterable_fields(faceted_fields);
}
@@ -5,6 +5,7 @@ use criterion::{criterion_group, criterion_main};
use milli::update::Settings;
use utils::Conf;

#[cfg(not(windows))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
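The three hunks above all make the same change: plain field-name strings are wrapped in `FilterableAttributesRule::Field` before being handed to `set_filterable_fields`. A standalone sketch of the pattern, using a simplified stand-in enum rather than milli's actual type:

```rust
// Simplified stand-in for milli's FilterableAttributesRule (illustration only).
#[derive(Debug)]
enum Rule {
    Field(String),
}

fn main() {
    let filterable: Vec<Rule> = ["_geo", "population", "elevation"]
        .iter()
        .map(|s| Rule::Field(s.to_string()))
        .collect();
    println!("{filterable:?}");
}
```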
114  crates/benchmarks/benches/sort.rs (new file)
@ -0,0 +1,114 @@
|
||||
//! This benchmark module is used to compare the performance of sorting documents in /search VS /documents
|
||||
//!
|
||||
//! The tests/benchmarks were designed in the context of a query returning only 20 documents.
|
||||
|
||||
mod datasets_paths;
|
||||
mod utils;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::update::Settings;
|
||||
use utils::Conf;
|
||||
|
||||
#[cfg(not(windows))]
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn base_conf(builder: &mut Settings) {
|
||||
let displayed_fields =
|
||||
["geonameid", "name", "asciiname", "alternatenames", "_geo", "population"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_displayed_fields(displayed_fields);
|
||||
|
||||
let sortable_fields =
|
||||
["_geo", "name", "population", "elevation", "timezone", "modification-date"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_sortable_fields(sortable_fields);
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const BASE_CONF: Conf = Conf {
|
||||
dataset: datasets_paths::SMOL_ALL_COUNTRIES,
|
||||
dataset_format: "jsonl",
|
||||
configure: base_conf,
|
||||
primary_key: Some("geonameid"),
|
||||
queries: &[""],
|
||||
offsets: &[
|
||||
Some((0, 20)), // The most common query in the real world
|
||||
Some((0, 500)), // A query that ranges over many documents
|
||||
Some((980, 20)), // The worst query that could happen in the real world
|
||||
Some((800_000, 20)) // The worst query
|
||||
],
|
||||
get_documents: true,
|
||||
..Conf::BASE
|
||||
};
|
||||
|
||||
fn bench_sort(c: &mut criterion::Criterion) {
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
utils::Conf {
|
||||
group_name: "without sort",
|
||||
sort: None,
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "sort on many different values",
|
||||
sort: Some(vec!["name:asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "sort on many similar values",
|
||||
sort: Some(vec!["timezone:desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "sort on many similar then different values",
|
||||
sort: Some(vec!["timezone:desc", "name:asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "sort on many different then similar values",
|
||||
sort: Some(vec!["timezone:desc", "name:asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "geo sort",
|
||||
sample_size: Some(10),
|
||||
sort: Some(vec!["_geoPoint(45.4777599, 9.1967508):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "sort on many similar values then geo sort",
|
||||
sample_size: Some(50),
|
||||
sort: Some(vec!["timezone:desc", "_geoPoint(45.4777599, 9.1967508):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "sort on many different values then geo sort",
|
||||
sample_size: Some(50),
|
||||
sort: Some(vec!["name:desc", "_geoPoint(45.4777599, 9.1967508):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
utils::Conf {
|
||||
group_name: "sort on many fields",
|
||||
sort: Some(vec!["population:asc", "name:asc", "elevation:asc", "timezone:asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
];
|
||||
|
||||
utils::run_benches(c, confs);
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_sort);
|
||||
criterion_main!(benches);
|
401  crates/benchmarks/benches/utils.rs (new file)
@ -0,0 +1,401 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::fs::{create_dir_all, remove_dir_all, File};
|
||||
use std::io::{self, BufReader, BufWriter, Read};
|
||||
use std::path::Path;
|
||||
use std::str::FromStr as _;
|
||||
|
||||
use anyhow::Context;
|
||||
use bumpalo::Bump;
|
||||
use criterion::BenchmarkId;
|
||||
use memmap2::Mmap;
|
||||
use milli::documents::sort::recursive_sort;
|
||||
use milli::heed::EnvOpenOptions;
|
||||
use milli::progress::Progress;
|
||||
use milli::update::new::indexer;
|
||||
use milli::update::{IndexerConfig, Settings};
|
||||
use milli::vector::RuntimeEmbedders;
|
||||
use milli::{Criterion, Filter, Index, Object, TermsMatchingStrategy};
|
||||
use serde_json::Value;
|
||||
|
||||
pub struct Conf<'a> {
|
||||
/// where we are going to create our database.mmdb directory
|
||||
/// each benchmark will first try to delete it and then recreate it
|
||||
pub database_name: &'a str,
|
||||
/// the dataset to be used, it must be an uncompressed csv
|
||||
pub dataset: &'a str,
|
||||
/// The format of the dataset
|
||||
pub dataset_format: &'a str,
|
||||
pub group_name: &'a str,
|
||||
pub queries: &'a [&'a str],
|
||||
/// here you can change which criterion are used and in which order.
|
||||
/// - if you specify something all the base configuration will be thrown out
|
||||
/// - if you don't specify anything (None) the default configuration will be kept
|
||||
pub criterion: Option<&'a [&'a str]>,
|
||||
/// the last chance to configure your database as you want
|
||||
pub configure: fn(&mut Settings),
|
||||
pub filter: Option<&'a str>,
|
||||
pub sort: Option<Vec<&'a str>>,
|
||||
/// set to skip documents (offset, limit)
|
||||
pub offsets: &'a [Option<(usize, usize)>],
|
||||
/// enable if you want to bench getting documents without querying
|
||||
pub get_documents: bool,
|
||||
/// configure the benchmark sample size
|
||||
pub sample_size: Option<usize>,
|
||||
/// enable or disable the optional words on the query
|
||||
pub optional_words: bool,
|
||||
/// primary key, if there is None we'll auto-generate docids for every documents
|
||||
pub primary_key: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl Conf<'_> {
|
||||
pub const BASE: Self = Conf {
|
||||
database_name: "benches.mmdb",
|
||||
dataset_format: "csv",
|
||||
dataset: "",
|
||||
group_name: "",
|
||||
queries: &[],
|
||||
criterion: None,
|
||||
configure: |_| (),
|
||||
filter: None,
|
||||
sort: None,
|
||||
offsets: &[None],
|
||||
get_documents: false,
|
||||
sample_size: None,
|
||||
optional_words: true,
|
||||
primary_key: None,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn base_setup(conf: &Conf) -> Index {
|
||||
match remove_dir_all(conf.database_name) {
|
||||
Ok(_) => (),
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => (),
|
||||
Err(e) => panic!("{}", e),
|
||||
}
|
||||
create_dir_all(conf.database_name).unwrap();
|
||||
|
||||
let options = EnvOpenOptions::new();
|
||||
let mut options = options.read_txn_without_tls();
|
||||
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
|
||||
options.max_readers(100);
|
||||
let index = Index::new(options, conf.database_name, true).unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let mut builder = Settings::new(&mut wtxn, &index, &config);
|
||||
|
||||
if let Some(primary_key) = conf.primary_key {
|
||||
builder.set_primary_key(primary_key.to_string());
|
||||
}
|
||||
|
||||
if let Some(criterion) = conf.criterion {
|
||||
builder.reset_filterable_fields();
|
||||
builder.reset_criteria();
|
||||
builder.reset_stop_words();
|
||||
|
||||
let criterion = criterion.iter().map(|s| Criterion::from_str(s).unwrap()).collect();
|
||||
builder.set_criteria(criterion);
|
||||
}
|
||||
|
||||
(conf.configure)(&mut builder);
|
||||
|
||||
builder.execute(&|| false, &Progress::default(), Default::default()).unwrap();
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||
|
||||
let documents = documents_from(conf.dataset, conf.dataset_format);
|
||||
let mut indexer = indexer::DocumentOperation::new();
|
||||
indexer.replace_documents(&documents).unwrap();
|
||||
|
||||
let indexer_alloc = Bump::new();
|
||||
let (document_changes, _operation_stats, primary_key) = indexer
|
||||
.into_changes(
|
||||
&indexer_alloc,
|
||||
&index,
|
||||
&rtxn,
|
||||
None,
|
||||
&mut new_fields_ids_map,
|
||||
&|| false,
|
||||
Progress::default(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
indexer::index(
|
||||
&mut wtxn,
|
||||
&index,
|
||||
&milli::ThreadPoolNoAbortBuilder::new().build().unwrap(),
|
||||
config.grenad_parameters(),
|
||||
&db_fields_ids_map,
|
||||
new_fields_ids_map,
|
||||
primary_key,
|
||||
&document_changes,
|
||||
RuntimeEmbedders::default(),
|
||||
&|| false,
|
||||
&Progress::default(),
|
||||
&Default::default(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
wtxn.commit().unwrap();
|
||||
drop(rtxn);
|
||||
|
||||
index
|
||||
}
|
||||
|
||||
pub fn run_benches(c: &mut criterion::Criterion, confs: &[Conf]) {
|
||||
for conf in confs {
|
||||
let index = base_setup(conf);
|
||||
|
||||
let file_name = Path::new(conf.dataset).file_name().and_then(|f| f.to_str()).unwrap();
|
||||
let name = format!("{}: {}", file_name, conf.group_name);
|
||||
let mut group = c.benchmark_group(&name);
|
||||
if let Some(sample_size) = conf.sample_size {
|
||||
group.sample_size(sample_size);
|
||||
}
|
||||
|
||||
for &query in conf.queries {
|
||||
for offset in conf.offsets {
|
||||
let parameter = match offset {
|
||||
None => query.to_string(),
|
||||
Some((offset, limit)) => format!("{query}[{offset}:{limit}]"),
|
||||
};
|
||||
group.bench_with_input(
|
||||
BenchmarkId::from_parameter(parameter),
|
||||
&query,
|
||||
|b, &query| {
|
||||
b.iter(|| {
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let mut search = index.search(&rtxn);
|
||||
search
|
||||
.query(query)
|
||||
.terms_matching_strategy(TermsMatchingStrategy::default());
|
||||
if let Some(filter) = conf.filter {
|
||||
let filter = Filter::from_str(filter).unwrap().unwrap();
|
||||
search.filter(filter);
|
||||
}
|
||||
if let Some(sort) = &conf.sort {
|
||||
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
|
||||
search.sort_criteria(sort);
|
||||
}
|
||||
if let Some((offset, limit)) = offset {
|
||||
search.offset(*offset).limit(*limit);
|
||||
}
|
||||
|
||||
let _ids = search.execute().unwrap();
|
||||
});
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if conf.get_documents {
|
||||
for offset in conf.offsets {
|
||||
let parameter = match offset {
|
||||
None => String::from("get_documents"),
|
||||
Some((offset, limit)) => format!("get_documents[{offset}:{limit}]"),
|
||||
};
|
||||
group.bench_with_input(BenchmarkId::from_parameter(parameter), &(), |b, &()| {
|
||||
b.iter(|| {
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
if let Some(sort) = &conf.sort {
|
||||
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
|
||||
let all_docs = index.documents_ids(&rtxn).unwrap();
|
||||
let facet_sort =
|
||||
recursive_sort(&index, &rtxn, sort, &all_docs).unwrap();
|
||||
let iter = facet_sort.iter().unwrap();
|
||||
if let Some((offset, limit)) = offset {
|
||||
let _results = iter.skip(*offset).take(*limit).collect::<Vec<_>>();
|
||||
} else {
|
||||
let _results = iter.collect::<Vec<_>>();
|
||||
}
|
||||
} else {
|
||||
let all_docs = index.documents_ids(&rtxn).unwrap();
|
||||
if let Some((offset, limit)) = offset {
|
||||
let _results =
|
||||
all_docs.iter().skip(*offset).take(*limit).collect::<Vec<_>>();
|
||||
} else {
|
||||
let _results = all_docs.iter().collect::<Vec<_>>();
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
group.finish();
|
||||
|
||||
index.prepare_for_closing().wait();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn documents_from(filename: &str, filetype: &str) -> Mmap {
|
||||
let file = File::open(filename)
|
||||
.unwrap_or_else(|_| panic!("could not find the dataset in: {filename}"));
|
||||
match filetype {
|
||||
"csv" => documents_from_csv(file).unwrap(),
|
||||
"json" => documents_from_json(file).unwrap(),
|
||||
"jsonl" => documents_from_jsonl(file).unwrap(),
|
||||
otherwise => panic!("invalid update format {otherwise:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn documents_from_jsonl(file: File) -> anyhow::Result<Mmap> {
|
||||
unsafe { Mmap::map(&file).map_err(Into::into) }
|
||||
}
|
||||
|
||||
fn documents_from_json(file: File) -> anyhow::Result<Mmap> {
|
||||
let reader = BufReader::new(file);
|
||||
let documents: Vec<milli::Object> = serde_json::from_reader(reader)?;
|
||||
let mut output = tempfile::tempfile().map(BufWriter::new)?;
|
||||
|
||||
for document in documents {
|
||||
serde_json::to_writer(&mut output, &document)?;
|
||||
}
|
||||
|
||||
let file = output.into_inner()?;
|
||||
unsafe { Mmap::map(&file).map_err(Into::into) }
|
||||
}
|
||||
|
||||
fn documents_from_csv(file: File) -> anyhow::Result<Mmap> {
|
||||
let output = tempfile::tempfile()?;
|
||||
let mut output = BufWriter::new(output);
|
||||
let mut reader = csv::ReaderBuilder::new().from_reader(file);
|
||||
|
||||
let headers = reader.headers().context("while retrieving headers")?.clone();
|
||||
let typed_fields: Vec<_> = headers.iter().map(parse_csv_header).collect();
|
||||
let mut object: serde_json::Map<_, _> =
|
||||
typed_fields.iter().map(|(k, _)| (k.to_string(), Value::Null)).collect();
|
||||
|
||||
let mut line = 0;
|
||||
let mut record = csv::StringRecord::new();
|
||||
while reader.read_record(&mut record).context("while reading a record")? {
|
||||
// We increment here and not at the end of the loop
|
||||
// to take the header offset into account.
|
||||
line += 1;
|
||||
|
||||
// Reset the document values
|
||||
object.iter_mut().for_each(|(_, v)| *v = Value::Null);
|
||||
|
||||
for (i, (name, atype)) in typed_fields.iter().enumerate() {
|
||||
let value = &record[i];
|
||||
let trimmed_value = value.trim();
|
||||
let value = match atype {
|
||||
AllowedType::Number if trimmed_value.is_empty() => Value::Null,
|
||||
AllowedType::Number => {
|
||||
match trimmed_value.parse::<i64>() {
|
||||
Ok(integer) => Value::from(integer),
|
||||
Err(_) => match trimmed_value.parse::<f64>() {
|
||||
Ok(float) => Value::from(float),
|
||||
Err(error) => {
|
||||
anyhow::bail!("document format error on line {line}: {error}. For value: {value}")
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
AllowedType::Boolean if trimmed_value.is_empty() => Value::Null,
|
||||
AllowedType::Boolean => match trimmed_value.parse::<bool>() {
|
||||
Ok(bool) => Value::from(bool),
|
||||
Err(error) => {
|
||||
anyhow::bail!(
|
||||
"document format error on line {line}: {error}. For value: {value}"
|
||||
)
|
||||
}
|
||||
},
|
||||
AllowedType::String if value.is_empty() => Value::Null,
|
||||
AllowedType::String => Value::from(value),
|
||||
};
|
||||
|
||||
*object.get_mut(name).expect("encountered an unknown field") = value;
|
||||
}
|
||||
|
||||
serde_json::to_writer(&mut output, &object).context("while writing to disk")?;
|
||||
}
|
||||
|
||||
let output = output.into_inner()?;
|
||||
unsafe { Mmap::map(&output).map_err(Into::into) }
|
||||
}
|
||||
|
||||
enum AllowedType {
|
||||
String,
|
||||
Boolean,
|
||||
Number,
|
||||
}
|
||||
|
||||
fn parse_csv_header(header: &str) -> (String, AllowedType) {
|
||||
// if there are several separators we only split on the last one.
|
||||
match header.rsplit_once(':') {
|
||||
Some((field_name, field_type)) => match field_type {
|
||||
"string" => (field_name.to_string(), AllowedType::String),
|
||||
"boolean" => (field_name.to_string(), AllowedType::Boolean),
|
||||
"number" => (field_name.to_string(), AllowedType::Number),
|
||||
// if the pattern isn't recognized, we keep the whole field.
|
||||
_otherwise => (header.to_string(), AllowedType::String),
|
||||
},
|
||||
None => (header.to_string(), AllowedType::String),
|
||||
}
|
||||
}
|
||||
|
||||
struct CSVDocumentDeserializer<R>
|
||||
where
|
||||
R: Read,
|
||||
{
|
||||
documents: csv::StringRecordsIntoIter<R>,
|
||||
headers: Vec<(String, AllowedType)>,
|
||||
}
|
||||
|
||||
impl<R: Read> CSVDocumentDeserializer<R> {
|
||||
fn from_reader(reader: R) -> io::Result<Self> {
|
||||
let mut records = csv::Reader::from_reader(reader);
|
||||
|
||||
let headers = records.headers()?.into_iter().map(parse_csv_header).collect();
|
||||
|
||||
Ok(Self { documents: records.into_records(), headers })
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: Read> Iterator for CSVDocumentDeserializer<R> {
|
||||
type Item = anyhow::Result<Object>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let csv_document = self.documents.next()?;
|
||||
|
||||
match csv_document {
|
||||
Ok(csv_document) => {
|
||||
let mut document = Object::new();
|
||||
|
||||
for ((field_name, field_type), value) in
|
||||
self.headers.iter().zip(csv_document.into_iter())
|
||||
{
|
||||
let parsed_value: anyhow::Result<Value> = match field_type {
|
||||
AllowedType::Number => {
|
||||
value.parse::<f64>().map(Value::from).map_err(Into::into)
|
||||
}
|
||||
AllowedType::Boolean => {
|
||||
value.parse::<bool>().map(Value::from).map_err(Into::into)
|
||||
}
|
||||
AllowedType::String => Ok(Value::String(value.to_string())),
|
||||
};
|
||||
|
||||
match parsed_value {
|
||||
Ok(value) => drop(document.insert(field_name.to_string(), value)),
|
||||
Err(_e) => {
|
||||
return Some(Err(anyhow::anyhow!(
|
||||
"Value '{}' is not a valid number",
|
||||
value
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(Ok(document))
|
||||
}
|
||||
Err(e) => Some(Err(anyhow::anyhow!("Error parsing csv document: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
@@ -67,7 +67,7 @@ fn main() -> anyhow::Result<()> {
        writeln!(
            &mut manifest_paths_file,
            r#"pub const {}: &str = {:?};"#,
            dataset.to_case(Case::ScreamingSnake),
            dataset.to_case(Case::UpperSnake),
            out_file.display(),
        )?;
@@ -11,8 +11,8 @@ license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
time = { version = "0.3.34", features = ["parsing"] }
time = { version = "0.3.41", features = ["parsing"] }

[build-dependencies]
anyhow = "1.0.80"
vergen-git2 = "1.0.0-beta.2"
anyhow = "1.0.98"
vergen-git2 = "1.0.7"
@@ -5,6 +5,13 @@ fn main() {
}

fn emit_git_variables() -> anyhow::Result<()> {
    println!("cargo::rerun-if-env-changed=MEILI_NO_VERGEN");

    let has_vergen =
        !matches!(std::env::var_os("MEILI_NO_VERGEN"), Some(x) if x != "false" && x != "0");

    anyhow::ensure!(has_vergen, "disabled via `MEILI_NO_VERGEN`");

    // Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
    // in the corresponding GitHub workflow (publish_docker.yml).
    // This is due to the Dockerfile building the binary outside of the git directory.
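To make the env-var check in `emit_git_variables` above easier to follow, here is a small standalone sketch that evaluates the same `matches!` expression for a few possible `MEILI_NO_VERGEN` values (plain string options stand in for the real `OsString` lookup):

```rust
fn main() {
    // Only an unset variable, "false", or "0" keep vergen enabled.
    for val in [None, Some("false"), Some("0"), Some("1"), Some("true")] {
        let has_vergen = !matches!(val, Some(x) if x != "false" && x != "0");
        println!("MEILI_NO_VERGEN = {val:?} -> vergen enabled: {has_vergen}");
    }
}
```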
34  crates/dump/Cargo.toml (new file)
@@ -0,0 +1,34 @@
[package]
name = "dump"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
anyhow = "1.0.98"
flate2 = "1.1.2"
http = "1.3.1"
meilisearch-types = { path = "../meilisearch-types" }
once_cell = "1.21.3"
regex = "1.11.1"
roaring = { version = "0.10.12", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
tempfile = "3.20.0"
thiserror = "2.0.12"
time = { version = "0.3.41", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tracing = "0.1.41"
uuid = { version = "1.17.0", features = ["serde", "v4"] }

[dev-dependencies]
big_s = "1.0.2"
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }
meilisearch-types = { path = "../meilisearch-types" }
@@ -10,8 +10,10 @@ dump
├── instance-uid.uuid
├── keys.jsonl
├── metadata.json
└── tasks
    ├── update_files
    │   └── [task_id].jsonl
├── tasks
│   ├── update_files
│   │   └── [task_id].jsonl
│   └── queue.jsonl
└── batches
    └── queue.jsonl
```
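Both `queue.jsonl` files in the layout above are JSON Lines files, one JSON object per line. A minimal, hypothetical reader for such a file (not part of the dump crate; it only assumes the `anyhow` and `serde_json` dependencies already listed in `crates/dump/Cargo.toml`):

```rust
use std::fs::File;
use std::io::{BufRead, BufReader};

// Hypothetical helper: collect every JSON object from a `queue.jsonl` file.
fn read_queue(path: &str) -> anyhow::Result<Vec<serde_json::Value>> {
    let reader = BufReader::new(File::open(path)?);
    let mut entries = Vec::new();
    for line in reader.lines() {
        let line = line?;
        if line.trim().is_empty() {
            continue; // tolerate trailing blank lines
        }
        entries.push(serde_json::from_str(&line)?);
    }
    Ok(entries)
}

fn main() -> anyhow::Result<()> {
    let tasks = read_queue("dump/tasks/queue.jsonl")?;
    println!("{} entries in the task queue", tasks.len());
    Ok(())
}
```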
@ -1,11 +1,17 @@
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![allow(clippy::wrong_self_convention)]
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use meilisearch_types::batches::BatchId;
|
||||
use meilisearch_types::byte_unit::Byte;
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::keys::Key;
|
||||
use meilisearch_types::milli::update::IndexDocumentsMethod;
|
||||
use meilisearch_types::settings::Unchecked;
|
||||
use meilisearch_types::tasks::{Details, IndexSwap, KindWithContent, Status, Task, TaskId};
|
||||
use meilisearch_types::tasks::{
|
||||
Details, ExportIndexSettings, IndexSwap, KindWithContent, Status, Task, TaskId,
|
||||
};
|
||||
use meilisearch_types::InstanceUid;
|
||||
use roaring::RoaringBitmap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -57,6 +63,9 @@ pub enum Version {
#[serde(rename_all = "camelCase")]
pub struct TaskDump {
    pub uid: TaskId,
    // Batch IDs were introduced in v1.12; everything prior to this version will be `None`.
    #[serde(default)]
    pub batch_uid: Option<BatchId>,
    #[serde(default)]
    pub index_uid: Option<String>,
    pub status: Status,
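Because batch_uid is optional and annotated with #[serde(default)], task entries written by dumps that predate v1.12 (which carry no batchUid field at all) still deserialize cleanly, with the field falling back to None. A self-contained illustration of that behavior (toy struct, serde and serde_json assumed as dependencies):

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TaskEntry {
    uid: u32,
    #[serde(default)]
    batch_uid: Option<u32>,
}

fn main() {
    // An entry from a pre-v1.12 dump: no batchUid field at all.
    let old: TaskEntry = serde_json::from_str(r#"{"uid": 7}"#).unwrap();
    assert_eq!(old.batch_uid, None);

    // An entry from a newer dump carries the batch id.
    let new: TaskEntry = serde_json::from_str(r#"{"uid": 8, "batchUid": 0}"#).unwrap();
    assert_eq!(new.batch_uid, Some(0));
}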
@@ -104,6 +113,11 @@ pub enum KindDump {
    DocumentDeletionByFilter {
        filter: serde_json::Value,
    },
    DocumentEdition {
        filter: Option<serde_json::Value>,
        context: Option<serde_json::Map<String, serde_json::Value>>,
        function: String,
    },
    Settings {
        settings: Box<meilisearch_types::settings::Settings<Unchecked>>,
        is_deletion: bool,
@@ -132,12 +146,22 @@ pub enum KindDump {
        instance_uid: Option<InstanceUid>,
    },
    SnapshotCreation,
    Export {
        url: String,
        api_key: Option<String>,
        payload_size: Option<Byte>,
        indexes: BTreeMap<String, ExportIndexSettings>,
    },
    UpgradeDatabase {
        from: (u32, u32, u32),
    },
}

impl From<Task> for TaskDump {
    fn from(task: Task) -> Self {
        TaskDump {
            uid: task.uid,
            batch_uid: task.batch_uid,
            index_uid: task.index_uid().map(|uid| uid.to_string()),
            status: task.status,
            kind: task.kind.into(),
@@ -172,6 +196,9 @@ impl From<KindWithContent> for KindDump {
            KindWithContent::DocumentDeletionByFilter { filter_expr, .. } => {
                KindDump::DocumentDeletionByFilter { filter: filter_expr }
            }
            KindWithContent::DocumentEdition { filter_expr, context, function, .. } => {
                KindDump::DocumentEdition { filter: filter_expr, context, function }
            }
            KindWithContent::DocumentClear { .. } => KindDump::DocumentClear,
            KindWithContent::SettingsUpdate {
                new_settings,
@@ -197,6 +224,18 @@ impl From<KindWithContent> for KindDump {
                KindDump::DumpCreation { keys, instance_uid }
            }
            KindWithContent::SnapshotCreation => KindDump::SnapshotCreation,
            KindWithContent::Export { url, api_key, payload_size, indexes } => KindDump::Export {
                url,
                api_key,
                payload_size,
                indexes: indexes
                    .into_iter()
                    .map(|(pattern, settings)| (pattern.to_string(), settings))
                    .collect(),
            },
            KindWithContent::UpgradeDatabase { from: version } => {
                KindDump::UpgradeDatabase { from: version }
            }
        }
    }
}
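Note how the Export arm re-keys the pattern-keyed map with plain strings, so the dump format stays independent of the in-memory IndexUidPattern type. Reduced to standard-library types (the helper and value types below are stand-ins for illustration, not the crate's own), the idiom is a single iterator map:

use std::collections::BTreeMap;

fn stringify_keys<K: ToString, V>(map: BTreeMap<K, V>) -> BTreeMap<String, V> {
    // Re-key the map by the string form of each key, keeping the values as-is.
    map.into_iter().map(|(pattern, settings)| (pattern.to_string(), settings)).collect()
}

fn main() {
    let by_pattern = BTreeMap::from([("movies-*", 42u64)]);
    let by_string = stringify_keys(by_pattern);
    assert_eq!(by_string.get("movies-*"), Some(&42));
}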
@@ -209,14 +248,16 @@ pub(crate) mod test {

    use big_s::S;
    use maplit::{btreemap, btreeset};
    use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
    use meilisearch_types::facet_values_sort::FacetValuesSort;
    use meilisearch_types::features::RuntimeTogglableFeatures;
    use meilisearch_types::features::{Network, Remote, RuntimeTogglableFeatures};
    use meilisearch_types::index_uid_pattern::IndexUidPattern;
    use meilisearch_types::keys::{Action, Key};
    use meilisearch_types::milli;
    use meilisearch_types::milli::update::Setting;
    use meilisearch_types::milli::{self, FilterableAttributesRule};
    use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
    use meilisearch_types::tasks::{Details, Status};
    use meilisearch_types::task_view::DetailsView;
    use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
    use serde_json::{json, Map, Value};
    use time::macros::datetime;
    use uuid::Uuid;
@@ -258,7 +299,10 @@ pub(crate) mod test {
        let settings = Settings {
            displayed_attributes: Setting::Set(vec![S("race"), S("name")]).into(),
            searchable_attributes: Setting::Set(vec![S("name"), S("race")]).into(),
            filterable_attributes: Setting::Set(btreeset! { S("race"), S("age") }),
            filterable_attributes: Setting::Set(vec![
                FilterableAttributesRule::Field(S("race")),
                FilterableAttributesRule::Field(S("age")),
            ]),
            sortable_attributes: Setting::Set(btreeset! { S("age") }),
            ranking_rules: Setting::NotSet,
            stop_words: Setting::NotSet,
@@ -278,16 +322,50 @@ pub(crate) mod test {
            pagination: Setting::NotSet,
            embedders: Setting::NotSet,
            search_cutoff_ms: Setting::NotSet,
            localized_attributes: Setting::NotSet,
            facet_search: Setting::NotSet,
            prefix_search: Setting::NotSet,
            chat: Setting::NotSet,
            _kind: std::marker::PhantomData,
        };
        settings.check()
    }

    pub fn create_test_batches() -> Vec<Batch> {
        vec![Batch {
            uid: 0,
            details: DetailsView {
                received_documents: Some(12),
                indexed_documents: Some(Some(10)),
                ..DetailsView::default()
            },
            progress: None,
            stats: BatchStats {
                total_nb_tasks: 1,
                status: maplit::btreemap! { Status::Succeeded => 1 },
                types: maplit::btreemap! { Kind::DocumentAdditionOrUpdate => 1 },
                index_uids: maplit::btreemap! { "doggo".to_string() => 1 },
                progress_trace: Default::default(),
                write_channel_congestion: None,
                internal_database_sizes: Default::default(),
            },
            embedder_stats: Default::default(),
            enqueued_at: Some(BatchEnqueuedAt {
                earliest: datetime!(2022-11-11 0:00 UTC),
                oldest: datetime!(2022-11-11 0:00 UTC),
            }),
            started_at: datetime!(2022-11-20 0:00 UTC),
            finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
            stop_reason: BatchStopReason::Unspecified.to_string(),
        }]
    }

    pub fn create_test_tasks() -> Vec<(TaskDump, Option<Vec<Document>>)> {
        vec![
            (
                TaskDump {
                    uid: 0,
                    batch_uid: Some(0),
                    index_uid: Some(S("doggo")),
                    status: Status::Succeeded,
                    kind: KindDump::DocumentImport {
@@ -311,6 +389,7 @@ pub(crate) mod test {
            (
                TaskDump {
                    uid: 1,
                    batch_uid: None,
                    index_uid: Some(S("doggo")),
                    status: Status::Enqueued,
                    kind: KindDump::DocumentImport {
@@ -337,6 +416,7 @@ pub(crate) mod test {
            (
                TaskDump {
                    uid: 5,
                    batch_uid: None,
                    index_uid: Some(S("catto")),
                    status: Status::Enqueued,
                    kind: KindDump::IndexDeletion,
@@ -402,6 +482,15 @@ pub(crate) mod test {
        index.flush().unwrap();
        index.settings(&settings).unwrap();

        // ========== pushing the batch queue
        let batches = create_test_batches();

        let mut batch_queue = dump.create_batches_queue().unwrap();
        for batch in &batches {
            batch_queue.push_batch(batch).unwrap();
        }
        batch_queue.flush().unwrap();

        // ========== pushing the task queue
        let tasks = create_test_tasks();

@@ -430,6 +519,10 @@ pub(crate) mod test {

        dump.create_experimental_features(features).unwrap();

        // ========== network
        let network = create_test_network();
        dump.create_network(network).unwrap();

        // create the dump
        let mut file = tempfile::tempfile().unwrap();
        dump.persist_to(&mut file).unwrap();
@@ -439,7 +532,14 @@ pub(crate) mod test {
    }

    fn create_test_features() -> RuntimeTogglableFeatures {
        RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
        RuntimeTogglableFeatures::default()
    }

    fn create_test_network() -> Network {
        Network {
            local: Some("myself".to_string()),
            remotes: maplit::btreemap! {"other".to_string() => Remote { url: "http://test".to_string(), search_api_key: Some("apiKey".to_string()) }},
        }
    }

    #[test]
@@ -490,5 +590,9 @@ pub(crate) mod test {
        // ==== checking the features
        let expected = create_test_features();
        assert_eq!(dump.features().unwrap().unwrap(), expected);

        // ==== checking the network
        let expected = create_test_network();
        assert_eq!(&expected, dump.network().unwrap().unwrap());
    }
}
@@ -425,7 +425,7 @@ pub(crate) mod test {
        let mut dump = v2::V2Reader::open(dir).unwrap().to_v3();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-09 20:27:59.904096267 +00:00:00");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-09 20:27:59.904096267 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
@@ -358,7 +358,7 @@ pub(crate) mod test {
        let mut dump = v3::V3Reader::open(dir).unwrap().to_v4();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-07 11:39:03.709153554 +00:00:00");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-07 11:39:03.709153554 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
@@ -394,8 +394,8 @@ pub(crate) mod test {
        let mut dump = v4::V4Reader::open(dir).unwrap().to_v5();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-06 12:53:49.131989609 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-06 12:53:49.131989609 +00:00:00");
        insta::assert_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
@@ -1,3 +1,4 @@
use std::num::NonZeroUsize;
use std::str::FromStr;

use super::v4_to_v5::{CompatIndexV4ToV5, CompatV4ToV5};
@@ -70,6 +71,7 @@ impl CompatV5ToV6 {

                let task = v6::Task {
                    uid: task_view.uid,
                    batch_uid: None,
                    index_uid: task_view.index_uid,
                    status: match task_view.status {
                        v5::Status::Enqueued => v6::Status::Enqueued,
@@ -195,6 +197,10 @@ impl CompatV5ToV6 {
    pub fn features(&self) -> Result<Option<v6::RuntimeTogglableFeatures>> {
        Ok(None)
    }

    pub fn network(&self) -> Result<Option<&v6::Network>> {
        Ok(None)
    }
}

pub enum CompatIndexV5ToV6 {
@@ -317,7 +323,16 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
        v6::Settings {
            displayed_attributes: v6::Setting::from(settings.displayed_attributes).into(),
            searchable_attributes: v6::Setting::from(settings.searchable_attributes).into(),
            filterable_attributes: settings.filterable_attributes.into(),
            filterable_attributes: match settings.filterable_attributes {
                v5::settings::Setting::Set(filterable_attributes) => v6::Setting::Set(
                    filterable_attributes
                        .into_iter()
                        .map(v6::FilterableAttributesRule::Field)
                        .collect(),
                ),
                v5::settings::Setting::Reset => v6::Setting::Reset,
                v5::settings::Setting::NotSet => v6::Setting::NotSet,
            },
            sortable_attributes: settings.sortable_attributes.into(),
            ranking_rules: {
                match settings.ranking_rules {
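The conversion above captures the v5-to-v6 schema change for filterable attributes: v5 stored a flat list of field names, while v6 wraps each name in a FilterableAttributesRule::Field rule. Reduced to self-contained toy types (the names below are stand-ins for illustration, not the crate's own), the upgrade is a plain iterator map:

#[derive(Debug, PartialEq)]
enum FilterableRule {
    Field(String),
}

fn upgrade_filterable(old: Vec<String>) -> Vec<FilterableRule> {
    // Every legacy field name becomes an explicit Field rule.
    old.into_iter().map(FilterableRule::Field).collect()
}

fn main() {
    let rules = upgrade_filterable(vec!["race".to_string(), "age".to_string()]);
    assert_eq!(
        rules,
        vec![FilterableRule::Field("race".into()), FilterableRule::Field("age".into())]
    );
}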
@@ -359,6 +374,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
                    },
                    disable_on_words: typo.disable_on_words.into(),
                    disable_on_attributes: typo.disable_on_attributes.into(),
                    disable_on_numbers: v6::Setting::NotSet,
                }),
                v5::Setting::Reset => v6::Setting::Reset,
                v5::Setting::NotSet => v6::Setting::NotSet,
@@ -373,13 +389,23 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
            },
            pagination: match settings.pagination {
                v5::Setting::Set(pagination) => v6::Setting::Set(v6::PaginationSettings {
                    max_total_hits: pagination.max_total_hits.into(),
                    max_total_hits: match pagination.max_total_hits {
                        v5::Setting::Set(max_total_hits) => v6::Setting::Set(
                            max_total_hits.try_into().unwrap_or(NonZeroUsize::new(1).unwrap()),
                        ),
                        v5::Setting::Reset => v6::Setting::Reset,
                        v5::Setting::NotSet => v6::Setting::NotSet,
                    },
                }),
                v5::Setting::Reset => v6::Setting::Reset,
                v5::Setting::NotSet => v6::Setting::NotSet,
            },
            embedders: v6::Setting::NotSet,
            localized_attributes: v6::Setting::NotSet,
            search_cutoff_ms: v6::Setting::NotSet,
            facet_search: v6::Setting::NotSet,
            prefix_search: v6::Setting::NotSet,
            chat: v6::Setting::NotSet,
            _kind: std::marker::PhantomData,
        }
    }
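The new max_total_hits handling changes the target type from a plain integer to NonZeroUsize, so a legacy value of 0 (or anything else the conversion rejects) falls back to 1 instead of failing the whole dump import. A self-contained sketch of that fallback:

use std::num::NonZeroUsize;

fn upgrade_max_total_hits(old: usize) -> NonZeroUsize {
    // try_into() only fails for 0; in that case fall back to the minimum of 1.
    old.try_into().unwrap_or(NonZeroUsize::new(1).unwrap())
}

fn main() {
    assert_eq!(upgrade_max_total_hits(1000).get(), 1000);
    assert_eq!(upgrade_max_total_hits(0).get(), 1);
}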
@@ -442,13 +468,13 @@ pub(crate) mod test {
        let mut dump = v5::V5Reader::open(dir).unwrap().to_v6();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-04 15:55:10.344982459 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-04 15:55:10.344982459 +00:00:00");
        insta::assert_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"4b03e23e740b27bfb9d2a1faffe512e2");
        assert_eq!(update_files.len(), 22);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_some()); // the enqueued document addition
@@ -23,6 +23,7 @@ mod v6;
pub type Document = serde_json::Map<String, serde_json::Value>;
pub type UpdateFile = dyn Iterator<Item = Result<Document>>;

#[allow(clippy::large_enum_variant)]
pub enum DumpReader {
    Current(V6Reader),
    Compat(CompatV5ToV6),
@@ -101,6 +102,13 @@ impl DumpReader {
        }
    }

    pub fn batches(&mut self) -> Result<Box<dyn Iterator<Item = Result<v6::Batch>> + '_>> {
        match self {
            DumpReader::Current(current) => Ok(current.batches()),
            DumpReader::Compat(_compat) => Ok(Box::new(std::iter::empty())),
        }
    }

    pub fn keys(&mut self) -> Result<Box<dyn Iterator<Item = Result<v6::Key>> + '_>> {
        match self {
            DumpReader::Current(current) => Ok(current.keys()),
@@ -108,12 +116,28 @@ impl DumpReader {
        }
    }

    pub fn chat_completions_settings(
        &mut self,
    ) -> Result<Box<dyn Iterator<Item = Result<(String, v6::ChatCompletionSettings)>> + '_>> {
        match self {
            DumpReader::Current(current) => current.chat_completions_settings(),
            DumpReader::Compat(_compat) => Ok(Box::new(std::iter::empty())),
        }
    }

    pub fn features(&self) -> Result<Option<v6::RuntimeTogglableFeatures>> {
        match self {
            DumpReader::Current(current) => Ok(current.features()),
            DumpReader::Compat(compat) => compat.features(),
        }
    }

    pub fn network(&self) -> Result<Option<&v6::Network>> {
        match self {
            DumpReader::Current(current) => Ok(current.network()),
            DumpReader::Compat(compat) => compat.network(),
        }
    }
}

impl From<V6Reader> for DumpReader {
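The new accessors follow the same compatibility pattern as keys() and tasks(): the current dump format streams real entries, while dumps converted from older formats (which never contained batches, chat settings, or network config) yield an empty iterator or None rather than an error. A self-contained toy illustration of that empty-iterator fallback (the types are stand-ins, not the crate's own):

enum Reader {
    Current(Vec<u32>),
    Legacy,
}

impl Reader {
    fn batches(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        match self {
            // The current format really has batches to stream.
            Reader::Current(batches) => Box::new(batches.iter().copied()),
            // Older formats predate batches: return an empty iterator, not an error.
            Reader::Legacy => Box::new(std::iter::empty()),
        }
    }
}

fn main() {
    assert_eq!(Reader::Current(vec![1, 2]).batches().count(), 2);
    assert_eq!(Reader::Legacy.batches().count(), 0);
}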
@@ -216,13 +240,17 @@ pub(crate) mod test {
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2024-05-16 15:51:34.151044 +00:00:00");
        insta::assert_snapshot!(dump.date().unwrap(), @"2024-05-16 15:51:34.151044 +00:00:00");
        insta::assert_debug_snapshot!(dump.instance_uid().unwrap(), @"None");

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"278f63325ef06ca04d01df98d8207b94");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2b8a72d6bc6ba79980491966437daaf9");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_none());
@@ -255,6 +283,8 @@ pub(crate) mod test {
        }
        "###);

        insta::assert_json_snapshot!(vector_index.settings().unwrap());

        {
            let documents: Result<Vec<_>> = vector_index.documents().unwrap().collect();
            let mut documents = documents.unwrap();
@@ -325,10 +355,8 @@ pub(crate) mod test {
            }
        }

        assert_eq!(
            dump.features().unwrap().unwrap(),
            RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
        );
        assert_eq!(dump.features().unwrap().unwrap(), RuntimeTogglableFeatures::default());
        assert_eq!(dump.network().unwrap(), None);
    }

    #[test]
@@ -337,13 +365,17 @@ pub(crate) mod test {
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-07-06 7:10:27.21958 +00:00:00");
        insta::assert_snapshot!(dump.date().unwrap(), @"2023-07-06 7:10:27.21958 +00:00:00");
        insta::assert_debug_snapshot!(dump.instance_uid().unwrap(), @"None");

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"d45cd8571703e58ae53c7bd7ce3f5c22");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"3ddf6169b0a3703c5d770971f036fc5d");
        assert_eq!(update_files.len(), 2);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_none()); // the processed document addition
@@ -371,10 +403,28 @@ pub(crate) mod test {

        assert_eq!(test.documents().unwrap().count(), 1);

        assert_eq!(
            dump.features().unwrap().unwrap(),
            RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
        );
        assert_eq!(dump.features().unwrap().unwrap(), RuntimeTogglableFeatures::default());
    }

    #[test]
    fn import_dump_v6_network() {
        let dump = File::open("tests/assets/v6-with-network.dump").unwrap();
        let dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_snapshot!(dump.date().unwrap(), @"2025-01-29 15:45:32.738676 +00:00:00");
        insta::assert_debug_snapshot!(dump.instance_uid().unwrap(), @"None");

        // network

        let network = dump.network().unwrap().unwrap();
        insta::assert_snapshot!(network.local.as_ref().unwrap(), @"ms-0");
        insta::assert_snapshot!(network.remotes.get("ms-0").as_ref().unwrap().url, @"http://localhost:7700");
        insta::assert_snapshot!(network.remotes.get("ms-0").as_ref().unwrap().search_api_key.is_none(), @"true");
        insta::assert_snapshot!(network.remotes.get("ms-1").as_ref().unwrap().url, @"http://localhost:7701");
        insta::assert_snapshot!(network.remotes.get("ms-1").as_ref().unwrap().search_api_key.is_none(), @"true");
        insta::assert_snapshot!(network.remotes.get("ms-2").as_ref().unwrap().url, @"http://ms-5679.example.meilisearch.io");
        insta::assert_snapshot!(network.remotes.get("ms-2").as_ref().unwrap().search_api_key.as_ref().unwrap(), @"foo");
    }

    #[test]
@@ -383,13 +433,17 @@ pub(crate) mod test {
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-04 15:55:10.344982459 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-04 15:55:10.344982459 +00:00:00");
        insta::assert_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"4b03e23e740b27bfb9d2a1faffe512e2");
        assert_eq!(update_files.len(), 22);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_some()); // the enqueued document addition
@@ -463,13 +517,17 @@ pub(crate) mod test {
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-06 12:53:49.131989609 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-06 12:53:49.131989609 +00:00:00");
        insta::assert_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"c2445ddd1785528b80f2ba534d3bd00c");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"c1b06a5ca60d5805483c16c5b3ff61ef");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
@@ -540,13 +598,17 @@ pub(crate) mod test {
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-07 11:39:03.709153554 +00:00:00");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-07 11:39:03.709153554 +00:00:00");
        assert_eq!(dump.instance_uid().unwrap(), None);

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"cd12efd308fe3ed226356a727ab42ed3");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"0e203b6095f7c68dbdf788321dcc8215");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
@@ -633,13 +695,17 @@ pub(crate) mod test {
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-09 20:27:59.904096267 +00:00:00");
        insta::assert_snapshot!(dump.date().unwrap(), @"2022-10-09 20:27:59.904096267 +00:00:00");
        assert_eq!(dump.instance_uid().unwrap(), None);

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"bc616290adfe7d09a624cf6065ca9069");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"d216c7f90f538ffbb2a059531d7ac89a");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
@@ -726,13 +792,17 @@ pub(crate) mod test {
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");
        insta::assert_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");
        assert_eq!(dump.instance_uid().unwrap(), None);

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2db37756d8af1fb7623436b76e8956a6");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"e27999f1112632222cb84f6cffff7c5f");
        assert_eq!(update_files.len(), 8);
        assert!(update_files[0..].iter().all(|u| u.is_none())); // everything already processed

@@ -805,10 +875,14 @@ pub(crate) mod test {
        assert_eq!(dump.date(), None);
        assert_eq!(dump.instance_uid().unwrap(), None);

        // batches didn't exist at the time
        let batches = dump.batches().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(batches), @"[]");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"8df6eab075a44b3c1af6b726f9fd9a43");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"0155a664b0cf62aae23db5138b6b03d7");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[..].iter().all(|u| u.is_none())); // no update file in dump v1

@@ -0,0 +1,57 @@
---
source: crates/dump/src/reader/mod.rs
expression: vector_index.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "nonSeparatorTokens": [],
  "separatorTokens": [],
  "dictionary": [],
  "synonyms": {},
  "distinctAttribute": null,
  "proximityPrecision": "byWord",
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100,
    "sortFacetValuesBy": {
      "*": "alpha"
    }
  },
  "pagination": {
    "maxTotalHits": 1000
  },
  "embedders": {
    "default": {
      "source": "huggingFace",
      "model": "BAAI/bge-base-en-v1.5",
      "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
      "pooling": "forceMean",
      "documentTemplate": "{% for field in fields %} {{ field.name }}: {{ field.value }}\n{% endfor %}"
    }
  },
  "searchCutoffMs": null
}
Some files were not shown because too many files have changed in this diff.