Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-07-18 04:11:07 +00:00)
Compare commits: prototype-… → release-v1 (1622 commits)

[Commit table omitted: 1622 rows of bare SHA1 hashes (f2f1367ec3 … fa4d8b8348) whose Author and Date columns were empty in the source page.]
.cargo/config.toml (new file, +2)

@@ -0,0 +1,2 @@
+[alias]
+xtask = "run --release --package xtask --"
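This alias makes `cargo xtask <subcommand>` expand to `cargo run --release --package xtask -- <subcommand>`. A minimal local sketch, reusing the `bench` subcommand and workload path that appear in the workflows below (flags other than those shown in the workflows are not verified here):

    # From the repository root; expands to:
    #   cargo run --release --package xtask -- bench -- workloads/movies.json
    cargo xtask bench -- workloads/movies.json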
.github/ISSUE_TEMPLATE/bug_report.md (3 lines changed)

@@ -23,7 +23,8 @@ A clear and concise description of what you expected to happen.
 **Screenshots**
 If applicable, add screenshots to help explain your problem.
 
-**Meilisearch version:** [e.g. v0.20.0]
+**Meilisearch version:**
+[e.g. v0.20.0]
 
 **Additional context**
 Additional information that may be relevant to the issue.
.github/ISSUE_TEMPLATE/sprint_issue.md (new file, +49)

@@ -0,0 +1,49 @@
+---
+name: New sprint issue
+about: ⚠️ Should only be used by the engine team ⚠️
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+Related product team resources: [PRD]() (_internal only_)
+Related product discussion:
+Related spec: WIP
+
+## Motivation
+
+<!---Copy/paste the information in PRD or briefly detail the product motivation. Ask product team if any hesitation.-->
+
+## Usage
+
+<!---Link to the public part of the PRD, or to the related product discussion for experimental features-->
+
+## TODO
+
+<!---Feel free to adapt this list with more technical/product steps-->
+
+- [ ] Release a prototype
+- [ ] If prototype validated, merge changes into `main`
+- [ ] Update the spec
+
+### Reminders when modifying the Setting API
+
+<!--- Special steps to remind when adding a new index setting -->
+
+- [ ] Ensure the new setting route is at least tested by the [`test_setting_routes` macro](https://github.com/meilisearch/meilisearch/blob/5204c0b60b384cbc79621b6b2176fca086069e8e/meilisearch/tests/settings/get_settings.rs#L276)
+- [ ] Ensure Analytics are fully implemented
+- [ ] `/settings/my-new-setting` configured in the [`make_setting_routes` macro](https://github.com/meilisearch/meilisearch/blob/5204c0b60b384cbc79621b6b2176fca086069e8e/meilisearch/src/routes/indexes/settings.rs#L141-L165)
+- [ ] global `/settings` route configured in the [`update_all` function](https://github.com/meilisearch/meilisearch/blob/5204c0b60b384cbc79621b6b2176fca086069e8e/meilisearch/src/routes/indexes/settings.rs#L655-L751)
+- [ ] Ensure the dump serializing is consistent with the `/settings` route serializing, e.g., enums case can be different (`camelCase` in route and `PascalCase` in the dump)
+
+#### Special cases when adding a setting for an experimental feature
+
+- [ ] ⚠️ API stability: The setting does not appear on the main settings route when the feature has never been enabled (e.g. mark it `Unset` when returned from the index in this situation. See [an example](https://github.com/meilisearch/meilisearch/blob/7a89abd2a025606a42f8b219e539117eb2eb029f/meilisearch-types/src/settings.rs#L608))
+- [ ] The setting cannot be set when the feature is disabled, either by the main settings route or the subroute (see [`validate_settings` function](https://github.com/meilisearch/meilisearch/blob/7a89abd2a025606a42f8b219e539117eb2eb029f/meilisearch/src/routes/indexes/settings.rs#L811))
+- [ ] If possible, the setting is reset when the feature is disabled (hard if it requires reindexing)
+
+## Impacted teams
+
+<!---Ping the related teams. Ask for the engine manager if any hesitation-->
+<!---@meilisearch/docs-team when there is any API change, e.g. settings addition-->
.github/scripts/check-release.sh (47 lines changed)

@@ -1,24 +1,41 @@
-#!/bin/bash
+#!/usr/bin/env bash
+set -eu -o pipefail
 
-# check_tag $current_tag $file_tag $file_name
-function check_tag {
-    if [[ "$1" != "$2" ]]; then
-        echo "Error: the current tag does not match the version in Cargo.toml: found $2 - expected $1"
-        ret=1
-    fi
+check_tag() {
+    local expected=$1
+    local actual=$2
+    local filename=$3
+
+    if [[ $actual != $expected ]]; then
+        echo >&2 "Error: the current tag does not match the version in $filename: found $actual, expected $expected"
+        return 1
+    fi
 }
 
+read_version() {
+    grep '^version = ' | cut -d \" -f 2
+}
+
+if [[ -z "${GITHUB_REF:-}" ]]; then
+    echo >&2 "Error: GITHUB_REF is not set"
+    exit 1
+fi
+
+if [[ ! "$GITHUB_REF" =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+(-[a-z0-9]+)?$ ]]; then
+    echo >&2 "Error: GITHUB_REF is not a valid tag: $GITHUB_REF"
+    exit 1
+fi
+
+current_tag=${GITHUB_REF#refs/tags/v}
 ret=0
-current_tag=${GITHUB_REF#'refs/tags/v'}
 
-file_tag="$(grep '^version = ' Cargo.toml | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
-check_tag $current_tag $file_tag
+toml_tag="$(cat Cargo.toml | read_version)"
+check_tag "$current_tag" "$toml_tag" Cargo.toml || ret=1
 
-lock_file='Cargo.lock'
-lock_tag=$(grep -A 1 'name = "meilisearch-auth"' $lock_file | grep version | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')
-check_tag $current_tag $lock_tag $lock_file
+lock_tag=$(grep -A 1 '^name = "meilisearch-auth"' Cargo.lock | read_version)
+check_tag "$current_tag" "$lock_tag" Cargo.lock || ret=1
 
-if [[ "$ret" -eq 0 ]] ; then
-  echo 'OK'
+if (( ret == 0 )); then
+    echo 'OK'
 fi
 exit $ret
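A sketch of exercising the rewritten script locally, outside CI (the tag value is illustrative):

    # Simulate the ref that GitHub Actions sets for a release tag
    export GITHUB_REF='refs/tags/v1.0.0'
    bash .github/scripts/check-release.sh
    # Prints 'OK' and exits 0 only when both Cargo.toml and the
    # meilisearch-auth entry in Cargo.lock carry version 1.0.0;
    # otherwise it reports each mismatching file and exits 1.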
.github/uffizzi/Dockerfile (deleted, -19)

@@ -1,19 +0,0 @@
-# Run
-FROM uffizzi/ttyd:alpine
-
-ENV MEILI_HTTP_ADDR 0.0.0.0:7700
-ENV MEILI_SERVER_PROVIDER docker
-ENV MEILI_NO_ANALYTICS true
-
-RUN apk update --quiet \
-    && apk add -q --no-cache libgcc tini curl
-
-COPY target/x86_64-unknown-linux-musl/release/meilisearch /bin/meilisearch
-RUN ln -s /bin/meilisearch /meilisearch
-
-WORKDIR /meili_data
-
-EXPOSE 7700/tcp
-
-ENTRYPOINT ["tini", "--"]
-CMD ["ttyd", "/bin/zsh"]
.github/uffizzi/docker-compose.uffizzi.yml (deleted, -26)

@@ -1,26 +0,0 @@
-version: "3"
-
-x-uffizzi:
-  ingress:
-    service: nginx
-    port: 8081
-
-services:
-  meilisearch:
-    image: "${MEILISEARCH_IMAGE}"
-    restart: unless-stopped
-    ports:
-      - "7681:7681"
-      - "7700:7700"
-    deploy:
-      resources:
-        limits:
-          memory: 500M
-
-  nginx:
-    image: nginx:alpine
-    restart: unless-stopped
-    ports:
-      - "8081:8081"
-    volumes:
-      - ./.github/uffizzi/nginx:/etc/nginx
.github/uffizzi/nginx/nginx.conf (deleted, -28)

@@ -1,28 +0,0 @@
-
-events {
-  worker_connections 4096;  ## Default: 1024
-}
-
-http {
-  map $http_upgrade $connection_upgrade {
-    default upgrade;
-    '' close;
-  }
-
-  server {
-    listen 8081;
-
-    location / {
-      proxy_pass http://localhost:7681;
-      proxy_http_version 1.1;
-      proxy_set_header Upgrade $http_upgrade;
-      proxy_set_header Connection $connection_upgrade;
-    }
-
-    location /meilisearch/ {
-      # rewrite /meilisearch/(.*) /$1 break;
-      proxy_pass http://localhost:7700/;
-    }
-  }
-}
-
.github/workflows/bench-manual.yml (new file, +30)

@@ -0,0 +1,30 @@
+name: Bench (manual)
+
+on:
+  workflow_dispatch:
+    inputs:
+      workload:
+        description: 'The path to the workloads to execute (workloads/...)'
+        required: true
+        default: 'workloads/movies.json'
+
+env:
+  WORKLOAD_NAME: ${{ github.event.inputs.workload }}
+
+jobs:
+  benchmarks:
+    name: Run and upload benchmarks
+    runs-on: benchmarks
+    timeout-minutes: 180 # 3h
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      - name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
+        run: |
+          cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}
+
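Since the workflow is a plain `workflow_dispatch`, it can also be started from the GitHub CLI; a sketch (assumes an authenticated `gh` with access to the repository):

    # Trigger the manual bench run for a given workload file
    gh workflow run 'Bench (manual)' --field workload=workloads/movies.json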
.github/workflows/bench-pr.yml (new file, +46)

@@ -0,0 +1,46 @@
+name: Bench (PR)
+on:
+  issue_comment:
+    types: [created]
+
+permissions:
+  issues: write
+
+env:
+  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
+
+jobs:
+  run-benchmarks-on-comment:
+    if: startsWith(github.event.comment.body, '/bench')
+    name: Run and upload benchmarks
+    runs-on: benchmarks
+    timeout-minutes: 180 # 3h
+    steps:
+      - name: Check for Command
+        id: command
+        uses: xt0rted/slash-command-action@v2
+        with:
+          command: bench
+          reaction-type: "rocket"
+          repo-token: ${{ env.GH_TOKEN }}
+
+      - uses: xt0rted/pull-request-comment-branch@v2
+        id: comment-branch
+        with:
+          repo_token: ${{ env.GH_TOKEN }}
+
+      - uses: actions/checkout@v3
+        if: success()
+        with:
+          fetch-depth: 0 # fetch full history to be able to get main commit sha
+          ref: ${{ steps.comment-branch.outputs.head_ref }}
+
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      - name: Run benchmarks on PR ${{ github.event.issue.id }}
+        run: |
+          cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "[Comment](${{ github.event.comment.url }}) on [#${{github.event.issue.id}}](${{ github.event.issue.url }})" -- ${{ steps.command.outputs.command-arguments }}
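Everything after the slash command is forwarded verbatim to `cargo xtask bench`, so a maintainer starts a run by commenting on the pull request; an illustrative comment (the workload path is an example):

    /bench workloads/movies.json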
.github/workflows/bench-push-indexing.yml (new file, +25)

@@ -0,0 +1,25 @@
+name: Indexing bench (push)
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  benchmarks:
+    name: Run and upload benchmarks
+    runs-on: benchmarks
+    timeout-minutes: 180 # 3h
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      # Run benchmarks
+      - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
+        run: |
+          cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json
+
@@ -1,4 +1,4 @@
-name: Benchmarks
+name: Benchmarks (manual)
 
 on:
   workflow_dispatch:
@@ -74,4 +74,4 @@ jobs:
           echo "${{ steps.file.outputs.basename }}.json has just been pushed."
           echo 'How to compare this benchmark with another one?'
           echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
-          echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
+          echo " - Run the following command: ./benchmaks/scripts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
.github/workflows/benchmarks-pr.yml (new file, +98)

@@ -0,0 +1,98 @@
+name: Benchmarks (PR)
+on: issue_comment
+permissions:
+  issues: write
+
+env:
+  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
+
+jobs:
+  run-benchmarks-on-comment:
+    if: startsWith(github.event.comment.body, '/benchmark')
+    name: Run and upload benchmarks
+    runs-on: benchmarks
+    timeout-minutes: 4320 # 72h
+    steps:
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      - name: Check for Command
+        id: command
+        uses: xt0rted/slash-command-action@v2
+        with:
+          command: benchmark
+          reaction-type: "eyes"
+          repo-token: ${{ env.GH_TOKEN }}
+
+      - uses: xt0rted/pull-request-comment-branch@v2
+        id: comment-branch
+        with:
+          repo_token: ${{ env.GH_TOKEN }}
+
+      - uses: actions/checkout@v3
+        if: success()
+        with:
+          fetch-depth: 0 # fetch full history to be able to get main commit sha
+          ref: ${{ steps.comment-branch.outputs.head_ref }}
+
+      # Set variables
+      - name: Set current branch name
+        shell: bash
+        run: echo "name=$(git rev-parse --abbrev-ref HEAD)" >> $GITHUB_OUTPUT
+        id: current_branch
+      - name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
+        shell: bash
+        run: echo "name=$(git rev-parse --abbrev-ref HEAD | tr '/' '_')" >> $GITHUB_OUTPUT
+        id: normalized_current_branch
+      - name: Set shorter commit SHA
+        shell: bash
+        run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
+        id: commit_sha
+      - name: Set file basename with format "dataset_branch_commitSHA"
+        shell: bash
+        run: echo "basename=$(echo ${{ steps.command.outputs.command-arguments }}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
+        id: file
+
+      # Run benchmarks
+      - name: Run benchmarks - Dataset ${{ steps.command.outputs.command-arguments }} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
+        run: |
+          cd benchmarks
+          cargo bench --bench ${{ steps.command.outputs.command-arguments }} -- --save-baseline ${{ steps.file.outputs.basename }}
+
+      # Generate critcmp files
+      - name: Install critcmp
+        uses: taiki-e/install-action@v2
+        with:
+          tool: critcmp
+      - name: Export critcmp file
+        run: |
+          critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
+
+      # Upload benchmarks
+      - name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
+        uses: BetaHuhn/do-spaces-action@v2
+        with:
+          access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
+          secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
+          space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
+          space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
+          source: ${{ steps.file.outputs.basename }}.json
+          out_dir: critcmp_results
+
+      # Compute the diff of the benchmarks and send a message on the GitHub PR
+      - name: Compute and send a message in the PR
+        env:
+          GITHUB_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
+        run: |
+          set -x
+          export base_ref=$(git merge-base origin/main ${{ steps.comment-branch.outputs.head_ref }} | head -c8)
+          export base_filename=$(echo ${{ steps.command.outputs.command-arguments }}_main_${base_ref}.json)
+          export bench_name=$(echo ${{ steps.command.outputs.command-arguments }})
+          echo "Here are your $bench_name benchmarks diff 👊" >> body.txt
+          echo '```' >> body.txt
+          ./benchmarks/scripts/compare.sh $base_filename ${{ steps.file.outputs.basename }}.json >> body.txt
+          echo '```' >> body.txt
+          gh pr comment ${{ steps.current_branch.outputs.name }} --body-file body.txt
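This Criterion-based flow is driven the same way: a `/benchmark` comment whose argument is passed straight to `cargo bench --bench`, so it must name a bench target of the `benchmarks` crate. An illustrative comment (the target name is an assumption based on the workflow names below):

    /benchmark search_songs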
@@ -1,4 +1,4 @@
-name: Benchmarks indexing (push)
+name: Benchmarks of indexing (push)
 
 on:
   push:
@@ -1,4 +1,4 @@
-name: Benchmarks search geo (push)
+name: Benchmarks of search for geo (push)
 
 on:
   push:
@@ -1,4 +1,4 @@
-name: Benchmarks search songs (push)
+name: Benchmarks of search for songs (push)
 
 on:
   push:
@@ -1,4 +1,4 @@
-name: Benchmarks search wikipedia articles (push)
+name: Benchmarks of search for Wikipedia articles (push)
 
 on:
   push:
.github/workflows/create-issue-dependencies.yml (deleted, -28)

@@ -1,28 +0,0 @@
-name: Create issue to upgrade dependencies
-on:
-  schedule:
-    # Run the first of the month, every 3 month
-    - cron: '0 0 1 */3 *'
-  workflow_dispatch:
-
-jobs:
-  create-issue:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Create an issue
-        uses: actions-ecosystem/action-create-issue@v1
-        with:
-          github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
-          title: Upgrade dependencies
-          body: |
-            This issue is about updating Meilisearch dependencies:
-            - [ ] Cargo toml dependencies of Meilisearch; but also the main engine-team repositories that Meilisearch depends on (charabia, heed...)
-            - [ ] If new Rust versions have been released, update the Rust version in the Clippy job of this [GitHub Action file](./.github/workflows/rust.yml)
-
-            ⚠️ To avoid last minute bugs, this issue should only be done at the beginning of the sprint!
-
-            The GitHub action dependencies are managed by [Dependabot](./.github/dependabot.yml)
-          labels: |
-            dependencies
-            maintenance
.github/workflows/dependency-issue.yml (new file, +24)

@@ -0,0 +1,24 @@
+name: Create issue to upgrade dependencies
+
+on:
+  schedule:
+    # Run the first of the month, every 6 months
+    - cron: '0 0 1 */6 *'
+  workflow_dispatch:
+
+jobs:
+  create-issue:
+    runs-on: ubuntu-latest
+    env:
+      ISSUE_TEMPLATE: issue-template.md
+      GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Download the issue template
+        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/dependency-issue.md > $ISSUE_TEMPLATE
+      - name: Create issue
+        run: |
+          gh issue create \
+            --title 'Upgrade dependencies' \
+            --label 'dependencies,maintenance' \
+            --body-file $ISSUE_TEMPLATE
.github/workflows/fuzzer-indexing.yml (new file, +24)

@@ -0,0 +1,24 @@
+name: Run the indexing fuzzer
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  fuzz:
+    name: Setup the action
+    runs-on: ubuntu-latest
+    timeout-minutes: 4320 # 72h
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      # Run the fuzzer
+      - name: Run the fuzzer
+        run: |
+          cargo run --release --bin fuzz-indexing
@@ -1,4 +1,4 @@
-name: Publish to APT repository & Homebrew
+name: Publish to APT & Homebrew
 
 on:
   release:
@@ -35,7 +35,7 @@ jobs:
       - name: Build deb package
         run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
-        uses: svenstaro/upload-release-action@2.4.0
+        uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb
@@ -50,8 +50,9 @@ jobs:
     needs: check-version
     steps:
       - name: Create PR to Homebrew
-        uses: mislav/bump-homebrew-formula-action@v2
+        uses: mislav/bump-homebrew-formula-action@v3
        with:
          formula-name: meilisearch
+          formula-path: Formula/m/meilisearch.rb
        env:
          COMMITTER_TOKEN: ${{ secrets.HOMEBREW_COMMITTER_TOKEN }}
.github/workflows/publish-binaries.yml (52 lines changed)

@@ -1,3 +1,5 @@
+name: Publish binaries to GitHub release
+
 on:
   workflow_dispatch:
   schedule:
@@ -5,8 +7,6 @@ on:
   release:
     types: [published]
 
-name: Publish binaries to release
-
 jobs:
   check-version:
     name: Check the version validity
@@ -54,7 +54,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.4.0
+        uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/meilisearch
@@ -87,7 +87,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
      - name: Upload binaries to release
        if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.4.0
+        uses: svenstaro/upload-release-action@2.7.0
        with:
          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/release/${{ matrix.artifact_name }}
@@ -96,14 +96,12 @@ jobs:
 
   publish-macos-apple-silicon:
     name: Publish binary for macOS silicon
-    runs-on: ${{ matrix.os }}
+    runs-on: macos-12
     needs: check-version
     strategy:
       fail-fast: false
       matrix:
         include:
-          - os: macos-12
-            target: aarch64-apple-darwin
+          - target: aarch64-apple-darwin
             asset_name: meilisearch-macos-apple-silicon
     steps:
       - name: Checkout repository
@@ -123,7 +121,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.4.0
+        uses: svenstaro/upload-release-action@2.7.0
        with:
          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/${{ matrix.target }}/release/meilisearch
@@ -132,21 +130,29 @@ jobs:
 
   publish-aarch64:
     name: Publish binary for aarch64
-    runs-on: ${{ matrix.os }}
+    runs-on: ubuntu-latest
     needs: check-version
+    container:
+      # Use ubuntu-18.04 to compile with glibc 2.27
+      image: ubuntu:18.04
     strategy:
       fail-fast: false
       matrix:
         include:
-          - build: aarch64
-            os: ubuntu-18.04
-            target: aarch64-unknown-linux-gnu
-            linker: gcc-aarch64-linux-gnu
-            use-cross: true
+          - target: aarch64-unknown-linux-gnu
             asset_name: meilisearch-linux-aarch64
     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
+      - name: Install needed dependencies
+        run: |
+          apt-get update -y && apt upgrade -y
+          apt-get install -y curl build-essential gcc-aarch64-linux-gnu
+      - name: Set up Docker for cross compilation
+        run: |
+          apt-get install -y curl apt-transport-https ca-certificates software-properties-common
+          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+          add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+          apt-get update -y && apt-get install -y docker-ce
       - name: Installing Rust toolchain
         uses: actions-rs/toolchain@v1
         with:
@@ -154,15 +160,7 @@ jobs:
           profile: minimal
           target: ${{ matrix.target }}
          override: true
-      - name: APT update
-        run: |
-          sudo apt update
-      - name: Install target specific tools
-        if: matrix.use-cross
-        run: |
-          sudo apt-get install -y ${{ matrix.linker }}
       - name: Configure target aarch64 GNU
         if: matrix.target == 'aarch64-unknown-linux-gnu'
         ## Environment variable is not passed using env:
         ## LD gold won't work with MUSL
         # env:
@@ -176,14 +174,16 @@ jobs:
         uses: actions-rs/cargo@v1
         with:
           command: build
-          use-cross: ${{ matrix.use-cross }}
+          use-cross: true
           args: --release --target ${{ matrix.target }}
+        env:
+          CROSS_DOCKER_IN_DOCKER: true
+      - name: List target output files
+        run: ls -lR ./target
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.4.0
+        uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
.github/workflows/publish-docker-images.yml (17 lines changed)

@@ -1,4 +1,5 @@
+---
 name: Publish images to Docker Hub
 
 on:
   push:
     # Will run for every tag pushed except `latest`
@@ -12,8 +13,6 @@ on:
     - cron: '0 23 * * *' # Every day at 11:00pm
   workflow_dispatch:
 
-name: Publish tagged images to Docker Hub
-
 jobs:
   docker:
     runs-on: docker
@@ -58,20 +57,20 @@ jobs:
           echo "date=$commit_date" >> $GITHUB_OUTPUT
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
 
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
 
       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Docker meta
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: getmeili/meilisearch
           # Prevent `latest` to be updated for each new tag pushed.
@@ -84,7 +83,7 @@ jobs:
           type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
 
       - name: Build and push
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
          push: true
          platforms: linux/amd64,linux/arm64
@@ -98,7 +97,7 @@ jobs:
       - name: Send CI information to Cloud team
         # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
         if: github.event_name == 'push'
-        uses: peter-evans/repository-dispatch@v2
+        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.MEILI_BOT_GH_PAT }}
          repository: meilisearch/meilisearch-cloud
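A quick smoke test of a published multi-arch image; a sketch (the `nightly` tag and master key value mirror the SDK test workflow below and are illustrative):

    docker run --rm -p 7700:7700 \
      -e MEILI_MASTER_KEY='masterKey' \
      getmeili/meilisearch:nightly
    # The HTTP API should then answer on http://localhost:7700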
.github/workflows/rust.yml (deleted, -145)

@@ -1,145 +0,0 @@
-name: Rust
-
-on:
-  workflow_dispatch:
-  schedule:
-    # Everyday at 5:00am
-    - cron: '0 5 * * *'
-  pull_request:
-  push:
-    # trying and staging branches are for Bors config
-    branches:
-      - trying
-      - staging
-
-env:
-  CARGO_TERM_COLOR: always
-  RUST_BACKTRACE: 1
-  RUSTFLAGS: "-D warnings"
-
-jobs:
-  test-linux:
-    name: Tests on ubuntu-18.04
-    runs-on: ubuntu-latest
-    container:
-      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
-      image: ubuntu:18.04
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install needed dependencies
-        run: |
-          apt-get update && apt-get install -y curl
-          apt-get install build-essential -y
-      - name: Run test with Rust stable
-        if: github.event_name != 'schedule'
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-          override: true
-      - name: Run test with Rust nightly
-        if: github.event_name == 'schedule'
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: nightly
-          override: true
-      # Disable cache due to disk space issues with Windows workers in CI
-      # - name: Cache dependencies
-      #   uses: Swatinem/rust-cache@v2.2.0
-      - name: Run cargo check without any default features
-        uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --locked --release --no-default-features --all
-      - name: Run cargo test
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --locked --release --all
-
-  test-others:
-    name: Tests on ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [macos-12, windows-2022]
-    steps:
-      - uses: actions/checkout@v3
-      # - name: Cache dependencies
-      #   uses: Swatinem/rust-cache@v2.2.0
-      - name: Run cargo check without any default features
-        uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --locked --release --no-default-features --all
-      - name: Run cargo test
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --locked --release --all
-
-  # We run tests in debug also, to make sure that the debug_assertions are hit
-  test-debug:
-    name: Run tests in debug
-    runs-on: ubuntu-latest
-    container:
-      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
-      image: ubuntu:18.04
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install needed dependencies
-        run: |
-          apt-get update && apt-get install -y curl
-          apt-get install build-essential -y
-      - uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-          override: true
-      # - name: Cache dependencies
-      #   uses: Swatinem/rust-cache@v2.2.0
-      - name: Run tests in debug
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --locked --all
-
-  clippy:
-    name: Run Clippy
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: 1.67.0
-          override: true
-          components: clippy
-      # - name: Cache dependencies
-      #   uses: Swatinem/rust-cache@v2.2.0
-      - name: Run cargo clippy
-        uses: actions-rs/cargo@v1
-        with:
-          command: clippy
-          # allow unlined_format_args https://github.com/rust-lang/rust-clippy/issues/10087
-          args: --all-targets -- --deny warnings --allow clippy::uninlined_format_args
-
-  fmt:
-    name: Run Rustfmt
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: nightly
-          override: true
-          components: rustfmt
-      # - name: Cache dependencies
-      #   uses: Swatinem/rust-cache@v2.2.0
-      - name: Run cargo fmt
-        # Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
-        # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
-        # we are going to create an empty file where rustfmt expects it.
-        run: |
-          echo -ne "\n" > benchmarks/benches/datasets_paths.rs
-          cargo fmt --all -- --check
.github/workflows/sdks-tests.yml (vendored, 386 lines): new file
@@ -0,0 +1,386 @@
# If any test fails, the engine team should ensure the "breaking" changes are expected and contact the integration team
name: SDKs tests

on:
  workflow_dispatch:
    inputs:
      docker_image:
        description: 'The Meilisearch Docker image used'
        required: false
        default: nightly
  schedule:
    - cron: "0 6 * * MON" # Every Monday at 6:00AM

env:
  MEILI_MASTER_KEY: 'masterKey'
  MEILI_NO_ANALYTICS: 'true'
  DISABLE_COVERAGE: 'true'

jobs:
  define-docker-image:
    runs-on: ubuntu-latest
    outputs:
      docker-image: ${{ steps.define-image.outputs.docker-image }}
    steps:
      - uses: actions/checkout@v4
      - name: Define the Docker image we need to use
        id: define-image
        run: |
          event=${{ github.event_name }}
          echo "docker-image=nightly" >> $GITHUB_OUTPUT
          if [[ $event == 'workflow_dispatch' ]]; then
            echo "docker-image=${{ github.event.inputs.docker_image }}" >> $GITHUB_OUTPUT
          fi
      - name: Docker image is ${{ steps.define-image.outputs.docker-image }}
        run: echo "Docker image is ${{ steps.define-image.outputs.docker-image }}"

##########
## SDKs ##
##########

  meilisearch-dotnet-tests:
    needs: define-docker-image
    name: .NET SDK tests
    runs-on: ubuntu-latest
    env:
      MEILISEARCH_VERSION: ${{ needs.define-docker-image.outputs.docker-image }}

    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-dotnet
      - name: Setup .NET Core
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: "6.0.x"
      - name: Install dependencies
        run: dotnet restore
      - name: Build
        run: dotnet build --configuration Release --no-restore
      - name: Meilisearch (latest version) setup with Docker
        run: docker compose up -d
      - name: Run tests
        run: dotnet test --no-restore --verbosity normal

  meilisearch-dart-tests:
    needs: define-docker-image
    name: Dart SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-dart
      - uses: dart-lang/setup-dart@v1
        with:
          sdk: 'latest'
      - name: Install dependencies
        run: dart pub get
      - name: Run integration tests
        run: dart test --concurrency=4

  meilisearch-go-tests:
    needs: define-docker-image
    name: Go SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: stable
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-go
      - name: Get dependencies
        run: |
          go get -v -t -d ./...
          if [ -f Gopkg.toml ]; then
            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
            dep ensure
          fi
      - name: Run integration tests
        run: go test -v ./...

  meilisearch-java-tests:
    needs: define-docker-image
    name: Java SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-java
      - name: Set up Java
        uses: actions/setup-java@v4
        with:
          java-version: 8
          distribution: 'zulu'
          cache: gradle
      - name: Grant execute permission for gradlew
        run: chmod +x gradlew
      - name: Build and run unit and integration tests
        run: ./gradlew build integrationTest

  meilisearch-js-tests:
    needs: define-docker-image
    name: JS SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-js
      - name: Setup node
        uses: actions/setup-node@v4
        with:
          cache: 'yarn'
      - name: Install dependencies
        run: yarn --dev
      - name: Run tests
        run: yarn test
      - name: Build project
        run: yarn build
      - name: Run ESM env
        run: yarn test:env:esm
      - name: Run Node.js env
        run: yarn test:env:nodejs
      - name: Run node typescript env
        run: yarn test:env:node-ts
      - name: Run Browser env
        run: yarn test:env:browser

  meilisearch-php-tests:
    needs: define-docker-image
    name: PHP SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-php
      - name: Install PHP
        uses: shivammathur/setup-php@v2
      - name: Validate composer.json and composer.lock
        run: composer validate
      - name: Install dependencies
        run: |
          composer remove --dev friendsofphp/php-cs-fixer --no-update --no-interaction
          composer update --prefer-dist --no-progress
      - name: Run test suite - default HTTP client (Guzzle 7)
        run: |
          sh scripts/tests.sh
          composer remove --dev guzzlehttp/guzzle http-interop/http-factory-guzzle

  meilisearch-python-tests:
    needs: define-docker-image
    name: Python SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-python
      - name: Set up Python
        uses: actions/setup-python@v5
      - name: Install pipenv
        uses: dschep/install-pipenv-action@v1
      - name: Install dependencies
        run: pipenv install --dev --python=${{ matrix.python-version }}
      - name: Test with pytest
        run: pipenv run pytest

  meilisearch-ruby-tests:
    needs: define-docker-image
    name: Ruby SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-ruby
      - name: Set up Ruby 3
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: 3
      - name: Install ruby dependencies
        run: bundle install --with test
      - name: Run test suite
        run: bundle exec rspec

  meilisearch-rust-tests:
    needs: define-docker-image
    name: Rust SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-rust
      - name: Build
        run: cargo build --verbose
      - name: Run tests
        run: cargo test --verbose

  meilisearch-swift-tests:
    needs: define-docker-image
    name: Swift SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-swift
      - name: Run tests
        run: swift test

########################
## FRONT-END PLUGINS ##
########################

  meilisearch-js-plugins-tests:
    needs: define-docker-image
    name: meilisearch-js-plugins tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-js-plugins
      - name: Setup node
        uses: actions/setup-node@v4
        with:
          cache: yarn
      - name: Install dependencies
        run: yarn install
      - name: Run tests
        run: yarn test
      - name: Build all the playgrounds and the packages
        run: yarn build

########################
## BACK-END PLUGINS ###
########################

  meilisearch-rails-tests:
    needs: define-docker-image
    name: meilisearch-rails tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-rails
      - name: Set up Ruby 3
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: 3
          bundler-cache: true
      - name: Run tests
        run: bundle exec rspec

  meilisearch-symfony-tests:
    needs: define-docker-image
    name: meilisearch-symfony tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
        ports:
          - '7700:7700'
    steps:
      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-symfony
      - name: Install PHP
        uses: shivammathur/setup-php@v2
        with:
          tools: composer:v2, flex
      - name: Validate composer.json and composer.lock
        run: composer validate
      - name: Install dependencies
        run: composer install --prefer-dist --no-progress --quiet
      - name: Remove doctrine/annotations
        run: composer remove --dev doctrine/annotations
      - name: Run test suite
        run: composer test:unit
.github/workflows/test-suite.yml (vendored, 190 lines): new file
@@ -0,0 +1,190 @@
name: Test suite

on:
  workflow_dispatch:
  schedule:
    # Everyday at 5:00am
    - cron: '0 5 * * *'
  pull_request:
  push:
    # trying and staging branches are for Bors config
    branches:
      - trying
      - staging

env:
  CARGO_TERM_COLOR: always
  RUST_BACKTRACE: 1
  RUSTFLAGS: "-D warnings"

jobs:
  test-linux:
    name: Tests on ubuntu-18.04
    runs-on: ubuntu-latest
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
      image: ubuntu:18.04
    steps:
      - uses: actions/checkout@v3
      - name: Install needed dependencies
        run: |
          apt-get update && apt-get install -y curl
          apt-get install build-essential -y
      - name: Setup test with Rust stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.7.1
      - name: Run cargo check without any default features
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --locked --release --no-default-features --all
      - name: Run cargo test
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --locked --release --all

  test-others:
    name: Tests on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-12, windows-2022]
    steps:
      - uses: actions/checkout@v3
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.7.1
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Run cargo check without any default features
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --locked --release --no-default-features --all
      - name: Run cargo test
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --locked --release --all

  test-all-features:
    name: Tests almost all features
    runs-on: ubuntu-latest
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
      image: ubuntu:18.04
    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
    steps:
      - uses: actions/checkout@v3
      - name: Install needed dependencies
        run: |
          apt-get update
          apt-get install --assume-yes build-essential curl
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Run cargo build with almost all features
        run: |
          cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda)"
      - name: Run cargo test with almost all features
        run: |
          cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda)"

  test-disabled-tokenization:
    name: Test disabled tokenization
    runs-on: ubuntu-latest
    container:
      image: ubuntu:18.04
    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
    steps:
      - uses: actions/checkout@v3
      - name: Install needed dependencies
        run: |
          apt-get update
          apt-get install --assume-yes build-essential curl
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Run cargo tree without default features and check lindera is not present
        run: |
          if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -vqz lindera; then
            echo "lindera has been found in the sources and it shouldn't"
            exit 1
          fi
      - name: Run cargo tree with default features and check lindera is present
        run: |
          cargo tree -f '{p} {f}' -e normal | grep lindera -qz

  # We run tests in debug also, to make sure that the debug_assertions are hit
  test-debug:
    name: Run tests in debug
    runs-on: ubuntu-latest
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
      image: ubuntu:18.04
    steps:
      - uses: actions/checkout@v3
      - name: Install needed dependencies
        run: |
          apt-get update && apt-get install -y curl
          apt-get install build-essential -y
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.7.1
      - name: Run tests in debug
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --locked --all

  clippy:
    name: Run Clippy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: 1.75.0
          override: true
          components: clippy
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.7.1
      - name: Run cargo clippy
        uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: --all-targets -- --deny warnings

  fmt:
    name: Run Rustfmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: nightly
          override: true
          components: rustfmt
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.7.1
      - name: Run cargo fmt
        # Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
        # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
        # we are going to create an empty file where rustfmt expects it.
        run: |
          echo -ne "\n" > benchmarks/benches/datasets_paths.rs
          cargo fmt --all -- --check
.github/workflows/uffizzi-build.yml (vendored, 120 lines): file deleted
@@ -1,120 +0,0 @@
name: Uffizzi - Build PR Image
on:
  pull_request:
    types: [opened,synchronize,reopened,closed]

jobs:
  build-meilisearch:
    name: Build and push `meilisearch`
    runs-on: ubuntu-latest
    outputs:
      tags: ${{ steps.meta.outputs.tags }}
    if: ${{ github.event.action != 'closed' }}
    steps:
      - name: checkout
        uses: actions/checkout@v3

      - run: sudo apt-get install musl-tools

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
          target: x86_64-unknown-linux-musl

      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.2.0

      - name: Run cargo check without any default features
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --target x86_64-unknown-linux-musl --release

      - name: Remove dockerignore so we can use the target folder in our docker build
        run: rm -f .dockerignore

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Generate UUID image name
        id: uuid
        run: echo "UUID_TAG=$(uuidgen)" >> $GITHUB_ENV

      - name: Docker metadata
        id: meta
        uses: docker/metadata-action@v3
        with:
          images: registry.uffizzi.com/${{ env.UUID_TAG }}
          tags: |
            type=raw,value=60d

      - name: Build Image
        uses: docker/build-push-action@v3
        with:
          context: ./
          file: .github/uffizzi/Dockerfile
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          push: true
          cache-from: type=gha
          cache-to: type=gha,mode=max

  render-compose-file:
    name: Render Docker Compose File
    # Pass output of this workflow to another triggered by `workflow_run` event.
    runs-on: ubuntu-latest
    needs:
      - build-meilisearch
    outputs:
      compose-file-cache-key: ${{ env.COMPOSE_FILE_HASH }}
    steps:
      - name: Checkout git repo
        uses: actions/checkout@v3
      - name: Render Compose File
        run: |
          MEILISEARCH_IMAGE=$(echo ${{ needs.build-meilisearch.outputs.tags }})
          export MEILISEARCH_IMAGE
          # Render simple template from environment variables.
          envsubst < .github/uffizzi/docker-compose.uffizzi.yml > docker-compose.rendered.yml
          cat docker-compose.rendered.yml
      - name: Upload Rendered Compose File as Artifact
        uses: actions/upload-artifact@v3
        with:
          name: preview-spec
          path: docker-compose.rendered.yml
          retention-days: 2
      - name: Serialize PR Event to File
        run: |
          cat << EOF > event.json
          ${{ toJSON(github.event) }}

          EOF
      - name: Upload PR Event as Artifact
        uses: actions/upload-artifact@v3
        with:
          name: preview-spec
          path: event.json
          retention-days: 2

  delete-preview:
    name: Call for Preview Deletion
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'closed' }}
    steps:
      # If this PR is closing, we will not render a compose file nor pass it to the next workflow.
      - name: Serialize PR Event to File
        run: |
          cat << EOF > event.json
          ${{ toJSON(github.event) }}

          EOF
      - name: Upload PR Event as Artifact
        uses: actions/upload-artifact@v3
        with:
          name: preview-spec
          path: event.json
          retention-days: 2
.github/workflows/uffizzi-preview-deploy.yml (vendored, 103 lines): file deleted
@@ -1,103 +0,0 @@
name: Uffizzi - Deploy Preview

on:
  workflow_run:
    workflows:
      - "Uffizzi - Build PR Image"
    types:
      - completed

jobs:
  cache-compose-file:
    name: Cache Compose File
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    outputs:
      compose-file-cache-key: ${{ env.COMPOSE_FILE_HASH }}
      pr-number: ${{ env.PR_NUMBER }}
      expected-url: ${{ env.EXPECTED_URL }}
    steps:
      - name: 'Download artifacts'
        # Fetch output (zip archive) from the workflow run that triggered this workflow.
        uses: actions/github-script@v6
        with:
          script: |
            let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: context.payload.workflow_run.id,
            });
            let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
              return artifact.name == "preview-spec"
            })[0];
            let download = await github.rest.actions.downloadArtifact({
              owner: context.repo.owner,
              repo: context.repo.repo,
              artifact_id: matchArtifact.id,
              archive_format: 'zip',
            });
            let fs = require('fs');
            fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/preview-spec.zip`, Buffer.from(download.data));

      - name: 'Unzip artifact'
        run: unzip preview-spec.zip

      - name: Read Event into ENV
        run: |
          echo 'EVENT_JSON<<EOF' >> $GITHUB_ENV
          cat event.json >> $GITHUB_ENV
          echo 'EOF' >> $GITHUB_ENV

      - name: Hash Rendered Compose File
        id: hash
        # If the previous workflow was triggered by a PR close event, we will not have a compose file artifact.
        if: ${{ fromJSON(env.EVENT_JSON).action != 'closed' }}
        run: echo "COMPOSE_FILE_HASH=$(md5sum docker-compose.rendered.yml | awk '{ print $1 }')" >> $GITHUB_ENV

      - name: Cache Rendered Compose File
        if: ${{ fromJSON(env.EVENT_JSON).action != 'closed' }}
        uses: actions/cache@v3
        with:
          path: docker-compose.rendered.yml
          key: ${{ env.COMPOSE_FILE_HASH }}

      - name: Read PR Number From Event Object
        id: pr
        run: echo "PR_NUMBER=${{ fromJSON(env.EVENT_JSON).number }}" >> $GITHUB_ENV

      - name: DEBUG - Print Job Outputs
        if: ${{ runner.debug }}
        run: |
          echo "PR number: ${{ env.PR_NUMBER }}"
          echo "Compose file hash: ${{ env.COMPOSE_FILE_HASH }}"
          cat event.json

      - name: Add expected URL env var
        if: ${{ runner.debug }}
        run: |
          REPO=$(echo ${{ github.repository }} | sed 's/\./+/g')
          echo "EXPECTED_URL=${{ inputs.server }}/github.com/$REPO/pull/${{ env.PR_NUMBER }}" >> $GITHUB_ENV

  deploy-uffizzi-preview:
    name: Use Remote Workflow to Preview on Uffizzi
    needs:
      - cache-compose-file
    uses: UffizziCloud/preview-action/.github/workflows/reusable.yaml@v2
    with:
      # If this workflow was triggered by a PR close event, cache-key will be an empty string
      # and this reusable workflow will delete the preview deployment.
      compose-file-cache-key: ${{ needs.cache-compose-file.outputs.compose-file-cache-key }}
      compose-file-cache-path: docker-compose.rendered.yml
      server: https://app.uffizzi.com
      pr-number: ${{ needs.cache-compose-file.outputs.pr-number }}
      description: |
        The meilisearch preview environment contains a web terminal from where you can run the
        `meilisearch` command. You should be able to access this instance of meilisearch running in
        the preview from the link Meilisearch Endpoint link given below.

        Web Terminal Endpoint : <uffizzi-url>
        Meilisearch Endpoint : <uffizzi-url>/meilisearch
    permissions:
      contents: read
      pull-requests: write
      id-token: write
.gitignore (vendored): 2 lines changed
@@ -9,6 +9,8 @@
 /data.ms
 /snapshots
 /dumps
+/bench
+/_xtask_benchmark.ms
 
 # Snapshots
 ## ... large
@@ -18,9 +18,9 @@ If Meilisearch does not offer optimized support for your language, please consid
 
 ## Assumptions
 
-1. **You're familiar with [GitHub](https://github.com) and the [Pull Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests)(PR) workflow.**
-2. **You've read the Meilisearch [documentation](https://docs.meilisearch.com).**
-3. **You know about the [Meilisearch community](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html).
+1. **You're familiar with [GitHub](https://github.com) and the [Pull Requests (PR)](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests) workflow.**
+2. **You've read the Meilisearch [documentation](https://www.meilisearch.com/docs).**
+3. **You know about the [Meilisearch community on Discord](https://discord.meilisearch.com).
 Please use this for help.**
 
 ## How to Contribute
@@ -75,6 +75,12 @@ If you get a "Too many open files" error you might want to increase the open fil
 ulimit -Sn 3000
 ```
 
+#### Build tools
+
+Meilisearch follows the [cargo xtask](https://github.com/matklad/cargo-xtask) workflow to provide some build tools.
+
+Run `cargo xtask --help` from the root of the repository to find out what is available.
+
 ## Git Guidelines
 
 ### Git Branches
@@ -120,29 +126,9 @@ The full Meilisearch release process is described in [this guide](https://github
 
 Depending on the developed feature, you might need to provide a prototyped version of Meilisearch to make it easier to test by the users.
 
-The prototype name must follow this convention: `prototype-X-Y` where
-- `X` is the feature name formatted in `kebab-case`. It should not end with a single number.
-- `Y` is the version of the prototype, starting from `0`.
-
-✅ Example: `prototype-auto-resize-0`. </br>
-❌ Bad example: `auto-resize-0`: lacks the `prototype` prefix. </br>
-❌ Bad example: `prototype-auto-resize`: lacks the version suffix. </br>
-❌ Bad example: `prototype-auto-resize-0-0`: feature name ends with a single number.
-
-Steps to create a prototype:
-
-1. In your terminal, go to the last commit of your branch (the one you want to provide as a prototype).
-2. Create a tag following the convention: `git tag prototype-X-Y`
-3. Run Meilisearch and check that its launch summary features a line: `Prototype: prototype-X-Y` (you may need to switch branches and back after tagging for this to work).
-3. Push the tag: `git push origin prototype-X-Y`
-4. Check the [Docker CI](https://github.com/meilisearch/meilisearch/actions/workflows/publish-docker-images.yml) is now running.
-
-🐳 Once the CI has finished to run (~1h30), a Docker image named `prototype-X-Y` will be available on [DockerHub](https://hub.docker.com/repository/docker/getmeili/meilisearch/general). People can use it with the following command: `docker run -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-X-Y`. <br>
-More information about [how to run Meilisearch with Docker](https://docs.meilisearch.com/learn/cookbooks/docker.html#download-meilisearch-with-docker).
-
-⚙️ However, no binaries will be created. If the users do not use Docker, they can go to the `prototype-X-Y` tag in the Meilisearch repository and compile from the source code.
-
-⚠️ When sharing a prototype with users, remind them to not use it in production. Prototypes are solely for test purposes.
+This happens in two steps:
+- [Release the prototype](https://github.com/meilisearch/engine-team/blob/main/resources/prototypes.md#how-to-publish-a-prototype)
+- [Communicate about it](https://github.com/meilisearch/engine-team/blob/main/resources/prototypes.md#communication)
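(Editor's aside, not part of the diff: the removed instructions above condense into one short transcript. This is a sketch reusing the document's own example name `auto-resize`; the Docker image only exists once the publish CI linked above has finished.)

```sh
# Tag the commit you want to share, following the prototype-X-Y convention.
git tag prototype-auto-resize-0
git push origin prototype-auto-resize-0

# After the Docker CI has built the image (~1h30), users can try it:
docker run -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-auto-resize-0
```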
 
 ### Release assets
Cargo.lock (generated, 3605 lines): diff suppressed because it is too large.
Cargo.toml: 13 lines changed
@@ -2,6 +2,7 @@
 resolver = "2"
 members = [
   "meilisearch",
+  "meilitool",
   "meilisearch-types",
   "meilisearch-auth",
   "meili-snap",
@@ -13,12 +14,18 @@ members = [
   "filter-parser",
   "flatten-serde-json",
   "json-depth-checker",
-  "benchmarks"
+  "benchmarks",
+  "fuzzers",
+  "tracing-trace",
+  "xtask", "build-info",
 ]
 
 [workspace.package]
-version = "1.1.0"
-authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
+version = "1.7.2"
+authors = [
+  "Quentin de Quelen <quentin@dequelen.me>",
+  "Clément Renault <clement@meilisearch.com>",
+]
 description = "Meilisearch HTTP server"
 homepage = "https://meilisearch.com"
 readme = "README.md"
Dockerfile: 15 lines changed
@@ -1,14 +1,14 @@
 # Compile
-FROM rust:alpine3.16 AS compiler
+FROM rust:1.75.0-alpine3.18 AS compiler
 
 RUN apk add -q --update-cache --no-cache build-base openssl-dev
 
-WORKDIR /meilisearch
+WORKDIR /
 
 ARG COMMIT_SHA
 ARG COMMIT_DATE
 ARG GIT_TAG
-ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
+ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_DESCRIBE=${GIT_TAG}
 ENV RUSTFLAGS="-C target-feature=-crt-static"
 
 COPY . .
@@ -17,7 +17,7 @@ RUN set -eux; \
     if [ "$apkArch" = "aarch64" ]; then \
         export JEMALLOC_SYS_WITH_LG_PAGE=16; \
     fi && \
-    cargo build --release
+    cargo build --release -p meilisearch -p meilitool
 
 # Run
 FROM alpine:3.16
@@ -28,9 +28,10 @@ ENV MEILI_SERVER_PROVIDER docker
 RUN apk update --quiet \
     && apk add -q --no-cache libgcc tini curl
 
-# add meilisearch to the `/bin` so you can run it from anywhere and it's easy
-# to find.
-COPY --from=compiler /meilisearch/target/release/meilisearch /bin/meilisearch
+# add meilisearch and meilitool to the `/bin` so you can run it from anywhere
+# and it's easy to find.
+COPY --from=compiler /target/release/meilisearch /bin/meilisearch
+COPY --from=compiler /target/release/meilitool /bin/meilitool
 # To stay compatible with the older version of the container (pre v0.27.0) we're
 # going to symlink the meilisearch binary in the path to `/meilisearch`
 RUN ln -s /bin/meilisearch /meilisearch
LICENSE: 2 lines changed
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2019-2022 Meili SAS
+Copyright (c) 2019-2024 Meili SAS
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
PROFILING.md (19 lines): new file
@@ -0,0 +1,19 @@
# Profiling Meilisearch

Search engine technologies are complex pieces of software that require thorough profiling tools. We chose to use [Puffin](https://github.com/EmbarkStudios/puffin), which the Rust gaming industry uses extensively. You can export and import the profiling reports using the top bar's _File_ menu options [in Puffin Viewer](https://github.com/embarkstudios/puffin#ui).

![An example profiling with Puffin viewer](assets/profiling-example.png)

## Profiling the Indexing Process

When you enable [the `exportPuffinReports` experimental feature](https://www.meilisearch.com/docs/learn/experimental/overview) of Meilisearch, Puffin reports with the `.puffin` extension will be automatically exported to disk. When this option is enabled, the engine will automatically create a "frame" whenever it executes the `IndexScheduler::tick` method.

[Puffin Viewer](https://github.com/EmbarkStudios/puffin/tree/main/puffin_viewer) is used to analyze the reports. Those reports show areas where Meilisearch spent time during indexing.

One more piece of advice on the Puffin Viewer UI: watch out for the _Merge children with same ID_ option, as it can hide the actual timings at which events were sent. Turning it off can help when you see strange gaps on the flamegraph.

## Profiling the Search Process

We still need to take the time to profile the search side of the engine with Puffin. It would require time to profile the filtering phase, query parsing, creation, and execution. We could even profile the Actix HTTP server.

The only issue we see is the framing system. Puffin requires a global frame-based profiling phase, which collides with Meilisearch's ability to accept and answer multiple requests on different threads simultaneously.
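(Editor's aside, not part of the diff: a minimal Rust sketch of the frame-based Puffin instrumentation described above, assuming only the `puffin` crate as a dependency. The `simulated_tick` function and the scope names are invented for illustration; Meilisearch's actual integration frames `IndexScheduler::tick` instead.)

```rust
fn main() {
    // Scope collection is off by default; nothing is recorded until enabled.
    puffin::set_scopes_on(true);

    for _ in 0..3 {
        simulated_tick();
        // Finishing a frame groups the scopes recorded above into one unit
        // in Puffin Viewer, mirroring the per-tick frames described above.
        puffin::GlobalProfiler::lock().new_frame();
    }
}

fn simulated_tick() {
    puffin::profile_function!(); // records this whole function as a scope

    {
        puffin::profile_scope!("first_phase"); // hypothetical phase name
        // ... indexing-like work would happen here ...
    }
    {
        puffin::profile_scope!("second_phase"); // hypothetical phase name
        // ... more work ...
    }
}
```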
README.md: 76 lines changed
@@ -1,21 +1,26 @@
 <p align="center">
-  <img src="assets/meilisearch-logo-light.svg?sanitize=true#gh-light-mode-only">
-  <img src="assets/meilisearch-logo-dark.svg?sanitize=true#gh-dark-mode-only">
+  <a href="https://www.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=logo#gh-light-mode-only" target="_blank">
+    <img src="assets/meilisearch-logo-light.svg?sanitize=true#gh-light-mode-only">
+  </a>
+  <a href="https://www.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=logo#gh-dark-mode-only" target="_blank">
+    <img src="assets/meilisearch-logo-dark.svg?sanitize=true#gh-dark-mode-only">
+  </a>
 </p>
 
 <h4 align="center">
-  <a href="https://www.meilisearch.com">Website</a> |
+  <a href="https://www.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=nav">Website</a> |
   <a href="https://roadmap.meilisearch.com/tabs/1-under-consideration">Roadmap</a> |
-  <a href="https://blog.meilisearch.com">Blog</a> |
-  <a href="https://docs.meilisearch.com">Documentation</a> |
-  <a href="https://docs.meilisearch.com/faq/">FAQ</a> |
-  <a href="https://discord.meilisearch.com">Discord</a>
+  <a href="https://www.meilisearch.com/pricing?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=nav">Meilisearch Cloud</a> |
+  <a href="https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=nav">Blog</a> |
+  <a href="https://www.meilisearch.com/docs?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=nav">Documentation</a> |
+  <a href="https://www.meilisearch.com/docs/faq?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=nav">FAQ</a> |
+  <a href="https://discord.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=nav">Discord</a>
 </h4>
 
 <p align="center">
   <a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
   <a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
-  <a href="https://app.bors.tech/repositories/26457"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
+  <a href="https://ms-bors.herokuapp.com/repositories/52"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
 </p>
 
 <p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>
@@ -23,72 +28,70 @@
 Meilisearch helps you shape a delightful search experience in a snap, offering features that work out-of-the-box to speed up your workflow.
 
 <p align="center" name="demo">
-  <a href="https://where2watch.meilisearch.com/#gh-light-mode-only" target="_blank">
+  <a href="https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demo-gif#gh-light-mode-only" target="_blank">
     <img src="assets/demo-light.gif#gh-light-mode-only" alt="A bright colored application for finding movies screening near the user">
   </a>
-  <a href="https://where2watch.meilisearch.com/#gh-dark-mode-only" target="_blank">
+  <a href="https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demo-gif#gh-dark-mode-only" target="_blank">
     <img src="assets/demo-dark.gif#gh-dark-mode-only" alt="A dark colored application for finding movies screening near the user">
   </a>
 </p>
 
-🔥 [**Try it!**](https://where2watch.meilisearch.com/) 🔥
+🔥 [**Try it!**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demo-link) 🔥
 
 ## ✨ Features
 
 - **Search-as-you-type:** find search results in less than 50 milliseconds
-- **[Typo tolerance](https://docs.meilisearch.com/learn/getting_started/customizing_relevancy.html#typo-tolerance):** get relevant matches even when queries contain typos and misspellings
-- **[Filtering and faceted search](https://docs.meilisearch.com/learn/advanced/filtering_and_faceted_search.html):** enhance your user's search experience with custom filters and build a faceted search interface in a few lines of code
-- **[Sorting](https://docs.meilisearch.com/learn/advanced/sorting.html):** sort results based on price, date, or pretty much anything else your users need
-- **[Synonym support](https://docs.meilisearch.com/learn/getting_started/customizing_relevancy.html#synonyms):** configure synonyms to include more relevant content in your search results
-- **[Geosearch](https://docs.meilisearch.com/learn/advanced/geosearch.html):** filter and sort documents based on geographic data
-- **[Extensive language support](https://docs.meilisearch.com/learn/what_is_meilisearch/language.html):** search datasets in any language, with optimized support for Chinese, Japanese, Hebrew, and languages using the Latin alphabet
-- **[Security management](https://docs.meilisearch.com/learn/security/master_api_keys.html):** control which users can access what data with API keys that allow fine-grained permissions handling
-- **[Multi-Tenancy](https://docs.meilisearch.com/learn/security/tenant_tokens.html):** personalize search results for any number of application tenants
+- **[Typo tolerance](https://www.meilisearch.com/docs/learn/configuration/typo_tolerance?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** get relevant matches even when queries contain typos and misspellings
+- **[Filtering](https://www.meilisearch.com/docs/learn/fine_tuning_results/filtering?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features) and [faceted search](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** enhance your users' search experience with custom filters and build a faceted search interface in a few lines of code
+- **[Sorting](https://www.meilisearch.com/docs/learn/fine_tuning_results/sorting?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** sort results based on price, date, or pretty much anything else your users need
+- **[Synonym support](https://www.meilisearch.com/docs/learn/configuration/synonyms?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** configure synonyms to include more relevant content in your search results
+- **[Geosearch](https://www.meilisearch.com/docs/learn/fine_tuning_results/geosearch?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** filter and sort documents based on geographic data
+- **[Extensive language support](https://www.meilisearch.com/docs/learn/what_is_meilisearch/language?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** search datasets in any language, with optimized support for Chinese, Japanese, Hebrew, and languages using the Latin alphabet
- **[Security management](https://www.meilisearch.com/docs/learn/security/master_api_keys?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** control which users can access what data with API keys that allow fine-grained permissions handling
+- **[Multi-Tenancy](https://www.meilisearch.com/docs/learn/security/tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** personalize search results for any number of application tenants
 - **Highly Customizable:** customize Meilisearch to your specific needs or use our out-of-the-box and hassle-free presets
-- **[RESTful API](https://docs.meilisearch.com/reference/api/overview.html):** integrate Meilisearch in your technical stack with our plugins and SDKs
+- **[RESTful API](https://www.meilisearch.com/docs/reference/api/overview?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=features):** integrate Meilisearch in your technical stack with our plugins and SDKs
 - **Easy to install, deploy, and maintain**
 
 ## 📖 Documentation
 
-You can consult Meilisearch's documentation at [https://docs.meilisearch.com](https://docs.meilisearch.com/).
+You can consult Meilisearch's documentation at [https://www.meilisearch.com/docs](https://www.meilisearch.com/docs/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=docs).
 
 ## 🚀 Getting started
 
-For basic instructions on how to set up Meilisearch, add documents to an index, and search for documents, take a look at our [Quick Start](https://docs.meilisearch.com/learn/getting_started/quick_start.html) guide.
+For basic instructions on how to set up Meilisearch, add documents to an index, and search for documents, take a look at our [Quick Start](https://www.meilisearch.com/docs/learn/getting_started/quick_start?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=get-started) guide.
 
-You may also want to check out [Meilisearch 101](https://docs.meilisearch.com/learn/getting_started/filtering_and_sorting.html) for an introduction to some of Meilisearch's most popular features.
+## ⚡ Supercharge your Meilisearch experience
 
-## ☁️ Meilisearch cloud
-
-Let us manage your infrastructure so you can focus on integrating a great search experience. Try [Meilisearch Cloud](https://meilisearch.com/pricing) today.
+Say goodbye to server deployment and manual updates with [Meilisearch Cloud](https://www.meilisearch.com/cloud?utm_campaign=oss&utm_source=github&utm_medium=meilisearch). No credit card required.
 
 ## 🧰 SDKs & integration tools
 
 Install one of our SDKs in your project for seamless integration between Meilisearch and your favorite language or framework!
 
-Take a look at the complete [Meilisearch integration list](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html).
+Take a look at the complete [Meilisearch integration list](https://www.meilisearch.com/docs/learn/what_is_meilisearch/sdks?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=sdks-link).
 
-[](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html)
+[](https://www.meilisearch.com/docs/learn/what_is_meilisearch/sdks?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=sdks-logos)
 
 ## ⚙️ Advanced usage
 
-Experienced users will want to keep our [API Reference](https://docs.meilisearch.com/reference/api) close at hand.
+Experienced users will want to keep our [API Reference](https://www.meilisearch.com/docs/reference/api/overview?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced) close at hand.
 
-We also offer a wide range of dedicated guides to all Meilisearch features, such as [filtering](https://docs.meilisearch.com/learn/advanced/filtering_and_faceted_search.html), [sorting](https://docs.meilisearch.com/learn/advanced/sorting.html), [geosearch](https://docs.meilisearch.com/learn/advanced/geosearch.html), [API keys](https://docs.meilisearch.com/learn/security/master_api_keys.html), and [tenant tokens](https://docs.meilisearch.com/learn/security/tenant_tokens.html).
+We also offer a wide range of dedicated guides to all Meilisearch features, such as [filtering](https://www.meilisearch.com/docs/learn/fine_tuning_results/filtering?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced), [sorting](https://www.meilisearch.com/docs/learn/fine_tuning_results/sorting?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced), [geosearch](https://www.meilisearch.com/docs/learn/fine_tuning_results/geosearch?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced), [API keys](https://www.meilisearch.com/docs/learn/security/master_api_keys?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced), and [tenant tokens](https://www.meilisearch.com/docs/learn/security/tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced).
 
-Finally, for more in-depth information, refer to our articles explaining fundamental Meilisearch concepts such as [documents](https://docs.meilisearch.com/learn/core_concepts/documents.html) and [indexes](https://docs.meilisearch.com/learn/core_concepts/indexes.html).
+Finally, for more in-depth information, refer to our articles explaining fundamental Meilisearch concepts such as [documents](https://www.meilisearch.com/docs/learn/core_concepts/documents?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced) and [indexes](https://www.meilisearch.com/docs/learn/core_concepts/indexes?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=advanced).
 
 ## 📊 Telemetry
 
-Meilisearch collects **anonymized** data from users to help us improve our product. You can [deactivate this](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html#how-to-disable-data-collection) whenever you want.
+Meilisearch collects **anonymized** data from users to help us improve our product. You can [deactivate this](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) whenever you want.
 
 To request deletion of collected data, please write to us at [privacy@meilisearch.com](mailto:privacy@meilisearch.com). Don't forget to include your `Instance UID` in the message, as this helps us quickly find and delete your data.
 
-If you want to know more about the kind of data we collect and what we use it for, check the [telemetry section](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html) of our documentation.
+If you want to know more about the kind of data we collect and what we use it for, check the [telemetry section](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=telemetry#how-to-disable-data-collection) of our documentation.
 
 ## 📫 Get in touch!
 
-Meilisearch is a search engine created by [Meili](https://www.welcometothejungle.com/en/companies/meilisearch), a software development company based in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/)
+Meilisearch is a search engine created by [Meili](https://www.welcometothejungle.com/en/companies/meilisearch), a software development company based in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
 
 🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
 
@@ -96,8 +99,7 @@ Meilisearch is a search engine created by [Meili](https://www.welcometothejungle
 
 - For feature requests, please visit our [product repository](https://github.com/meilisearch/product/discussions)
 - Found a bug? Open an [issue](https://github.com/meilisearch/meilisearch/issues)!
-- Want to be part of our Discord community? [Join us!](https://discord.gg/meilisearch)
-- For everything else, please check [this page listing some of the other places where you can find us](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html)
+- Want to be part of our Discord community? [Join us!](https://discord.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
 
 Thank you for your support!
assets/grafana-dashboard.json (1393 lines): new file; diff suppressed because it is too large.
assets/profiling-example.png: new binary file (1.2 MiB), not shown.
assets/prometheus-basic-scraper.yml (19 lines): new file
@@ -0,0 +1,19 @@
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'meilisearch'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s

    static_configs:
      - targets: ['localhost:7700']
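(Editor's aside, not part of the diff: assuming a local Prometheus binary, and a Meilisearch instance on localhost:7700 with its experimental metrics route enabled, pointing Prometheus at the file above is enough to start scraping.)

```sh
prometheus --config.file=assets/prometheus-basic-scraper.yml
```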
@@ -11,27 +11,27 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-anyhow = "1.0.65"
-csv = "1.1.6"
-milli = { path = "../milli", default-features = false }
-mimalloc = { version = "0.1.29", default-features = false }
-serde_json = { version = "1.0.85", features = ["preserve_order"] }
+anyhow = "1.0.79"
+csv = "1.3.0"
+milli = { path = "../milli" }
+mimalloc = { version = "0.1.39", default-features = false }
+serde_json = { version = "1.0.111", features = ["preserve_order"] }
 
 [dev-dependencies]
-criterion = { version = "0.4.0", features = ["html_reports"] }
+criterion = { version = "0.5.1", features = ["html_reports"] }
 rand = "0.8.5"
 rand_chacha = "0.3.1"
-roaring = "0.10.1"
+roaring = "0.10.2"
 
 [build-dependencies]
-anyhow = "1.0.65"
-bytes = "1.2.1"
+anyhow = "1.0.79"
+bytes = "1.5.0"
 convert_case = "0.6.0"
-flate2 = "1.0.24"
-reqwest = { version = "0.11.12", features = ["blocking", "rustls-tls"], default-features = false }
+flate2 = "1.0.28"
+reqwest = { version = "0.11.23", features = ["blocking", "rustls-tls"], default-features = false }
 
 [features]
-default = ["milli/default"]
+default = ["milli/all-tokenizations"]
 
 [[bench]]
 name = "search_songs"
@@ -48,7 +48,3 @@ harness = false
 [[bench]]
 name = "indexing"
 harness = false
-
-[[bench]]
-name = "formatting"
-harness = false
@@ -119,9 +119,9 @@ _[Download the `smol-wiki` dataset](https://milli-benchmarks.fra1.digitaloceansp

 ### Movies

-`movies` is a really small dataset we use as our example in the [getting started](https://docs.meilisearch.com/learn/getting_started/)
+`movies` is a really small dataset we use as our example in the [getting started](https://www.meilisearch.com/docs/learn/getting_started/quick_start)

-_[Download the `movies` dataset](https://docs.meilisearch.com/movies.json)._
+_[Download the `movies` dataset](https://www.meilisearch.com/movies.json)._

 ### All Countries
@@ -1,67 +0,0 @@
-use std::rc::Rc;
-
-use criterion::{criterion_group, criterion_main};
-use milli::tokenizer::TokenizerBuilder;
-use milli::{FormatOptions, MatcherBuilder, MatchingWord, MatchingWords};
-
-#[global_allocator]
-static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
-
-struct Conf<'a> {
-    name: &'a str,
-    text: &'a str,
-    matching_words: MatcherBuilder<'a, Vec<u8>>,
-}
-
-fn bench_formatting(c: &mut criterion::Criterion) {
-    #[rustfmt::skip]
-    let confs = &[
-        Conf {
-            name: "'the door d'",
-            text: r#"He used to do the door sounds in "Star Trek" with his mouth, phssst, phssst. The MD-11 passenger and cargo doors also tend to behave like electromagnetic apertures, because the doors do not have continuous electrical contact with the door frames around the door perimeter. But Theodor said that the doors don't work."#,
-            matching_words: MatcherBuilder::new(MatchingWords::new(vec![
-                (vec![Rc::new(MatchingWord::new("t".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("he".to_string(), 0, false).unwrap())], vec![0]),
-                (vec![Rc::new(MatchingWord::new("the".to_string(), 0, false).unwrap())], vec![0]),
-                (vec![Rc::new(MatchingWord::new("door".to_string(), 1, false).unwrap())], vec![1]),
-                (vec![Rc::new(MatchingWord::new("do".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("or".to_string(), 0, false).unwrap())], vec![0]),
-                (vec![Rc::new(MatchingWord::new("thedoor".to_string(), 1, false).unwrap())], vec![0, 1]),
-                (vec![Rc::new(MatchingWord::new("d".to_string(), 0, true).unwrap())], vec![2]),
-                (vec![Rc::new(MatchingWord::new("thedoord".to_string(), 1, true).unwrap())], vec![0, 1, 2]),
-                (vec![Rc::new(MatchingWord::new("doord".to_string(), 1, true).unwrap())], vec![1, 2]),
-            ]
-            ).unwrap(), TokenizerBuilder::default().build()),
-        },
-    ];
-
-    let format_options = &[
-        FormatOptions { highlight: false, crop: None },
-        FormatOptions { highlight: true, crop: None },
-        FormatOptions { highlight: false, crop: Some(10) },
-        FormatOptions { highlight: true, crop: Some(10) },
-        FormatOptions { highlight: false, crop: Some(20) },
-        FormatOptions { highlight: true, crop: Some(20) },
-    ];
-
-    for option in format_options {
-        let highlight = if option.highlight { "highlight" } else { "no-highlight" };
-
-        let name = match option.crop {
-            Some(size) => format!("{}-crop({})", highlight, size),
-            None => format!("{}-no-crop", highlight),
-        };
-
-        let mut group = c.benchmark_group(&name);
-        for conf in confs {
-            group.bench_function(conf.name, |b| {
-                b.iter(|| {
-                    let mut matcher = conf.matching_words.build(conf.text);
-                    matcher.format(*option);
-                })
-            });
-        }
-        group.finish();
-    }
-}
-
-criterion_group!(benches, bench_formatting);
-criterion_main!(benches);
@@ -6,9 +6,7 @@ use std::path::Path;

 use criterion::{criterion_group, criterion_main, Criterion};
 use milli::heed::{EnvOpenOptions, RwTxn};
-use milli::update::{
-    DeleteDocuments, IndexDocuments, IndexDocumentsConfig, IndexerConfig, Settings,
-};
+use milli::update::{IndexDocuments, IndexDocumentsConfig, IndexerConfig, Settings};
 use milli::Index;
 use rand::seq::SliceRandom;
 use rand_chacha::rand_core::SeedableRng;
@@ -38,7 +36,7 @@ fn setup_index() -> Index {
 }

 fn setup_settings<'t>(
-    wtxn: &mut RwTxn<'t, '_>,
+    wtxn: &mut RwTxn<'t>,
     index: &'t Index,
     primary_key: &str,
     searchable_fields: &[&str],
@@ -266,17 +264,7 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
                 (index, document_ids_to_delete)
             },
             move |(index, document_ids_to_delete)| {
-                let mut wtxn = index.write_txn().unwrap();
-
-                for ids in document_ids_to_delete {
-                    let mut builder = DeleteDocuments::new(&mut wtxn, &index).unwrap();
-                    builder.delete_documents(&ids);
-                    builder.execute().unwrap();
-                }
-
-                wtxn.commit().unwrap();
-
-                index.prepare_for_closing().wait();
+                delete_documents_from_ids(index, document_ids_to_delete)
             },
         )
     });
@@ -613,17 +601,7 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
                 (index, document_ids_to_delete)
             },
             move |(index, document_ids_to_delete)| {
-                let mut wtxn = index.write_txn().unwrap();
-
-                for ids in document_ids_to_delete {
-                    let mut builder = DeleteDocuments::new(&mut wtxn, &index).unwrap();
-                    builder.delete_documents(&ids);
-                    builder.execute().unwrap();
-                }
-
-                wtxn.commit().unwrap();
-
-                index.prepare_for_closing().wait();
+                delete_documents_from_ids(index, document_ids_to_delete)
             },
         )
     });
@@ -875,22 +853,31 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
                 (index, document_ids_to_delete)
             },
             move |(index, document_ids_to_delete)| {
-                let mut wtxn = index.write_txn().unwrap();
-
-                for ids in document_ids_to_delete {
-                    let mut builder = DeleteDocuments::new(&mut wtxn, &index).unwrap();
-                    builder.delete_documents(&ids);
-                    builder.execute().unwrap();
-                }
-
-                wtxn.commit().unwrap();
-
-                index.prepare_for_closing().wait();
+                delete_documents_from_ids(index, document_ids_to_delete)
             },
         )
     });
 }

+fn delete_documents_from_ids(index: Index, document_ids_to_delete: Vec<RoaringBitmap>) {
+    let mut wtxn = index.write_txn().unwrap();
+
+    let indexer_config = IndexerConfig::default();
+    for ids in document_ids_to_delete {
+        let config = IndexDocumentsConfig::default();
+
+        let mut builder =
+            IndexDocuments::new(&mut wtxn, &index, &indexer_config, config, |_| (), || false)
+                .unwrap();
+        (builder, _) = builder.remove_documents_from_db_no_batch(&ids).unwrap();
+        builder.execute().unwrap();
+    }
+
+    wtxn.commit().unwrap();
+
+    index.prepare_for_closing().wait();
+}
+
 fn indexing_movies_in_three_batches(c: &mut Criterion) {
     let mut group = c.benchmark_group("indexing");
     group.sample_size(BENCHMARK_ITERATION);
@@ -1112,17 +1099,7 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
                 (index, document_ids_to_delete)
             },
             move |(index, document_ids_to_delete)| {
-                let mut wtxn = index.write_txn().unwrap();
-
-                for ids in document_ids_to_delete {
-                    let mut builder = DeleteDocuments::new(&mut wtxn, &index).unwrap();
-                    builder.delete_documents(&ids);
-                    builder.execute().unwrap();
-                }
-
-                wtxn.commit().unwrap();
-
-                index.prepare_for_closing().wait();
+                delete_documents_from_ids(index, document_ids_to_delete)
             },
         )
     });
@@ -1338,17 +1315,7 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
                 (index, document_ids_to_delete)
             },
             move |(index, document_ids_to_delete)| {
-                let mut wtxn = index.write_txn().unwrap();
-
-                for ids in document_ids_to_delete {
-                    let mut builder = DeleteDocuments::new(&mut wtxn, &index).unwrap();
-                    builder.delete_documents(&ids);
-                    builder.execute().unwrap();
-                }
-
-                wtxn.commit().unwrap();
-
-                index.prepare_for_closing().wait();
+                delete_documents_from_ids(index, document_ids_to_delete)
             },
         )
     });
build-info/Cargo.toml (new file, 18 lines)
@@ -0,0 +1,18 @@
+[package]
+name = "build-info"
+version.workspace = true
+authors.workspace = true
+description.workspace = true
+homepage.workspace = true
+readme.workspace = true
+edition.workspace = true
+license.workspace = true
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+time = { version = "0.3.34", features = ["parsing"] }
+
+[build-dependencies]
+anyhow = "1.0.80"
+vergen-git2 = "1.0.0-beta.2"
build-info/build.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
+fn main() {
+    if let Err(err) = emit_git_variables() {
+        println!("cargo:warning=vergen: {}", err);
+    }
+}
+
+fn emit_git_variables() -> anyhow::Result<()> {
+    // Note: any code that needs VERGEN_ environment variables should take care to define them manually in the Dockerfile and pass them
+    // in the corresponding GitHub workflow (publish_docker.yml).
+    // This is due to the Dockerfile building the binary outside of the git directory.
+    let mut builder = vergen_git2::Git2Builder::default();
+
+    builder.branch(true);
+    builder.commit_timestamp(true);
+    builder.commit_message(true);
+    builder.describe(true, true, None);
+    builder.sha(false);
+
+    let git2 = builder.build()?;
+
+    vergen_git2::Emitter::default().fail_on_error().add_instructions(&git2)?.emit()
+}
build-info/src/lib.rs (new file, 203 lines)
@@ -0,0 +1,203 @@
+use time::format_description::well_known::Iso8601;
+
+#[derive(Debug, Clone)]
+pub struct BuildInfo {
+    pub branch: Option<&'static str>,
+    pub describe: Option<DescribeResult>,
+    pub commit_sha1: Option<&'static str>,
+    pub commit_msg: Option<&'static str>,
+    pub commit_timestamp: Option<time::OffsetDateTime>,
+}
+
+impl BuildInfo {
+    pub fn from_build() -> Self {
+        let branch: Option<&'static str> = option_env!("VERGEN_GIT_BRANCH");
+        let describe = DescribeResult::from_build();
+        let commit_sha1 = option_env!("VERGEN_GIT_SHA");
+        let commit_msg = option_env!("VERGEN_GIT_COMMIT_MESSAGE");
+        let commit_timestamp = option_env!("VERGEN_GIT_COMMIT_TIMESTAMP");
+
+        let commit_timestamp = commit_timestamp.and_then(|commit_timestamp| {
+            time::OffsetDateTime::parse(commit_timestamp, &Iso8601::DEFAULT).ok()
+        });
+
+        Self { branch, describe, commit_sha1, commit_msg, commit_timestamp }
+    }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum DescribeResult {
+    Prototype { name: &'static str },
+    Release { version: &'static str, major: u64, minor: u64, patch: u64 },
+    Prerelease { version: &'static str, major: u64, minor: u64, patch: u64, rc: u64 },
+    NotATag { describe: &'static str },
+}
+
+impl DescribeResult {
+    pub fn new(describe: &'static str) -> Self {
+        if let Some(name) = prototype_name(describe) {
+            Self::Prototype { name }
+        } else if let Some(release) = release_version(describe) {
+            release
+        } else if let Some(prerelease) = prerelease_version(describe) {
+            prerelease
+        } else {
+            Self::NotATag { describe }
+        }
+    }
+
+    pub fn from_build() -> Option<Self> {
+        let describe: &'static str = option_env!("VERGEN_GIT_DESCRIBE")?;
+        Some(Self::new(describe))
+    }
+
+    pub fn as_tag(&self) -> Option<&'static str> {
+        match self {
+            DescribeResult::Prototype { name } => Some(name),
+            DescribeResult::Release { version, .. } => Some(version),
+            DescribeResult::Prerelease { version, .. } => Some(version),
+            DescribeResult::NotATag { describe: _ } => None,
+        }
+    }
+
+    pub fn as_prototype(&self) -> Option<&'static str> {
+        match self {
+            DescribeResult::Prototype { name } => Some(name),
+            DescribeResult::Release { .. }
+            | DescribeResult::Prerelease { .. }
+            | DescribeResult::NotATag { .. } => None,
+        }
+    }
+}
+
+/// Parses the input as a prototype name.
+///
+/// Returns `Some(prototype_name)` if the following conditions are met on this value:
+///
+/// 1. starts with `prototype-`,
+/// 2. ends with `-<some_number>`,
+/// 3. does not end with `<some_number>-<some_number>`.
+///
+/// Otherwise, returns `None`.
+fn prototype_name(describe: &'static str) -> Option<&'static str> {
+    if !describe.starts_with("prototype-") {
+        return None;
+    }
+
+    let mut rsplit_prototype = describe.rsplit('-');
+    // the last component MUST be a number
+    rsplit_prototype.next()?.parse::<u64>().ok()?;
+    // the component before the last SHALL NOT be a number
+    rsplit_prototype.next()?.parse::<u64>().err()?;
+
+    Some(describe)
+}
+
+fn release_version(describe: &'static str) -> Option<DescribeResult> {
+    if !describe.starts_with('v') {
+        return None;
+    }
+
+    // a full release version doesn't contain a `-`
+    if describe.contains('-') {
+        return None;
+    }
+
+    // a full release version parses as vX.Y.Z, with X, Y, Z numbers.
+    let mut dots = describe[1..].split('.');
+    let major: u64 = dots.next()?.parse().ok()?;
+    let minor: u64 = dots.next()?.parse().ok()?;
+    let patch: u64 = dots.next()?.parse().ok()?;
+
+    if dots.next().is_some() {
+        return None;
+    }
+
+    Some(DescribeResult::Release { version: describe, major, minor, patch })
+}
+
+fn prerelease_version(describe: &'static str) -> Option<DescribeResult> {
+    // a prerelease version is in the shape vM.N.P-rc.C
+    let mut hyphen = describe.rsplit('-');
+    let prerelease = hyphen.next()?;
+    if !prerelease.starts_with("rc.") {
+        return None;
+    }
+
+    let rc: u64 = prerelease[3..].parse().ok()?;
+
+    let release = hyphen.next()?;
+
+    let DescribeResult::Release { version: _, major, minor, patch } = release_version(release)?
+    else {
+        return None;
+    };
+
+    Some(DescribeResult::Prerelease { version: describe, major, minor, patch, rc })
+}
+
+#[cfg(test)]
+mod test {
+    use super::DescribeResult;
+
+    fn assert_not_a_tag(describe: &'static str) {
+        assert_eq!(DescribeResult::NotATag { describe }, DescribeResult::new(describe))
+    }
+
+    fn assert_proto(describe: &'static str) {
+        assert_eq!(DescribeResult::Prototype { name: describe }, DescribeResult::new(describe))
+    }
+
+    fn assert_release(describe: &'static str, major: u64, minor: u64, patch: u64) {
+        assert_eq!(
+            DescribeResult::Release { version: describe, major, minor, patch },
+            DescribeResult::new(describe)
+        )
+    }
+
+    fn assert_prerelease(describe: &'static str, major: u64, minor: u64, patch: u64, rc: u64) {
+        assert_eq!(
+            DescribeResult::Prerelease { version: describe, major, minor, patch, rc },
+            DescribeResult::new(describe)
+        )
+    }
+
+    #[test]
+    fn not_a_tag() {
+        assert_not_a_tag("whatever-fuzzy");
+        assert_not_a_tag("whatever-fuzzy-5-ggg-dirty");
+        assert_not_a_tag("whatever-fuzzy-120-ggg-dirty");
+
+        // technically a tag, but not a proto nor a version, so not parsed as a tag
+        assert_not_a_tag("whatever");
+
+        // dirty version
+        assert_not_a_tag("v1.7.0-1-ggga-dirty");
+        assert_not_a_tag("v1.7.0-rc.1-1-ggga-dirty");
+
+        // after version
+        assert_not_a_tag("v1.7.0-1-ggga");
+        assert_not_a_tag("v1.7.0-rc.1-1-ggga");
+
+        // after proto
+        assert_not_a_tag("protoype-tag-0-1-ggga");
+        assert_not_a_tag("protoype-tag-0-1-ggga-dirty");
+    }
+
+    #[test]
+    fn prototype() {
+        assert_proto("prototype-tag-0");
+        assert_proto("prototype-tag-10");
+        assert_proto("prototype-long-name-tag-10");
+    }
+
+    #[test]
+    fn release() {
+        assert_release("v1.7.2", 1, 7, 2);
+    }
+
+    #[test]
+    fn prerelease() {
+        assert_prerelease("v1.7.2-rc.3", 1, 7, 2, 3);
+    }
+}
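The three conditions in the `prototype_name` doc comment above are easy to misread, so here is a small worked example. This test module is not part of the diff; it assumes it sits in `build-info/src/lib.rs` next to `prototype_name`, and the tag names are hypothetical:

```rust
#[cfg(test)]
mod prototype_name_examples {
    use super::prototype_name;

    #[test]
    fn rules_walkthrough() {
        // rules 1 and 2 hold, rule 3 is not violated: this is a prototype tag
        assert_eq!(prototype_name("prototype-my-feature-2"), Some("prototype-my-feature-2"));
        // ends with `<number>-<number>`: `git describe` output *after* the tag, not the tag itself
        assert_eq!(prototype_name("prototype-my-feature-2-1"), None);
        // missing the `prototype-` prefix entirely
        assert_eq!(prototype_name("my-feature-2"), None);
    }
}
```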
config.toml (106 lines changed)
@@ -1,130 +1,134 @@
 # This file shows the default configuration of Meilisearch.
-# All variables are defined here: https://docs.meilisearch.com/learn/configuration/instance_options.html#environment-variables
+# All variables are defined here: https://www.meilisearch.com/docs/learn/configuration/instance_options#environment-variables

-db_path = "./data.ms"
 # Designates the location where database files will be created and retrieved.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#database-path
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#database-path
+db_path = "./data.ms"

-env = "development"
 # Configures the instance's environment. Value must be either `production` or `development`.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#environment
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#environment
+env = "development"

-http_addr = "localhost:7700"
 # The address on which the HTTP server will listen.
+http_addr = "localhost:7700"

-# master_key = "YOUR_MASTER_KEY_VALUE"
 # Sets the instance's master key, automatically protecting all routes except GET /health.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#master-key
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#master-key
+# master_key = "YOUR_MASTER_KEY_VALUE"

-# no_analytics = true
 # Deactivates Meilisearch's built-in telemetry when provided.
 # Meilisearch automatically collects data from all instances that do not opt out using this flag.
 # All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted at any time.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#disable-analytics
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#disable-analytics
+# no_analytics = true

-http_payload_size_limit = "100 MB"
 # Sets the maximum size of accepted payloads.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#payload-limit-size
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#payload-limit-size
+http_payload_size_limit = "100 MB"

-log_level = "INFO"
 # Defines how much detail should be present in Meilisearch's logs.
 # Meilisearch currently supports six log levels, listed in order of increasing verbosity: `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#log-level
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#log-level
+log_level = "INFO"

-# max_indexing_memory = "2 GiB"
 # Sets the maximum amount of RAM Meilisearch can use when indexing.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-indexing-memory
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-memory
+# max_indexing_memory = "2 GiB"

-# max_indexing_threads = 4
 # Sets the maximum number of threads Meilisearch can use during indexing.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-indexing-threads
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-threads
+# max_indexing_threads = 4

 #############
 ### DUMPS ###
 #############

-dump_dir = "dumps/"
 # Sets the directory where Meilisearch will create dump files.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#dump-directory
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#dump-directory
+dump_dir = "dumps/"

-# import_dump = "./path/to/my/file.dump"
 # Imports the dump file located at the specified path. Path must point to a .dump file.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#import-dump
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#import-dump
+# import_dump = "./path/to/my/file.dump"

-ignore_missing_dump = false
 # Prevents Meilisearch from throwing an error when `import_dump` does not point to a valid dump file.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-missing-dump
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-dump
+ignore_missing_dump = false

-ignore_dump_if_db_exists = false
 # Prevents a Meilisearch instance with an existing database from throwing an error when using `import_dump`.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-dump-if-db-exists
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-dump-if-db-exists
+ignore_dump_if_db_exists = false


 #################
 ### SNAPSHOTS ###
 #################

-schedule_snapshot = false
 # Enables scheduled snapshots when true, disable when false (the default).
 # If the value is given as an integer, then enables the scheduled snapshot with the passed value as the interval
 # between each snapshot, in seconds.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#schedule-snapshot-creation
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#schedule-snapshot-creation
+schedule_snapshot = false

-snapshot_dir = "snapshots/"
 # Sets the directory where Meilisearch will store snapshots.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#snapshot-destination
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#snapshot-destination
+snapshot_dir = "snapshots/"

-# import_snapshot = "./path/to/my/snapshot"
 # Launches Meilisearch after importing a previously-generated snapshot at the given filepath.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#import-snapshot
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#import-snapshot
+# import_snapshot = "./path/to/my/snapshot"

-ignore_missing_snapshot = false
 # Prevents a Meilisearch instance from throwing an error when `import_snapshot` does not point to a valid snapshot file.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-missing-snapshot
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-snapshot
+ignore_missing_snapshot = false

-ignore_snapshot_if_db_exists = false
 # Prevents a Meilisearch instance with an existing database from throwing an error when using `import_snapshot`.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-snapshot-if-db-exists
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-snapshot-if-db-exists
+ignore_snapshot_if_db_exists = false


 ###########
 ### SSL ###
 ###########

-# ssl_auth_path = "./path/to/root"
 # Enables client authentication in the specified path.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-authentication-path
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-authentication-path
+# ssl_auth_path = "./path/to/root"

-# ssl_cert_path = "./path/to/certfile"
 # Sets the server's SSL certificates.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-certificates-path
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-certificates-path
+# ssl_cert_path = "./path/to/certfile"

-# ssl_key_path = "./path/to/private-key"
 # Sets the server's SSL key files.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-key-path
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-key-path
+# ssl_key_path = "./path/to/private-key"

-# ssl_ocsp_path = "./path/to/ocsp-file"
 # Sets the server's OCSP file.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-ocsp-path
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-ocsp-path
+# ssl_ocsp_path = "./path/to/ocsp-file"

-ssl_require_auth = false
 # Makes SSL authentication mandatory.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-require-auth
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-require-auth
+ssl_require_auth = false

-ssl_resumption = false
 # Activates SSL session resumption.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-resumption
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-resumption
+ssl_resumption = false

-ssl_tickets = false
 # Activates SSL tickets.
-# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-tickets
+# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-tickets
+ssl_tickets = false

 #############################
 ### Experimental features ###
 #############################

-experimental_enable_metrics = false
-# Experimental metrics feature. For more information, see: <https://github.com/meilisearch/meilisearch/discussions/3518>
+# Enables the Prometheus metrics on the `GET /metrics` endpoint.
+experimental_enable_metrics = false

 # Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
 experimental_reduce_indexing_memory_usage = false
+
+# Experimentally reduces the maximum number of tasks that will be processed at once, see: <https://github.com/orgs/meilisearch/discussions/713>
+# experimental_max_number_of_batched_tasks = 100
@@ -103,7 +103,7 @@ not_available_failure_usage() {
     printf "$RED%s\n$DEFAULT" 'ERROR: Meilisearch binary is not available for your OS distribution or your architecture yet.'
     echo ''
     echo 'However, you can easily compile the binary from the source files.'
-    echo 'Follow the steps at the page ("Source" tab): https://docs.meilisearch.com/learn/getting_started/installation.html'
+    echo 'Follow the steps at the page ("Source" tab): https://www.meilisearch.com/docs/learn/getting_started/installation'
 }

 fetch_release_failure_usage() {
@@ -11,22 +11,22 @@ readme.workspace = true
 license.workspace = true

 [dependencies]
-anyhow = "1.0.65"
-flate2 = "1.0.22"
-http = "0.2.8"
-log = "0.4.17"
+anyhow = "1.0.79"
+flate2 = "1.0.28"
+http = "0.2.11"
 meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
-once_cell = "1.15.0"
-regex = "1.6.0"
-roaring = { version = "0.10.0", features = ["serde"] }
-serde = { version = "1.0.136", features = ["derive"] }
-serde_json = { version = "1.0.85", features = ["preserve_order"] }
-tar = "0.4.38"
-tempfile = "3.3.0"
-thiserror = "1.0.30"
-time = { version = "0.3.7", features = ["serde-well-known", "formatting", "parsing", "macros"] }
-uuid = { version = "1.1.2", features = ["serde", "v4"] }
+once_cell = "1.19.0"
+regex = "1.10.2"
+roaring = { version = "0.10.2", features = ["serde"] }
+serde = { version = "1.0.195", features = ["derive"] }
+serde_json = { version = "1.0.111", features = ["preserve_order"] }
+tar = "0.4.40"
+tempfile = "3.9.0"
+thiserror = "1.0.56"
+time = { version = "0.3.31", features = ["serde-well-known", "formatting", "parsing", "macros"] }
+tracing = "0.1.40"
+uuid = { version = "1.6.1", features = ["serde", "v4"] }

 [dev-dependencies]
 big_s = "1.0.2"
@@ -101,6 +101,9 @@ pub enum KindDump {
         documents_ids: Vec<String>,
     },
     DocumentClear,
+    DocumentDeletionByFilter {
+        filter: serde_json::Value,
+    },
     Settings {
         settings: Box<meilisearch_types::settings::Settings<Unchecked>>,
         is_deletion: bool,
@@ -166,6 +169,9 @@ impl From<KindWithContent> for KindDump {
             KindWithContent::DocumentDeletion { documents_ids, .. } => {
                 KindDump::DocumentDeletion { documents_ids }
             }
+            KindWithContent::DocumentDeletionByFilter { filter_expr, .. } => {
+                KindDump::DocumentDeletionByFilter { filter: filter_expr }
+            }
             KindWithContent::DocumentClear { .. } => KindDump::DocumentClear,
             KindWithContent::SettingsUpdate {
                 new_settings,
@@ -202,12 +208,14 @@ pub(crate) mod test {
     use std::str::FromStr;

     use big_s::S;
-    use maplit::btreeset;
+    use maplit::{btreemap, btreeset};
+    use meilisearch_types::facet_values_sort::FacetValuesSort;
+    use meilisearch_types::features::RuntimeTogglableFeatures;
     use meilisearch_types::index_uid_pattern::IndexUidPattern;
     use meilisearch_types::keys::{Action, Key};
-    use meilisearch_types::milli;
     use meilisearch_types::milli::update::Setting;
-    use meilisearch_types::settings::{Checked, Settings};
+    use meilisearch_types::milli::{self};
+    use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
     use meilisearch_types::tasks::{Details, Status};
     use serde_json::{json, Map, Value};
     use time::macros::datetime;
@@ -254,11 +262,21 @@ pub(crate) mod test {
             sortable_attributes: Setting::Set(btreeset! { S("age") }),
             ranking_rules: Setting::NotSet,
             stop_words: Setting::NotSet,
+            non_separator_tokens: Setting::NotSet,
+            separator_tokens: Setting::NotSet,
+            dictionary: Setting::NotSet,
             synonyms: Setting::NotSet,
             distinct_attribute: Setting::NotSet,
+            proximity_precision: Setting::NotSet,
             typo_tolerance: Setting::NotSet,
-            faceting: Setting::NotSet,
+            faceting: Setting::Set(FacetingSettings {
+                max_values_per_facet: Setting::Set(111),
+                sort_facet_values_by: Setting::Set(
+                    btreemap! { S("age") => FacetValuesSort::Count },
+                ),
+            }),
             pagination: Setting::NotSet,
+            embedders: Setting::NotSet,
             _kind: std::marker::PhantomData,
         };
         settings.check()
@@ -406,6 +424,11 @@ pub(crate) mod test {
         }
         keys.flush().unwrap();

+        // ========== experimental features
+        let features = create_test_features();
+
+        dump.create_experimental_features(features).unwrap();
+
         // create the dump
         let mut file = tempfile::tempfile().unwrap();
         dump.persist_to(&mut file).unwrap();
@@ -414,6 +437,10 @@ pub(crate) mod test {
         file
     }

+    fn create_test_features() -> RuntimeTogglableFeatures {
+        RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
+    }
+
     #[test]
     fn test_creating_and_read_dump() {
         let mut file = create_test_dump();
@@ -458,5 +485,9 @@ pub(crate) mod test {
         for (key, expected) in dump.keys().unwrap().zip(create_test_api_keys()) {
             assert_eq!(key.unwrap(), expected);
         }
+
+        // ==== checking the features
+        let expected = create_test_features();
+        assert_eq!(dump.features().unwrap().unwrap(), expected);
     }
 }
@@ -120,7 +120,7 @@ impl From<v1::settings::Settings> for v2::Settings<v2::Unchecked> {
                     criterion.as_ref().map(ToString::to_string)
                 }
                 Err(()) => {
-                    log::warn!(
+                    tracing::warn!(
                         "Could not import the following ranking rule: `{}`.",
                         ranking_rule
                     );
@@ -152,11 +152,11 @@ impl From<v1::update::UpdateStatus> for Option<v2::updates::UpdateStatus> {
         use v2::updates::UpdateStatus as UpdateStatusV2;
         Some(match source {
             UpdateStatusV1::Enqueued { content } => {
-                log::warn!(
+                tracing::warn!(
                     "Cannot import task {} (importing enqueued tasks from v1 dumps is unsupported)",
                     content.update_id
                 );
-                log::warn!("Task will be skipped in the queue of imported tasks.");
+                tracing::warn!("Task will be skipped in the queue of imported tasks.");

                 return None;
             }
@@ -229,7 +229,7 @@ impl From<v1::update::UpdateType> for Option<v2::updates::UpdateMeta> {
         Some(match source {
             v1::update::UpdateType::ClearAll => v2::updates::UpdateMeta::ClearDocuments,
             v1::update::UpdateType::Customs => {
-                log::warn!("Ignoring task with type 'Customs' that is no longer supported");
+                tracing::warn!("Ignoring task with type 'Customs' that is no longer supported");
                 return None;
             }
             v1::update::UpdateType::DocumentsAddition { .. } => {
@@ -296,7 +296,7 @@ impl From<v1::settings::RankingRule> for Option<v2::settings::Criterion> {
             v1::settings::RankingRule::Proximity => Some(v2::settings::Criterion::Proximity),
             v1::settings::RankingRule::Attribute => Some(v2::settings::Criterion::Attribute),
             v1::settings::RankingRule::WordsPosition => {
-                log::warn!("Removing the 'WordsPosition' ranking rule that is no longer supported, please check the resulting ranking rules of your indexes");
+                tracing::warn!("Removing the 'WordsPosition' ranking rule that is no longer supported, please check the resulting ranking rules of your indexes");
                 None
             }
             v1::settings::RankingRule::Exactness => Some(v2::settings::Criterion::Exactness),
@@ -1,4 +1,3 @@
-use std::convert::TryInto;
 use std::str::FromStr;

 use time::OffsetDateTime;
@@ -25,7 +24,6 @@ impl CompatV2ToV3 {
             CompatV2ToV3::Compat(compat) => compat.index_uuid(),
         };
         v2_uuids
-            .into_iter()
             .into_iter()
             .map(|index| v3::meta::IndexUuid { uid: index.uid, uuid: index.uuid })
             .collect()
@@ -147,8 +145,8 @@ impl From<v2::updates::UpdateStatus> for v3::updates::UpdateStatus {
                     started_processing_at: processing.started_processing_at,
                 }),
                 Err(e) => {
-                    log::warn!("Error with task {}: {}", processing.from.update_id, e);
-                    log::warn!("Task will be marked as `Failed`.");
+                    tracing::warn!("Error with task {}: {}", processing.from.update_id, e);
+                    tracing::warn!("Task will be marked as `Failed`.");
                     v3::updates::UpdateStatus::Failed(v3::updates::Failed {
                         from: v3::updates::Processing {
                             from: v3::updates::Enqueued {
@@ -173,8 +171,8 @@ impl From<v2::updates::UpdateStatus> for v3::updates::UpdateStatus {
                     enqueued_at: enqueued.enqueued_at,
                 }),
                 Err(e) => {
-                    log::warn!("Error with task {}: {}", enqueued.update_id, e);
-                    log::warn!("Task will be marked as `Failed`.");
+                    tracing::warn!("Error with task {}: {}", enqueued.update_id, e);
+                    tracing::warn!("Task will be marked as `Failed`.");
                     v3::updates::UpdateStatus::Failed(v3::updates::Failed {
                         from: v3::updates::Processing {
                             from: v3::updates::Enqueued {
@@ -354,7 +352,7 @@ impl From<String> for v3::Code {
             "malformed_payload" => v3::Code::MalformedPayload,
             "missing_payload" => v3::Code::MissingPayload,
             other => {
-                log::warn!("Unknown error code {}", other);
+                tracing::warn!("Unknown error code {}", other);
                 v3::Code::UnretrievableErrorCode
             }
         }
@@ -76,20 +76,20 @@ impl CompatV3ToV4 {
                 let index_uid = match index_uid {
                     Some(uid) => uid,
                     None => {
-                        log::warn!(
+                        tracing::warn!(
                             "Error while importing the update {}.",
                             task.update.id()
                         );
-                        log::warn!(
+                        tracing::warn!(
                             "The index associated to the uuid `{}` could not be retrieved.",
                             task.uuid.to_string()
                         );
                         if task.update.is_finished() {
                             // we're fucking with his history but not his data, that's ok-ish.
-                            log::warn!("The index-uuid will be set as `unknown`.");
+                            tracing::warn!("The index-uuid will be set as `unknown`.");
                             String::from("unknown")
                         } else {
-                            log::warn!("The task will be ignored.");
+                            tracing::warn!("The task will be ignored.");
                             return None;
                         }
                     }
@@ -305,7 +305,7 @@ impl From<v4::ResponseError> for v5::ResponseError {
             "invalid_api_key_expires_at" => v5::Code::InvalidApiKeyExpiresAt,
             "invalid_api_key_description" => v5::Code::InvalidApiKeyDescription,
             other => {
-                log::warn!("Unknown error code {}", other);
+                tracing::warn!("Unknown error code {}", other);
                 v5::Code::UnretrievableErrorCode
             }
         };
@@ -191,6 +191,10 @@ impl CompatV5ToV6 {
             })
         })))
     }
+
+    pub fn features(&self) -> Result<Option<v6::RuntimeTogglableFeatures>> {
+        Ok(None)
+    }
 }

 pub enum CompatIndexV5ToV6 {
@@ -300,7 +304,7 @@ impl From<v5::ResponseError> for v6::ResponseError {
             "immutable_field" => v6::Code::BadRequest,
             "api_key_already_exists" => v6::Code::ApiKeyAlreadyExists,
             other => {
-                log::warn!("Unknown error code {}", other);
+                tracing::warn!("Unknown error code {}", other);
                 v6::Code::UnretrievableErrorCode
             }
         };
@@ -325,7 +329,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
                     new_ranking_rules.push(new_rule);
                 }
                 Err(_) => {
-                    log::warn!("Error while importing settings. The ranking rule `{rule}` does not exist anymore.")
+                    tracing::warn!("Error while importing settings. The ranking rule `{rule}` does not exist anymore.")
                 }
             }
         }
@@ -336,8 +340,12 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
                 }
             },
             stop_words: settings.stop_words.into(),
+            non_separator_tokens: v6::Setting::NotSet,
+            separator_tokens: v6::Setting::NotSet,
+            dictionary: v6::Setting::NotSet,
             synonyms: settings.synonyms.into(),
             distinct_attribute: settings.distinct_attribute.into(),
+            proximity_precision: v6::Setting::NotSet,
             typo_tolerance: match settings.typo_tolerance {
                 v5::Setting::Set(typo) => v6::Setting::Set(v6::TypoTolerance {
                     enabled: typo.enabled.into(),
@@ -358,6 +366,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
             faceting: match settings.faceting {
                 v5::Setting::Set(faceting) => v6::Setting::Set(v6::FacetingSettings {
                     max_values_per_facet: faceting.max_values_per_facet.into(),
+                    sort_facet_values_by: v6::Setting::NotSet,
                 }),
                 v5::Setting::Reset => v6::Setting::Reset,
                 v5::Setting::NotSet => v6::Setting::NotSet,
@@ -369,6 +378,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
                 v5::Setting::Reset => v6::Setting::Reset,
                 v5::Setting::NotSet => v6::Setting::NotSet,
             },
+            embedders: v6::Setting::NotSet,
             _kind: std::marker::PhantomData,
         }
     }
@@ -13,12 +13,12 @@ use crate::{Result, Version};

 mod compat;

-pub(self) mod v1;
-pub(self) mod v2;
-pub(self) mod v3;
-pub(self) mod v4;
-pub(self) mod v5;
-pub(self) mod v6;
+mod v1;
+mod v2;
+mod v3;
+mod v4;
+mod v5;
+mod v6;

 pub type Document = serde_json::Map<String, serde_json::Value>;
 pub type UpdateFile = dyn Iterator<Item = Result<Document>>;
@@ -107,6 +107,13 @@ impl DumpReader {
             DumpReader::Compat(compat) => compat.keys(),
         }
     }
+
+    pub fn features(&self) -> Result<Option<v6::RuntimeTogglableFeatures>> {
+        match self {
+            DumpReader::Current(current) => Ok(current.features()),
+            DumpReader::Compat(compat) => compat.features(),
+        }
+    }
 }

 impl From<V6Reader> for DumpReader {
@@ -188,6 +195,53 @@ pub(crate) mod test {
     use meili_snap::insta;

     use super::*;
+    use crate::reader::v6::RuntimeTogglableFeatures;
+
+    #[test]
+    fn import_dump_v6_experimental() {
+        let dump = File::open("tests/assets/v6-with-experimental.dump").unwrap();
+        let mut dump = DumpReader::open(dump).unwrap();
+
+        // top level infos
+        insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-07-06 7:10:27.21958 +00:00:00");
+        insta::assert_debug_snapshot!(dump.instance_uid().unwrap(), @"None");
+
+        // tasks
+        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
+        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
+        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"d45cd8571703e58ae53c7bd7ce3f5c22");
+        assert_eq!(update_files.len(), 2);
+        assert!(update_files[0].is_none()); // the dump creation
+        assert!(update_files[1].is_none()); // the processed document addition
+
+        // keys
+        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
+        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"13c2da155e9729c2344688cab29af71d");
+
+        // indexes
+        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
+        // the index are not ordered in any way by default
+        indexes.sort_by_key(|index| index.metadata().uid.to_string());
+
+        let mut test = indexes.pop().unwrap();
+        assert!(indexes.is_empty());
+
+        insta::assert_json_snapshot!(test.metadata(), @r###"
+        {
+          "uid": "test",
+          "primaryKey": "id",
+          "createdAt": "2023-07-06T07:07:41.364694Z",
+          "updatedAt": "2023-07-06T07:07:41.396114Z"
+        }
+        "###);
+
+        assert_eq!(test.documents().unwrap().count(), 1);
+
+        assert_eq!(
+            dump.features().unwrap().unwrap(),
+            RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
+        );
+    }

     #[test]
     fn import_dump_v5() {
@@ -265,6 +319,8 @@ pub(crate) mod test {
         let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 10);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
+
+        assert_eq!(dump.features().unwrap(), None);
     }

     #[test]
@@ -470,12 +526,12 @@ pub(crate) mod test {
         assert!(indexes.is_empty());

         // products
-        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(products.metadata(), @r###"
         {
           "uid": "products",
           "primaryKey": "sku",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-09T20:27:22.688964637Z",
+          "updatedAt": "2022-10-09T20:27:23.951017769Z"
         }
         "###);

@@ -485,12 +541,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

         // movies
-        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(movies.metadata(), @r###"
         {
           "uid": "movies",
           "primaryKey": "id",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-09T20:27:22.197788495Z",
+          "updatedAt": "2022-10-09T20:28:01.93111053Z"
         }
         "###);

@@ -515,12 +571,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

         // spells
-        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(spells.metadata(), @r###"
         {
           "uid": "dnd_spells",
           "primaryKey": "index",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-09T20:27:24.242683494Z",
+          "updatedAt": "2022-10-09T20:27:24.312809641Z"
         }
         "###);

@@ -561,12 +617,12 @@ pub(crate) mod test {
         assert!(indexes.is_empty());

         // products
-        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(products.metadata(), @r###"
         {
           "uid": "products",
           "primaryKey": "sku",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2023-01-30T16:25:56.595257Z",
+          "updatedAt": "2023-01-30T16:25:58.70348Z"
         }
         "###);

@@ -576,12 +632,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

         // movies
-        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(movies.metadata(), @r###"
         {
           "uid": "movies",
           "primaryKey": "id",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2023-01-30T16:25:56.192178Z",
+          "updatedAt": "2023-01-30T16:25:56.455714Z"
         }
         "###);

@@ -591,12 +647,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");

         // spells
-        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(spells.metadata(), @r###"
         {
           "uid": "dnd_spells",
           "primaryKey": "index",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2023-01-30T16:25:58.876405Z",
+          "updatedAt": "2023-01-30T16:25:59.079906Z"
         }
         "###);
@@ -56,8 +56,7 @@ pub enum RankingRule {
     Desc(String),
 }

-static ASC_DESC_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap());
+static ASC_DESC_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(asc|desc)\(([\w_-]+)\)").unwrap());

 impl FromStr for RankingRule {
     type Err = ();
@@ -46,6 +46,7 @@ pub type Checked = settings::Checked;
 pub type Unchecked = settings::Unchecked;

 pub type Task = updates::UpdateEntry;
+pub type Kind = updates::UpdateMeta;

 // everything related to the errors
 pub type ResponseError = errors::ResponseError;
@@ -107,8 +108,11 @@ impl V2Reader {
     pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V2IndexReader>> + '_> {
         Ok(self.index_uuid.iter().map(|index| -> Result<_> {
             V2IndexReader::new(
-                index.uid.clone(),
                 &self.dump.path().join("indexes").join(format!("index-{}", index.uuid)),
+                index,
+                BufReader::new(
+                    File::open(self.dump.path().join("updates").join("data.jsonl")).unwrap(),
+                ),
             )
         }))
     }
@@ -143,16 +147,41 @@ pub struct V2IndexReader {
 }

 impl V2IndexReader {
-    pub fn new(name: String, path: &Path) -> Result<Self> {
+    pub fn new(path: &Path, index_uuid: &IndexUuid, tasks: BufReader<File>) -> Result<Self> {
         let meta = File::open(path.join("meta.json"))?;
         let meta: DumpMeta = serde_json::from_reader(meta)?;

+        let mut created_at = None;
+        let mut updated_at = None;
+
+        for line in tasks.lines() {
+            let task: Task = serde_json::from_str(&line?)?;
+            if !(task.uuid == index_uuid.uuid && task.is_finished()) {
+                continue;
+            }
+
+            let new_created_at = match task.update.meta() {
+                Kind::DocumentsAddition { .. } | Kind::Settings(_) => task.update.finished_at(),
+                _ => None,
+            };
+            let new_updated_at = task.update.finished_at();
+
+            if created_at.is_none() || created_at > new_created_at {
+                created_at = new_created_at;
+            }
+
+            if updated_at.is_none() || updated_at < new_updated_at {
+                updated_at = new_updated_at;
+            }
+        }
+
+        let current_time = OffsetDateTime::now_utc();
+
         let metadata = IndexMetadata {
-            uid: name,
+            uid: index_uuid.uid.clone(),
             primary_key: meta.primary_key,
-            // FIXME: Iterate over the whole task queue to find the creation and last update date.
-            created_at: OffsetDateTime::now_utc(),
-            updated_at: OffsetDateTime::now_utc(),
+            created_at: created_at.unwrap_or(current_time),
+            updated_at: updated_at.unwrap_or(current_time),
         };

         let ret = V2IndexReader {
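The task-queue scan in `V2IndexReader::new` above replaces the old FIXME: v2 dumps never stored index timestamps, so `created_at` is recovered as the earliest `finished_at` among content-creating tasks (document additions and settings updates) and `updated_at` as the latest `finished_at` of any finished task. A standalone sketch of the same min/max fold, using hypothetical integer timestamps in place of `OffsetDateTime`:

```rust
/// `(is_content_task, finished_at)` pairs for one index's finished tasks.
fn recover_timestamps(tasks: &[(bool, i64)]) -> (Option<i64>, Option<i64>) {
    // earliest finish among document additions / settings updates
    let created_at = tasks.iter().filter(|(content, _)| *content).map(|&(_, t)| t).min();
    // latest finish among all finished tasks
    let updated_at = tasks.iter().map(|&(_, t)| t).max();
    (created_at, updated_at)
}

fn main() {
    // two document additions finished at t=3 and t=10, one other task at t=12
    let tasks = [(true, 3), (true, 10), (false, 12)];
    assert_eq!(recover_timestamps(&tasks), (Some(3), Some(12)));
}
```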
@@ -248,12 +277,12 @@ pub(crate) mod test {
         assert!(indexes.is_empty());

         // products
-        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(products.metadata(), @r###"
         {
           "uid": "products",
           "primaryKey": "sku",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-09T20:27:22.688964637Z",
+          "updatedAt": "2022-10-09T20:27:23.951017769Z"
         }
         "###);

@@ -263,12 +292,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

         // movies
-        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(movies.metadata(), @r###"
         {
           "uid": "movies",
           "primaryKey": "id",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-09T20:27:22.197788495Z",
+          "updatedAt": "2022-10-09T20:28:01.93111053Z"
         }
         "###);

@@ -293,12 +322,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

         // spells
-        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(spells.metadata(), @r###"
         {
           "uid": "dnd_spells",
           "primaryKey": "index",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-09T20:27:24.242683494Z",
+          "updatedAt": "2022-10-09T20:27:24.312809641Z"
         }
         "###);

@@ -340,12 +369,12 @@ pub(crate) mod test {
         assert!(indexes.is_empty());

         // products
-        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(products.metadata(), @r###"
         {
           "uid": "products",
           "primaryKey": "sku",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2023-01-30T16:25:56.595257Z",
+          "updatedAt": "2023-01-30T16:25:58.70348Z"
         }
         "###);

@@ -355,12 +384,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

         // movies
-        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(movies.metadata(), @r###"
         {
           "uid": "movies",
           "primaryKey": "id",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2023-01-30T16:25:56.192178Z",
+          "updatedAt": "2023-01-30T16:25:56.455714Z"
         }
         "###);

@@ -370,12 +399,12 @@ pub(crate) mod test {
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");

         // spells
-        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(spells.metadata(), @r###"
         {
           "uid": "dnd_spells",
           "primaryKey": "index",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2023-01-30T16:25:58.876405Z",
+          "updatedAt": "2023-01-30T16:25:59.079906Z"
         }
         "###);
@@ -227,4 +227,14 @@ impl UpdateStatus {
             _ => None,
         }
     }
+
+    pub fn finished_at(&self) -> Option<OffsetDateTime> {
+        match self {
+            UpdateStatus::Processing(_) => None,
+            UpdateStatus::Enqueued(_) => None,
+            UpdateStatus::Processed(u) => Some(u.processed_at),
+            UpdateStatus::Aborted(_) => None,
+            UpdateStatus::Failed(u) => Some(u.failed_at),
+        }
+    }
 }
@@ -1,5 +1,6 @@
 use serde::{Deserialize, Serialize};

+#[allow(clippy::enum_variant_names)]
 #[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum Code {
     // index related error
@@ -95,6 +95,7 @@ impl fmt::Display for ErrorType {
     }
 }

+#[allow(clippy::enum_variant_names)]
 #[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum Code {
     // index related error
@@ -31,6 +31,7 @@ impl ResponseError {
     }
 }

+#[allow(clippy::enum_variant_names)]
 #[derive(Deserialize, Debug, Clone, Copy)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub enum Code {
@@ -5,6 +5,7 @@ use std::path::Path;
 pub use meilisearch_types::milli;
 use tempfile::TempDir;
 use time::OffsetDateTime;
+use tracing::debug;
 use uuid::Uuid;

 use super::Document;

@@ -18,6 +19,7 @@ pub type Unchecked = meilisearch_types::settings::Unchecked;

 pub type Task = crate::TaskDump;
 pub type Key = meilisearch_types::keys::Key;
+pub type RuntimeTogglableFeatures = meilisearch_types::features::RuntimeTogglableFeatures;

 // ===== Other types to clarify the code of the compat module
 // everything related to the tasks

@@ -47,6 +49,7 @@ pub struct V6Reader {
     metadata: Metadata,
     tasks: BufReader<File>,
     keys: BufReader<File>,
+    features: Option<RuntimeTogglableFeatures>,
 }

 impl V6Reader {

@@ -58,11 +61,29 @@ impl V6Reader {
             Err(e) => return Err(e.into()),
         };

+        let feature_file = match fs::read(dump.path().join("experimental-features.json")) {
+            Ok(feature_file) => Some(feature_file),
+            Err(error) => match error.kind() {
+                // Allows the file to be missing, this will only result in all experimental features disabled.
+                ErrorKind::NotFound => {
+                    debug!("`experimental-features.json` not found in dump");
+                    None
+                }
+                _ => return Err(error.into()),
+            },
+        };
+        let features = if let Some(feature_file) = feature_file {
+            Some(serde_json::from_reader(&*feature_file)?)
+        } else {
+            None
+        };
+
         Ok(V6Reader {
             metadata: serde_json::from_reader(&*meta_file)?,
             instance_uid,
             tasks: BufReader::new(File::open(dump.path().join("tasks").join("queue.jsonl"))?),
             keys: BufReader::new(File::open(dump.path().join("keys.jsonl"))?),
+            features,
             dump,
         })
     }

@@ -129,6 +150,10 @@ impl V6Reader {
             (&mut self.keys).lines().map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) }),
         )
     }
+
+    pub fn features(&self) -> Option<RuntimeTogglableFeatures> {
+        self.features
+    }
 }

 pub struct UpdateFile {
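The reader now treats a missing `experimental-features.json` as an empty feature set rather than a hard error. A self-contained sketch of that tolerant-read pattern (standalone function, not the crate's actual code):

use std::io::ErrorKind;
use std::path::Path;

// Missing file => Ok(None); any other I/O failure still propagates.
fn read_optional_file(dir: &Path) -> std::io::Result<Option<Vec<u8>>> {
    match std::fs::read(dir.join("experimental-features.json")) {
        Ok(bytes) => Ok(Some(bytes)),
        Err(error) if error.kind() == ErrorKind::NotFound => Ok(None),
        Err(error) => Err(error),
    }
}

fn main() {
    // A directory that does not exist behaves like a dump without the file.
    assert!(read_optional_file(Path::new("/nonexistent")).unwrap().is_none());
}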
@@ -4,6 +4,7 @@ use std::path::PathBuf;

 use flate2::write::GzEncoder;
 use flate2::Compression;
+use meilisearch_types::features::RuntimeTogglableFeatures;
 use meilisearch_types::keys::Key;
 use meilisearch_types::settings::{Checked, Settings};
 use serde_json::{Map, Value};

@@ -53,6 +54,13 @@ impl DumpWriter {
         TaskWriter::new(self.dir.path().join("tasks"))
     }

+    pub fn create_experimental_features(&self, features: RuntimeTogglableFeatures) -> Result<()> {
+        Ok(std::fs::write(
+            self.dir.path().join("experimental-features.json"),
+            serde_json::to_string(&features)?,
+        )?)
+    }
+
     pub fn persist_to(self, mut writer: impl Write) -> Result<()> {
         let gz_encoder = GzEncoder::new(&mut writer, Compression::default());
         let mut tar_encoder = tar::Builder::new(gz_encoder);

@@ -284,6 +292,7 @@ pub(crate) mod test {
         │ ├---- update_files/
         │ │ └---- 1.jsonl
         │ └---- queue.jsonl
+        ├---- experimental-features.json
         ├---- instance_uid.uuid
         ├---- keys.jsonl
         └---- metadata.json
BIN  dump/tests/assets/v6-with-experimental.dump (new file)
Binary file not shown.
@@ -11,9 +11,10 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-tempfile = "3.3.0"
-thiserror = "1.0.30"
-uuid = { version = "1.1.2", features = ["serde", "v4"] }
+tempfile = "3.9.0"
+thiserror = "1.0.56"
+tracing = "0.1.40"
+uuid = { version = "1.6.1", features = ["serde", "v4"] }

 [dev-dependencies]
-faux = "0.1.8"
+faux = "0.1.10"
@@ -1,5 +1,5 @@
 use std::fs::File as StdFile;
-use std::ops::{Deref, DerefMut};
+use std::io::Write;
 use std::path::{Path, PathBuf};
 use std::str::FromStr;

@@ -22,20 +22,6 @@ pub enum Error {

 pub type Result<T> = std::result::Result<T, Error>;

-impl Deref for File {
-    type Target = NamedTempFile;
-
-    fn deref(&self) -> &Self::Target {
-        &self.file
-    }
-}
-
-impl DerefMut for File {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.file
-    }
-}
-
 #[derive(Clone, Debug)]
 pub struct FileStore {
     path: PathBuf,

@@ -56,7 +42,7 @@ impl FileStore {
         let file = NamedTempFile::new_in(&self.path)?;
         let uuid = Uuid::new_v4();
         let path = self.path.join(uuid.to_string());
-        let update_file = File { file, path };
+        let update_file = File { file: Some(file), path };

         Ok((uuid, update_file))
     }

@@ -67,7 +53,7 @@ impl FileStore {
         let file = NamedTempFile::new_in(&self.path)?;
         let uuid = Uuid::from_u128(uuid);
         let path = self.path.join(uuid.to_string());
-        let update_file = File { file, path };
+        let update_file = File { file: Some(file), path };

         Ok((uuid, update_file))
     }

@@ -75,7 +61,13 @@ impl FileStore {
     /// Returns the file corresponding to the requested uuid.
     pub fn get_update(&self, uuid: Uuid) -> Result<StdFile> {
         let path = self.get_update_path(uuid);
-        let file = StdFile::open(path)?;
+        let file = match StdFile::open(path) {
+            Ok(file) => file,
+            Err(e) => {
+                tracing::error!("Can't access update file {uuid}: {e}");
+                return Err(e.into());
+            }
+        };
         Ok(file)
     }

@@ -110,8 +102,12 @@ impl FileStore {

     pub fn delete(&self, uuid: Uuid) -> Result<()> {
         let path = self.path.join(uuid.to_string());
-        std::fs::remove_file(path)?;
-        Ok(())
+        if let Err(e) = std::fs::remove_file(path) {
+            tracing::error!("Can't delete file {uuid}: {e}");
+            Err(e.into())
+        } else {
+            Ok(())
+        }
     }

     /// List the Uuids of the files in the FileStore

@@ -136,16 +132,40 @@ impl FileStore {

 pub struct File {
     path: PathBuf,
-    file: NamedTempFile,
+    file: Option<NamedTempFile>,
 }

 impl File {
+    pub fn dry_file() -> Result<Self> {
+        Ok(Self { path: PathBuf::new(), file: None })
+    }
+
     pub fn persist(self) -> Result<()> {
-        self.file.persist(&self.path)?;
+        if let Some(file) = self.file {
+            file.persist(&self.path)?;
+        }
         Ok(())
     }
 }

+impl Write for File {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        if let Some(file) = self.file.as_mut() {
+            file.write(buf)
+        } else {
+            Ok(buf.len())
+        }
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        if let Some(file) = self.file.as_mut() {
+            file.flush()
+        } else {
+            Ok(())
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use std::io::Write;
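With `file` now an `Option<NamedTempFile>`, a "dry" file accepts writes but never touches disk. A sketch of that behavior, assuming the crate is consumed as `file_store` and eliding error handling with `unwrap`:

use std::io::Write;

fn main() {
    // A dry file reports successful writes while discarding the bytes...
    let mut file = file_store::File::dry_file().unwrap();
    file.write_all(b"discarded bytes").unwrap();
    // ...and persist() is a no-op because there is no backing NamedTempFile.
    file.persist().unwrap();
}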
@@ -12,8 +12,9 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-nom = "7.1.1"
-nom_locate = "4.0.0"
+nom = "7.1.3"
+nom_locate = "4.2.0"
+unescaper = "0.1.3"

 [dev-dependencies]
-insta = "1.21.0"
+insta = "1.34.0"
@@ -20,6 +20,8 @@ pub enum Condition<'a> {
     GreaterThanOrEqual(Token<'a>),
     Equal(Token<'a>),
     NotEqual(Token<'a>),
+    Null,
+    Empty,
     Exists,
     LowerThan(Token<'a>),
     LowerThanOrEqual(Token<'a>),

@@ -44,6 +46,38 @@ pub fn parse_condition(input: Span) -> IResult<FilterCondition> {
     Ok((input, condition))
 }

+/// null = value "IS" WS+ "NULL"
+pub fn parse_is_null(input: Span) -> IResult<FilterCondition> {
+    let (input, key) = parse_value(input)?;
+
+    let (input, _) = tuple((tag("IS"), multispace1, tag("NULL")))(input)?;
+    Ok((input, FilterCondition::Condition { fid: key, op: Null }))
+}
+
+/// null = value "IS" WS+ "NOT" WS+ "NULL"
+pub fn parse_is_not_null(input: Span) -> IResult<FilterCondition> {
+    let (input, key) = parse_value(input)?;
+
+    let (input, _) = tuple((tag("IS"), multispace1, tag("NOT"), multispace1, tag("NULL")))(input)?;
+    Ok((input, FilterCondition::Not(Box::new(FilterCondition::Condition { fid: key, op: Null }))))
+}
+
+/// empty = value "IS" WS+ "EMPTY"
+pub fn parse_is_empty(input: Span) -> IResult<FilterCondition> {
+    let (input, key) = parse_value(input)?;
+
+    let (input, _) = tuple((tag("IS"), multispace1, tag("EMPTY")))(input)?;
+    Ok((input, FilterCondition::Condition { fid: key, op: Empty }))
+}
+
+/// empty = value "IS" WS+ "NOT" WS+ "EMPTY"
+pub fn parse_is_not_empty(input: Span) -> IResult<FilterCondition> {
+    let (input, key) = parse_value(input)?;
+
+    let (input, _) = tuple((tag("IS"), multispace1, tag("NOT"), multispace1, tag("EMPTY")))(input)?;
+    Ok((input, FilterCondition::Not(Box::new(FilterCondition::Condition { fid: key, op: Empty }))))
+}
+
 /// exist = value "EXISTS"
 pub fn parse_exists(input: Span) -> IResult<FilterCondition> {
     let (input, key) = terminated(parse_value, tag("EXISTS"))(input)?;
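Assuming the crate is consumed as `filter_parser`, the new grammar rules surface like this; the expected `Display` output is taken from the test snapshots further below:

use filter_parser::FilterCondition;

fn main() {
    // `IS NULL` parses into a plain condition...
    let cond = FilterCondition::parse("subscribers IS NULL").unwrap().unwrap();
    assert_eq!(cond.to_string(), "{subscribers} IS NULL");

    // ...while `IS NOT NULL` is normalized into a negated `IS NULL`.
    let cond = FilterCondition::parse("subscribers IS NOT NULL").unwrap().unwrap();
    assert_eq!(cond.to_string(), "NOT ({subscribers} IS NULL)");
}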
@@ -62,6 +62,7 @@ pub enum ErrorKind<'a> {
     MisusedGeoRadius,
     MisusedGeoBoundingBox,
     InvalidPrimary,
+    InvalidEscapedNumber,
     ExpectedEof,
     ExpectedValue(ExpectedValueKind),
     MalformedValue,

@@ -143,11 +144,12 @@ impl<'a> Display for Error<'a> {
             ErrorKind::MissingClosingDelimiter(c) => {
                 writeln!(f, "Expression `{}` is missing the following closing delimiter: `{}`.", escaped_input, c)?
             }
-            ErrorKind::InvalidPrimary if input.trim().is_empty() => {
-                writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` but instead got nothing.")?
-            }
             ErrorKind::InvalidPrimary => {
-                writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `{}`.", escaped_input)?
+                let text = if input.trim().is_empty() { "but instead got nothing.".to_string() } else { format!("at `{}`.", escaped_input) };
+                writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` {}", text)?
             }
+            ErrorKind::InvalidEscapedNumber => {
+                writeln!(f, "Found an invalid escaped sequence number: `{}`.", escaped_input)?
+            }
             ErrorKind::ExpectedEof => {
                 writeln!(f, "Found unexpected characters at the end of the filter: `{}`. You probably forgot an `OR` or an `AND` rule.", escaped_input)?

@@ -159,7 +161,7 @@ impl<'a> Display for Error<'a> {
                 writeln!(f, "The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.")?
             }
             ErrorKind::ReservedGeo(name) => {
-                writeln!(f, "`{}` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance), or _geoBoundingBox([latitude, longitude], [latitude, longitude]) built-in rules to filter on `_geo` coordinates.", name.escape_debug())?
+                writeln!(f, "`{}` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.", name.escape_debug())?
             }
             ErrorKind::MisusedGeoRadius => {
                 writeln!(f, "The `_geoRadius` filter is an operation and can't be used as a value.")?
@@ -47,7 +47,10 @@ mod value;
 use std::fmt::Debug;

 pub use condition::{parse_condition, parse_to, Condition};
-use condition::{parse_exists, parse_not_exists};
+use condition::{
+    parse_exists, parse_is_empty, parse_is_not_empty, parse_is_not_null, parse_is_null,
+    parse_not_exists,
+};
 use error::{cut_with_err, ExpectedValueKind, NomErrorExt};
 pub use error::{Error, ErrorKind};
 use nom::branch::alt;

@@ -141,7 +144,7 @@ pub enum FilterCondition<'a> {
     Or(Vec<Self>),
     And(Vec<Self>),
     GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a> },
-    GeoBoundingBox { top_left_point: [Token<'a>; 2], bottom_right_point: [Token<'a>; 2] },
+    GeoBoundingBox { top_right_point: [Token<'a>; 2], bottom_left_point: [Token<'a>; 2] },
 }

 impl<'a> FilterCondition<'a> {

@@ -362,8 +365,8 @@ fn parse_geo_bounding_box(input: Span) -> IResult<FilterCondition> {
     }

     let res = FilterCondition::GeoBoundingBox {
-        top_left_point: [args[0][0].into(), args[0][1].into()],
-        bottom_right_point: [args[1][0].into(), args[1][1].into()],
+        top_right_point: [args[0][0].into(), args[0][1].into()],
+        bottom_left_point: [args[1][0].into(), args[1][1].into()],
     };
     Ok((input, res))
 }
@@ -382,6 +385,34 @@ fn parse_geo_point(input: Span) -> IResult<FilterCondition> {
     Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoPoint"))))
 }

+/// geoPoint = WS* "_geoDistance(float WS* "," WS* float WS* "," WS* float)
+fn parse_geo_distance(input: Span) -> IResult<FilterCondition> {
+    // we want to forbid space BEFORE the _geoDistance but not after
+    tuple((
+        multispace0,
+        tag("_geoDistance"),
+        // if we were able to parse `_geoDistance` we are going to return a Failure whatever happens next.
+        cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
+    ))(input)
+    .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoDistance"))))?;
+    // if we succeeded we still return a `Failure` because `geoDistance` filters are not allowed
+    Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoDistance"))))
+}
+
+/// geo = WS* "_geo(float WS* "," WS* float WS* "," WS* float)
+fn parse_geo(input: Span) -> IResult<FilterCondition> {
+    // we want to forbid space BEFORE the _geo but not after
+    tuple((
+        multispace0,
+        word_exact("_geo"),
+        // if we were able to parse `_geo` we are going to return a Failure whatever happens next.
+        cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
+    ))(input)
+    .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::ReservedGeo("_geo"))))?;
+    // if we succeeded we still return a `Failure` because `_geo` filter is not allowed
+    Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geo"))))
+}
+
 fn parse_error_reserved_keyword(input: Span) -> IResult<FilterCondition> {
     match parse_condition(input) {
         Ok(result) => Ok(result),
@@ -414,10 +445,16 @@ fn parse_primary(input: Span, depth: usize) -> IResult<FilterCondition> {
         parse_in,
         parse_not_in,
         parse_condition,
+        parse_is_null,
+        parse_is_not_null,
+        parse_is_empty,
+        parse_is_not_empty,
         parse_exists,
         parse_not_exists,
         parse_to,
         // the next lines are only for error handling and are written at the end to have the less possible performance impact
+        parse_geo,
+        parse_geo_distance,
         parse_geo_point,
         parse_error_reserved_keyword,
     ))(input)
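The ordering of that `alt` list matters: nom tries alternatives top to bottom, so the cheap, likely parsers come first and the error-reporting fallbacks last. A reduced illustration of the mechanism using plain nom 7, not the crate's own parsers:

use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::IResult;

// `alt` returns the first alternative that succeeds, so order encodes priority.
fn keyword(input: &str) -> IResult<&str, &str> {
    alt((tag("EXISTS"), tag("EMPTY"), tag("NULL")))(input)
}

fn main() {
    assert_eq!(keyword("EXISTS foo"), Ok((" foo", "EXISTS")));
    assert!(keyword("UNKNOWN").is_err());
}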
@@ -435,8 +472,81 @@ pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
     terminated(|input| parse_expression(input, 0), eof)(input)
 }

+impl<'a> std::fmt::Display for FilterCondition<'a> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            FilterCondition::Not(filter) => {
+                write!(f, "NOT ({filter})")
+            }
+            FilterCondition::Condition { fid, op } => {
+                write!(f, "{fid} {op}")
+            }
+            FilterCondition::In { fid, els } => {
+                write!(f, "{fid} IN[")?;
+                for el in els {
+                    write!(f, "{el}, ")?;
+                }
+                write!(f, "]")
+            }
+            FilterCondition::Or(els) => {
+                write!(f, "OR[")?;
+                for el in els {
+                    write!(f, "{el}, ")?;
+                }
+                write!(f, "]")
+            }
+            FilterCondition::And(els) => {
+                write!(f, "AND[")?;
+                for el in els {
+                    write!(f, "{el}, ")?;
+                }
+                write!(f, "]")
+            }
+            FilterCondition::GeoLowerThan { point, radius } => {
+                write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
+            }
+            FilterCondition::GeoBoundingBox {
+                top_right_point: top_left_point,
+                bottom_left_point: bottom_right_point,
+            } => {
+                write!(
+                    f,
+                    "_geoBoundingBox([{}, {}], [{}, {}])",
+                    top_left_point[0],
+                    top_left_point[1],
+                    bottom_right_point[0],
+                    bottom_right_point[1]
+                )
+            }
+        }
+    }
+}
+impl<'a> std::fmt::Display for Condition<'a> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Condition::GreaterThan(token) => write!(f, "> {token}"),
+            Condition::GreaterThanOrEqual(token) => write!(f, ">= {token}"),
+            Condition::Equal(token) => write!(f, "= {token}"),
+            Condition::NotEqual(token) => write!(f, "!= {token}"),
+            Condition::Null => write!(f, "IS NULL"),
+            Condition::Empty => write!(f, "IS EMPTY"),
+            Condition::Exists => write!(f, "EXISTS"),
+            Condition::LowerThan(token) => write!(f, "< {token}"),
+            Condition::LowerThanOrEqual(token) => write!(f, "<= {token}"),
+            Condition::Between { from, to } => write!(f, "{from} TO {to}"),
+        }
+    }
+}
+impl<'a> std::fmt::Display for Token<'a> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{{{}}}", self.value())
+    }
+}
+
 #[cfg(test)]
 pub mod tests {
+    use FilterCondition as Fc;
+
     use super::*;

     /// Create a raw [Token]. You must specify the string that appear BEFORE your element followed by your element
@@ -448,14 +558,22 @@ pub mod tests {
         unsafe { Span::new_from_raw_offset(offset, lines as u32, value, "") }.into()
     }

+    fn p(s: &str) -> impl std::fmt::Display + '_ {
+        Fc::parse(s).unwrap().unwrap()
+    }
+
+    #[test]
+    fn parse_escaped() {
+        insta::assert_display_snapshot!(p(r"title = 'foo\\'"), @r#"{title} = {foo\}"#);
+        insta::assert_display_snapshot!(p(r"title = 'foo\\\\'"), @r#"{title} = {foo\\}"#);
+        insta::assert_display_snapshot!(p(r"title = 'foo\\\\\\'"), @r#"{title} = {foo\\\}"#);
+        insta::assert_display_snapshot!(p(r"title = 'foo\\\\\\\\'"), @r#"{title} = {foo\\\\}"#);
+        // but it also works with other sequences
+        insta::assert_display_snapshot!(p(r#"title = 'foo\x20\n\t\"\'"'"#), @"{title} = {foo \n\t\"\'\"}");
+    }
+
     #[test]
     fn parse() {
-        use FilterCondition as Fc;
-
-        fn p(s: &str) -> impl std::fmt::Display + '_ {
-            Fc::parse(s).unwrap().unwrap()
-        }
-
         // Test equal
         insta::assert_display_snapshot!(p("channel = Ponce"), @"{channel} = {Ponce}");
         insta::assert_display_snapshot!(p("subscribers = 12"), @"{subscribers} = {12}");
@@ -496,14 +614,30 @@ pub mod tests {
         insta::assert_display_snapshot!(p("subscribers <= 1000"), @"{subscribers} <= {1000}");
         insta::assert_display_snapshot!(p("subscribers 100 TO 1000"), @"{subscribers} {100} TO {1000}");

-        // Test NOT + EXISTS
-        insta::assert_display_snapshot!(p("subscribers EXISTS"), @"{subscribers} EXISTS");
-        insta::assert_display_snapshot!(p("NOT subscribers < 1000"), @"NOT ({subscribers} < {1000})");
-        insta::assert_display_snapshot!(p("NOT subscribers EXISTS"), @"NOT ({subscribers} EXISTS)");
-        insta::assert_display_snapshot!(p("subscribers NOT EXISTS"), @"NOT ({subscribers} EXISTS)");
-        insta::assert_display_snapshot!(p("NOT subscribers NOT EXISTS"), @"{subscribers} EXISTS");
-        insta::assert_display_snapshot!(p("subscribers NOT EXISTS"), @"NOT ({subscribers} EXISTS)");
-        insta::assert_display_snapshot!(p("NOT subscribers 100 TO 1000"), @"NOT ({subscribers} {100} TO {1000})");
+        // Test NOT
+        insta::assert_display_snapshot!(p("NOT subscribers < 1000"), @"NOT ({subscribers} < {1000})");
+        insta::assert_display_snapshot!(p("NOT subscribers 100 TO 1000"), @"NOT ({subscribers} {100} TO {1000})");
+
+        // Test NULL + NOT NULL
+        insta::assert_display_snapshot!(p("subscribers IS NULL"), @"{subscribers} IS NULL");
+        insta::assert_display_snapshot!(p("NOT subscribers IS NULL"), @"NOT ({subscribers} IS NULL)");
+        insta::assert_display_snapshot!(p("subscribers IS NOT NULL"), @"NOT ({subscribers} IS NULL)");
+        insta::assert_display_snapshot!(p("NOT subscribers IS NOT NULL"), @"{subscribers} IS NULL");
+        insta::assert_display_snapshot!(p("subscribers IS NOT   NULL"), @"NOT ({subscribers} IS NULL)");
+
+        // Test EMPTY + NOT EMPTY
+        insta::assert_display_snapshot!(p("subscribers IS EMPTY"), @"{subscribers} IS EMPTY");
+        insta::assert_display_snapshot!(p("NOT subscribers IS EMPTY"), @"NOT ({subscribers} IS EMPTY)");
+        insta::assert_display_snapshot!(p("subscribers IS NOT EMPTY"), @"NOT ({subscribers} IS EMPTY)");
+        insta::assert_display_snapshot!(p("NOT subscribers IS NOT EMPTY"), @"{subscribers} IS EMPTY");
+        insta::assert_display_snapshot!(p("subscribers IS NOT   EMPTY"), @"NOT ({subscribers} IS EMPTY)");
+
+        // Test EXISTS + NOT EXISTS
+        insta::assert_display_snapshot!(p("subscribers EXISTS"), @"{subscribers} EXISTS");
+        insta::assert_display_snapshot!(p("NOT subscribers EXISTS"), @"NOT ({subscribers} EXISTS)");
+        insta::assert_display_snapshot!(p("subscribers NOT EXISTS"), @"NOT ({subscribers} EXISTS)");
+        insta::assert_display_snapshot!(p("NOT subscribers NOT EXISTS"), @"{subscribers} EXISTS");
+        insta::assert_display_snapshot!(p("subscribers NOT   EXISTS"), @"NOT ({subscribers} EXISTS)");

         // Test nested NOT
         insta::assert_display_snapshot!(p("NOT NOT NOT NOT x = 5"), @"{x} = {5}");
@@ -576,7 +710,7 @@ pub mod tests {
         "###);

         insta::assert_display_snapshot!(p("'OR'"), @r###"
-        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `\'OR\'`.
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `\'OR\'`.
         1:5 'OR'
         "###);

@@ -586,12 +720,12 @@ pub mod tests {
         "###);

         insta::assert_display_snapshot!(p("channel Ponce"), @r###"
-        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `channel Ponce`.
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `channel Ponce`.
         1:14 channel Ponce
         "###);

         insta::assert_display_snapshot!(p("channel = Ponce OR"), @r###"
-        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` but instead got nothing.
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` but instead got nothing.
         19:19 channel = Ponce OR
         "###);
@@ -621,15 +755,35 @@ pub mod tests {
         "###);

         insta::assert_display_snapshot!(p("_geoPoint(12, 13, 14)"), @r###"
-        `_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance), or _geoBoundingBox([latitude, longitude], [latitude, longitude]) built-in rules to filter on `_geo` coordinates.
+        `_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
         1:22 _geoPoint(12, 13, 14)
         "###);

         insta::assert_display_snapshot!(p("position <= _geoPoint(12, 13, 14)"), @r###"
-        `_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance), or _geoBoundingBox([latitude, longitude], [latitude, longitude]) built-in rules to filter on `_geo` coordinates.
+        `_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
         13:34 position <= _geoPoint(12, 13, 14)
         "###);

+        insta::assert_display_snapshot!(p("_geoDistance(12, 13, 14)"), @r###"
+        `_geoDistance` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
+        1:25 _geoDistance(12, 13, 14)
+        "###);
+
+        insta::assert_display_snapshot!(p("position <= _geoDistance(12, 13, 14)"), @r###"
+        `_geoDistance` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
+        13:37 position <= _geoDistance(12, 13, 14)
+        "###);
+
+        insta::assert_display_snapshot!(p("_geo(12, 13, 14)"), @r###"
+        `_geo` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
+        1:17 _geo(12, 13, 14)
+        "###);
+
+        insta::assert_display_snapshot!(p("position <= _geo(12, 13, 14)"), @r###"
+        `_geo` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance)` or `_geoBoundingBox([latitude, longitude], [latitude, longitude])` built-in rules to filter on `_geo` coordinates.
+        13:29 position <= _geo(12, 13, 14)
+        "###);
+
         insta::assert_display_snapshot!(p("position <= _geoRadius(12, 13, 14)"), @r###"
         The `_geoRadius` filter is an operation and can't be used as a value.
         13:35 position <= _geoRadius(12, 13, 14)
@@ -656,12 +810,12 @@ pub mod tests {
         "###);

         insta::assert_display_snapshot!(p("colour NOT EXIST"), @r###"
-        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `colour NOT EXIST`.
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `colour NOT EXIST`.
         1:17 colour NOT EXIST
         "###);

         insta::assert_display_snapshot!(p("subscribers 100 TO1000"), @r###"
-        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `subscribers 100 TO1000`.
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `subscribers 100 TO1000`.
         1:23 subscribers 100 TO1000
         "###);
@@ -722,6 +876,39 @@ pub mod tests {
         Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
         5:7 NOT OR EXISTS AND EXISTS NOT EXISTS
         "###);
+
+        insta::assert_display_snapshot!(p(r#"value NULL"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value NULL`.
+        1:11 value NULL
+        "###);
+        insta::assert_display_snapshot!(p(r#"value NOT NULL"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value NOT NULL`.
+        1:15 value NOT NULL
+        "###);
+        insta::assert_display_snapshot!(p(r#"value EMPTY"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value EMPTY`.
+        1:12 value EMPTY
+        "###);
+        insta::assert_display_snapshot!(p(r#"value NOT EMPTY"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value NOT EMPTY`.
+        1:16 value NOT EMPTY
+        "###);
+        insta::assert_display_snapshot!(p(r#"value IS"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value IS`.
+        1:9 value IS
+        "###);
+        insta::assert_display_snapshot!(p(r#"value IS NOT"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value IS NOT`.
+        1:13 value IS NOT
+        "###);
+        insta::assert_display_snapshot!(p(r#"value IS EXISTS"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value IS EXISTS`.
+        1:16 value IS EXISTS
+        "###);
+        insta::assert_display_snapshot!(p(r#"value IS NOT EXISTS"#), @r###"
+        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `value IS NOT EXISTS`.
+        1:20 value IS NOT EXISTS
+        "###);
     }

     #[test]
@@ -746,69 +933,3 @@ pub mod tests {
         assert_eq!(token.value(), s);
     }
 }
-
-impl<'a> std::fmt::Display for FilterCondition<'a> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            FilterCondition::Not(filter) => {
-                write!(f, "NOT ({filter})")
-            }
-            FilterCondition::Condition { fid, op } => {
-                write!(f, "{fid} {op}")
-            }
-            FilterCondition::In { fid, els } => {
-                write!(f, "{fid} IN[")?;
-                for el in els {
-                    write!(f, "{el}, ")?;
-                }
-                write!(f, "]")
-            }
-            FilterCondition::Or(els) => {
-                write!(f, "OR[")?;
-                for el in els {
-                    write!(f, "{el}, ")?;
-                }
-                write!(f, "]")
-            }
-            FilterCondition::And(els) => {
-                write!(f, "AND[")?;
-                for el in els {
-                    write!(f, "{el}, ")?;
-                }
-                write!(f, "]")
-            }
-            FilterCondition::GeoLowerThan { point, radius } => {
-                write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
-            }
-            FilterCondition::GeoBoundingBox { top_left_point, bottom_right_point } => {
-                write!(
-                    f,
-                    "_geoBoundingBox([{}, {}], [{}, {}])",
-                    top_left_point[0],
-                    top_left_point[1],
-                    bottom_right_point[0],
-                    bottom_right_point[1]
-                )
-            }
-        }
-    }
-}
-impl<'a> std::fmt::Display for Condition<'a> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            Condition::GreaterThan(token) => write!(f, "> {token}"),
-            Condition::GreaterThanOrEqual(token) => write!(f, ">= {token}"),
-            Condition::Equal(token) => write!(f, "= {token}"),
-            Condition::NotEqual(token) => write!(f, "!= {token}"),
-            Condition::Exists => write!(f, "EXISTS"),
-            Condition::LowerThan(token) => write!(f, "< {token}"),
-            Condition::LowerThanOrEqual(token) => write!(f, "<= {token}"),
-            Condition::Between { from, to } => write!(f, "{from} TO {to}"),
-        }
-    }
-}
-impl<'a> std::fmt::Display for Token<'a> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{{{}}}", self.value())
-    }
-}
@@ -7,8 +7,8 @@ use nom::{InputIter, InputLength, InputTake, Slice};

 use crate::error::{ExpectedValueKind, NomErrorExt};
 use crate::{
-    parse_geo_bounding_box, parse_geo_point, parse_geo_radius, Error, ErrorKind, IResult, Span,
-    Token,
+    parse_geo, parse_geo_bounding_box, parse_geo_distance, parse_geo_point, parse_geo_radius,
+    Error, ErrorKind, IResult, Span, Token,
 };

 /// This function goes through all characters in the [Span] if it finds any escaped character (`\`).

@@ -88,11 +88,16 @@ pub fn parse_value(input: Span) -> IResult<Token> {
     // then, we want to check if the user is misusing a geo expression
     // This expression can’t finish without error.
     // We want to return an error in case of failure.
-    if let Err(err) = parse_geo_point(input) {
-        if err.is_failure() {
-            return Err(err);
+    let geo_reserved_parse_functions = [parse_geo_point, parse_geo_distance, parse_geo];
+
+    for parser in geo_reserved_parse_functions {
+        if let Err(err) = parser(input) {
+            if err.is_failure() {
+                return Err(err);
+            }
         }
     }

     match parse_geo_radius(input) {
         Ok(_) => {
             return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::MisusedGeoRadius)))

@@ -166,7 +171,24 @@ pub fn parse_value(input: Span) -> IResult<Token> {
         })
     })?;

-    Ok((input, value))
+    match unescaper::unescape(value.value()) {
+        Ok(content) => {
+            if content.len() != value.value().len() {
+                Ok((input, Token::new(value.original_span(), Some(content))))
+            } else {
+                Ok((input, value))
+            }
+        }
+        Err(unescaper::Error::IncompleteStr(_)) => Err(nom::Err::Incomplete(nom::Needed::Unknown)),
+        Err(unescaper::Error::ParseIntError { .. }) => Err(nom::Err::Error(Error::new_from_kind(
+            value.original_span(),
+            ErrorKind::InvalidEscapedNumber,
+        ))),
+        Err(unescaper::Error::InvalidChar { .. }) => Err(nom::Err::Error(Error::new_from_kind(
+            value.original_span(),
+            ErrorKind::MalformedValue,
+        ))),
+    }
 }
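The `content.len() != value.value().len()` check relies on a property of the `unescaper` crate used above: unescaping can only shorten a string, so a length change means at least one escape sequence was resolved and an owned token must be built. A sketch:

fn main() {
    let raw = r"foo\x20bar"; // contains a literal backslash-x-2-0 sequence
    let unescaped = unescaper::unescape(raw).unwrap();
    assert_eq!(unescaped, "foo bar");
    // The length differs, which is what triggers building a new owned Token above.
    assert_ne!(unescaped.len(), raw.len());
}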
 fn is_value_component(c: char) -> bool {

@@ -178,7 +200,20 @@ fn is_syntax_component(c: char) -> bool {
 }

 fn is_keyword(s: &str) -> bool {
-    matches!(s, "AND" | "OR" | "IN" | "NOT" | "TO" | "EXISTS" | "_geoRadius" | "_geoBoundingBox")
+    matches!(
+        s,
+        "AND"
+            | "OR"
+            | "IN"
+            | "NOT"
+            | "TO"
+            | "EXISTS"
+            | "IS"
+            | "NULL"
+            | "EMPTY"
+            | "_geoRadius"
+            | "_geoBoundingBox"
+    )
 }

 #[cfg(test)]

@@ -235,8 +270,8 @@ pub mod test {
             ("aaaa", "", rtok("", "aaaa"), "aaaa"),
             (r#"aa"aa"#, r#""aa"#, rtok("", "aa"), "aa"),
             (r#"aa\"aa"#, r#""#, rtok("", r#"aa\"aa"#), r#"aa"aa"#),
-            (r#"aa\\\aa"#, r#""#, rtok("", r#"aa\\\aa"#), r#"aa\\\aa"#),
-            (r#"aa\\"\aa"#, r#""\aa"#, rtok("", r#"aa\\"#), r#"aa\\"#),
+            (r"aa\\\aa", r#""#, rtok("", r"aa\\\aa"), r"aa\\\aa"),
+            (r#"aa\\"\aa"#, r#""\aa"#, rtok("", r"aa\\"), r"aa\\"),
             (r#"aa\\\"\aa"#, r#""#, rtok("", r#"aa\\\"\aa"#), r#"aa\\"\aa"#),
             (r#"\"\""#, r#""#, rtok("", r#"\"\""#), r#""""#),
         ];

@@ -266,12 +301,12 @@ pub mod test {
         );
         // simple quote
         assert_eq!(
-            unescape(Span::new_extra(r#"Hello \'World\'"#, ""), '\''),
+            unescape(Span::new_extra(r"Hello \'World\'", ""), '\''),
             r#"Hello 'World'"#.to_string()
         );
         assert_eq!(
-            unescape(Span::new_extra(r#"Hello \\\'World\\\'"#, ""), '\''),
-            r#"Hello \\'World\\'"#.to_string()
+            unescape(Span::new_extra(r"Hello \\\'World\\\'", ""), '\''),
+            r"Hello \\'World\\'".to_string()
         );
     }

@@ -300,19 +335,19 @@ pub mod test {
             ("\"cha'nnel\"", "cha'nnel", false),
             ("I'm tamo", "I", false),
             // escaped thing but not quote
-            (r#""\\""#, r#"\\"#, false),
-            (r#""\\\\\\""#, r#"\\\\\\"#, false),
-            (r#""aa\\aa""#, r#"aa\\aa"#, false),
+            (r#""\\""#, r"\", true),
+            (r#""\\\\\\""#, r"\\\", true),
+            (r#""aa\\aa""#, r"aa\aa", true),
             // with double quote
             (r#""Hello \"world\"""#, r#"Hello "world""#, true),
-            (r#""Hello \\\"world\\\"""#, r#"Hello \\"world\\""#, true),
+            (r#""Hello \\\"world\\\"""#, r#"Hello \"world\""#, true),
             (r#""I'm \"super\" tamo""#, r#"I'm "super" tamo"#, true),
             (r#""\"\"""#, r#""""#, true),
             // with simple quote
-            (r#"'Hello \'world\''"#, r#"Hello 'world'"#, true),
-            (r#"'Hello \\\'world\\\''"#, r#"Hello \\'world\\'"#, true),
+            (r"'Hello \'world\''", r#"Hello 'world'"#, true),
+            (r"'Hello \\\'world\\\''", r"Hello \'world\'", true),
             (r#"'I\'m "super" tamo'"#, r#"I'm "super" tamo"#, true),
-            (r#"'\'\''"#, r#"''"#, true),
+            (r"'\'\''", r#"''"#, true),
         ];

         for (input, expected, escaped) in test_case {

@@ -332,7 +367,14 @@ pub mod test {
                 "Filter `{}` was not supposed to be escaped",
                 input
             );
-            assert_eq!(token.value(), expected, "Filter `{}` failed.", input);
+            assert_eq!(
+                token.value(),
+                expected,
+                "Filter `{}` failed by giving `{}` instead of `{}`.",
+                input,
+                token.value(),
+                expected
+            );
         }
     }
@@ -16,7 +16,7 @@ license.workspace = true
 serde_json = "1.0"

 [dev-dependencies]
-criterion = { version = "0.4.0", features = ["html_reports"] }
+criterion = { version = "0.5.1", features = ["html_reports"] }

 [[bench]]
 name = "benchmarks"
@@ -4,51 +4,56 @@ use serde_json::{Map, Value};

 pub fn flatten(json: &Map<String, Value>) -> Map<String, Value> {
     let mut obj = Map::new();
-    let mut all_keys = vec![];
-    insert_object(&mut obj, None, json, &mut all_keys);
-    for key in all_keys {
-        obj.entry(key).or_insert(Value::Array(vec![]));
+    let mut all_entries = vec![];
+    insert_object(&mut obj, None, json, &mut all_entries);
+    for (key, old_val) in all_entries {
+        obj.entry(key).or_insert(old_val.clone());
     }
     obj
 }

-fn insert_object(
+fn insert_object<'a>(
     base_json: &mut Map<String, Value>,
     base_key: Option<&str>,
-    object: &Map<String, Value>,
-    all_keys: &mut Vec<String>,
+    object: &'a Map<String, Value>,
+    all_entries: &mut Vec<(String, &'a Value)>,
 ) {
     for (key, value) in object {
         let new_key = base_key.map_or_else(|| key.clone(), |base_key| format!("{base_key}.{key}"));
-        all_keys.push(new_key.clone());
+        all_entries.push((new_key.clone(), value));
         if let Some(array) = value.as_array() {
-            insert_array(base_json, &new_key, array, all_keys);
+            insert_array(base_json, &new_key, array, all_entries);
         } else if let Some(object) = value.as_object() {
-            insert_object(base_json, Some(&new_key), object, all_keys);
+            insert_object(base_json, Some(&new_key), object, all_entries);
         } else {
-            insert_value(base_json, &new_key, value.clone());
+            insert_value(base_json, &new_key, value.clone(), false);
         }
     }
 }

-fn insert_array(
+fn insert_array<'a>(
     base_json: &mut Map<String, Value>,
     base_key: &str,
-    array: &Vec<Value>,
-    all_keys: &mut Vec<String>,
+    array: &'a Vec<Value>,
+    all_entries: &mut Vec<(String, &'a Value)>,
 ) {
     for value in array {
         if let Some(object) = value.as_object() {
-            insert_object(base_json, Some(base_key), object, all_keys);
+            insert_object(base_json, Some(base_key), object, all_entries);
         } else if let Some(sub_array) = value.as_array() {
-            insert_array(base_json, base_key, sub_array, all_keys);
+            insert_array(base_json, base_key, sub_array, all_entries);
         } else {
-            insert_value(base_json, base_key, value.clone());
+            insert_value(base_json, base_key, value.clone(), true);
         }
     }
 }

-fn insert_value(base_json: &mut Map<String, Value>, key: &str, to_insert: Value) {
+fn insert_value(
+    base_json: &mut Map<String, Value>,
+    key: &str,
+    to_insert: Value,
+    came_from_array: bool,
+) {
     debug_assert!(!to_insert.is_object());
     debug_assert!(!to_insert.is_array());

@@ -63,6 +68,8 @@ fn insert_value(base_json: &mut Map<String, Value>, key: &str, to_insert: Value)
             base_json[key] = Value::Array(vec![value, to_insert]);
         }
     // if it does not exist we can push the value untouched
+    } else if came_from_array {
+        base_json.insert(key.to_string(), Value::Array(vec![to_insert]));
     } else {
         base_json.insert(key.to_string(), to_insert);
     }
@@ -113,7 +120,11 @@ mod tests {
         assert_eq!(
             &flat,
             json!({
-                "a": [],
+                "a": {
+                    "b": "c",
+                    "d": "e",
+                    "f": "g"
+                },
                 "a.b": "c",
                 "a.d": "e",
                 "a.f": "g"

@@ -164,7 +175,7 @@ mod tests {
         assert_eq!(
             &flat,
             json!({
-                "a": 42,
+                "a": [42],
                 "a.b": ["c", "d", "e"],
             })
             .as_object()

@@ -186,7 +197,7 @@ mod tests {
         assert_eq!(
             &flat,
             json!({
-                "a": null,
+                "a": [null],
                 "a.b": ["c", "d", "e"],
             })
             .as_object()

@@ -208,7 +219,9 @@ mod tests {
         assert_eq!(
             &flat,
             json!({
-                "a": [],
+                "a": {
+                    "b": "c"
+                },
                 "a.b": ["c", "d"],
             })
             .as_object()

@@ -234,7 +247,7 @@ mod tests {
             json!({
                 "a.b": ["c", "d", "f"],
                 "a.c": "e",
-                "a": 35,
+                "a": [35],
             })
             .as_object()
             .unwrap()

@@ -302,4 +315,53 @@ mod tests {
             .unwrap()
         );
     }
+
+    #[test]
+    fn flatten_nested_values_keep_original_values() {
+        let mut base: Value = json!({
+            "tags": {
+                "t1": "v1"
+            },
+            "prices": {
+                "p1": [null],
+                "p1000": {"tamo": {"le": {}}}
+            },
+            "kiki": [[]]
+        });
+        let json = std::mem::take(base.as_object_mut().unwrap());
+        let flat = flatten(&json);
+
+        println!("{}", serde_json::to_string_pretty(&flat).unwrap());
+
+        assert_eq!(
+            &flat,
+            json!({
+                "prices": {
+                    "p1": [null],
+                    "p1000": {
+                        "tamo": {
+                            "le": {}
+                        }
+                    }
+                },
+                "prices.p1": [null],
+                "prices.p1000": {
+                    "tamo": {
+                        "le": {}
+                    }
+                },
+                "prices.p1000.tamo": {
+                    "le": {}
+                },
+                "prices.p1000.tamo.le": {},
+                "tags": {
+                    "t1": "v1"
+                },
+                "tags.t1": "v1",
+                "kiki": [[]]
+            })
+            .as_object()
+            .unwrap()
+        );
+    }
 }
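A condensed version of what the new test pins down, assuming the crate is consumed as `flatten_serde_json`: flattened keys are now added next to the original nested values instead of overwriting them with empty arrays.

use serde_json::json;

fn main() {
    let doc = json!({ "a": { "b": "c" } });
    let flat = flatten_serde_json::flatten(doc.as_object().unwrap());
    assert_eq!(flat.get("a"), Some(&json!({ "b": "c" }))); // original value kept
    assert_eq!(flat.get("a.b"), Some(&json!("c")));        // flattened key added
}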
fuzzers/Cargo.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
+[package]
+name = "fuzzers"
+publish = false
+
+version.workspace = true
+authors.workspace = true
+description.workspace = true
+homepage.workspace = true
+readme.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+arbitrary = { version = "1.3.2", features = ["derive"] }
+clap = { version = "4.4.17", features = ["derive"] }
+fastrand = "2.0.1"
+milli = { path = "../milli" }
+serde = { version = "1.0.195", features = ["derive"] }
+serde_json = { version = "1.0.111", features = ["preserve_order"] }
+tempfile = "3.9.0"
fuzzers/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+# Fuzzers
+
+The purpose of this crate is to contain all the handmade "fuzzers" we may need.
fuzzers/src/bin/fuzz-indexing.rs (new file, 152 lines)
@@ -0,0 +1,152 @@
+use std::num::NonZeroUsize;
+use std::path::PathBuf;
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::time::Duration;
+
+use arbitrary::{Arbitrary, Unstructured};
+use clap::Parser;
+use fuzzers::Operation;
+use milli::heed::EnvOpenOptions;
+use milli::update::{IndexDocuments, IndexDocumentsConfig, IndexerConfig};
+use milli::Index;
+use tempfile::TempDir;
+
+#[derive(Debug, Arbitrary)]
+struct Batch([Operation; 5]);
+
+#[derive(Debug, Clone, Parser)]
+struct Opt {
+    /// The number of fuzzers to run in parallel.
+    #[clap(long)]
+    par: Option<NonZeroUsize>,
+    // We need to put a lot of newlines in the following documentation or else everything gets collapsed on one line
+    /// The path in which the databases will be created.
+    /// Using a ramdisk is recommended.
+    ///
+    /// Linux:
+    ///
+    /// sudo mount -t tmpfs -o size=2g tmpfs ramdisk # to create it
+    ///
+    /// sudo umount ramdisk # to remove it
+    ///
+    /// MacOS:
+    ///
+    /// diskutil erasevolume HFS+ 'RAM Disk' `hdiutil attach -nobrowse -nomount ram://4194304` # create it
+    ///
+    /// hdiutil detach /dev/:the_disk
+    #[clap(long)]
+    path: Option<PathBuf>,
+}
+
+fn main() {
+    let opt = Opt::parse();
+    let progression: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));
+    let stop: &'static AtomicBool = Box::leak(Box::new(AtomicBool::new(false)));
+
+    let par = opt.par.unwrap_or_else(|| std::thread::available_parallelism().unwrap()).get();
+    let mut handles = Vec::with_capacity(par);
+
+    for _ in 0..par {
+        let opt = opt.clone();
+
+        let handle = std::thread::spawn(move || {
+            let mut options = EnvOpenOptions::new();
+            options.map_size(1024 * 1024 * 1024 * 1024);
+            let tempdir = match opt.path {
+                Some(path) => TempDir::new_in(path).unwrap(),
+                None => TempDir::new().unwrap(),
+            };
+            let index = Index::new(options, tempdir.path()).unwrap();
+            let indexer_config = IndexerConfig::default();
+            let index_documents_config = IndexDocumentsConfig::default();
+
+            std::thread::scope(|s| {
+                loop {
+                    if stop.load(Ordering::Relaxed) {
+                        return;
+                    }
+                    let v: Vec<u8> =
+                        std::iter::repeat_with(|| fastrand::u8(..)).take(1000).collect();
+
+                    let mut data = Unstructured::new(&v);
+                    let batches = <[Batch; 5]>::arbitrary(&mut data).unwrap();
+                    // will be used to display the error once a thread crashes
+                    let dbg_input = format!("{:#?}", batches);
+
+                    let handle = s.spawn(|| {
+                        let mut wtxn = index.write_txn().unwrap();
+
+                        for batch in batches {
+                            let mut builder = IndexDocuments::new(
+                                &mut wtxn,
+                                &index,
+                                &indexer_config,
+                                index_documents_config.clone(),
+                                |_| (),
+                                || false,
+                            )
+                            .unwrap();
+
+                            for op in batch.0 {
+                                match op {
+                                    Operation::AddDoc(doc) => {
+                                        let documents =
+                                            milli::documents::objects_from_json_value(doc.to_d());
+                                        let documents =
+                                            milli::documents::documents_batch_reader_from_objects(
+                                                documents,
+                                            );
+                                        let (b, _added) = builder.add_documents(documents).unwrap();
+                                        builder = b;
+                                    }
+                                    Operation::DeleteDoc(id) => {
+                                        let (b, _removed) =
+                                            builder.remove_documents(vec![id.to_s()]).unwrap();
+                                        builder = b;
+                                    }
+                                }
+                            }
+                            builder.execute().unwrap();
+
+                            // after executing a batch we check if the database is corrupted
+                            let res = index.search(&wtxn).execute().unwrap();
+                            index.documents(&wtxn, res.documents_ids).unwrap();
+                            progression.fetch_add(1, Ordering::Relaxed);
+                        }
+                        wtxn.abort();
+                    });
+                    if let err @ Err(_) = handle.join() {
+                        stop.store(true, Ordering::Relaxed);
+                        err.expect(&dbg_input);
+                    }
+                }
+            });
+        });
+        handles.push(handle);
+    }
+
+    std::thread::spawn(|| {
+        let mut last_value = 0;
+        let start = std::time::Instant::now();
+        loop {
+            let total = progression.load(Ordering::Relaxed);
+            let elapsed = start.elapsed().as_secs();
+            if elapsed > 3600 {
+                // after 1 hour, stop the fuzzer, success
+                std::process::exit(0);
+            }
+            println!(
+                "Has been running for {:?} seconds. Tested {} new values for a total of {}.",
+                elapsed,
+                total - last_value,
+                total
+            );
+            last_value = total;
+            std::thread::sleep(Duration::from_secs(1));
+        }
+    });

+    for handle in handles {
+        handle.join().unwrap();
+    }
+}
fuzzers/src/lib.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
+use arbitrary::Arbitrary;
+use serde_json::{json, Value};
+
+#[derive(Debug, Arbitrary)]
+pub enum Document {
+    One,
+    Two,
+    Three,
+    Four,
+    Five,
+    Six,
+}
+
+impl Document {
+    pub fn to_d(&self) -> Value {
+        match self {
+            Document::One => json!({ "id": 0, "doggo": "bernese" }),
+            Document::Two => json!({ "id": 0, "doggo": "golden" }),
+            Document::Three => json!({ "id": 0, "catto": "jorts" }),
+            Document::Four => json!({ "id": 1, "doggo": "bernese" }),
+            Document::Five => json!({ "id": 1, "doggo": "golden" }),
+            Document::Six => json!({ "id": 1, "catto": "jorts" }),
+        }
+    }
+}
+
+#[derive(Debug, Arbitrary)]
+pub enum DocId {
+    Zero,
+    One,
+}
+
+impl DocId {
+    pub fn to_s(&self) -> String {
+        match self {
+            DocId::Zero => "0".to_string(),
+            DocId::One => "1".to_string(),
+        }
+    }
+}
+
+#[derive(Debug, Arbitrary)]
+pub enum Operation {
+    AddDoc(Document),
+    DeleteDoc(DocId),
+}
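These `Arbitrary` derives are what let the binary above turn raw entropy into indexing operations. A minimal driver mirroring its generation loop, assuming the `fuzzers`, `arbitrary`, and `fastrand` crates from the new Cargo.toml:

use arbitrary::{Arbitrary, Unstructured};
use fuzzers::Operation;

fn main() {
    // 1000 random bytes is plenty of entropy for a handful of operations.
    let raw: Vec<u8> = std::iter::repeat_with(|| fastrand::u8(..)).take(1000).collect();
    let mut data = Unstructured::new(&raw);
    let ops = <[Operation; 5]>::arbitrary(&mut data).unwrap();
    println!("{ops:#?}");
}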
File diff suppressed because it is too large
@@ -11,29 +11,37 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-anyhow = "1.0.64"
+anyhow = "1.0.79"
 bincode = "1.3.3"
-csv = "1.1.6"
-derive_builder = "0.11.2"
+csv = "1.3.0"
+derive_builder = "0.12.0"
 dump = { path = "../dump" }
-enum-iterator = "1.1.3"
+enum-iterator = "1.5.0"
 file-store = { path = "../file-store" }
-log = "0.4.14"
+flate2 = "1.0.28"
 meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
 page_size = "0.5.0"
-roaring = { version = "0.10.0", features = ["serde"] }
-serde = { version = "1.0.136", features = ["derive"] }
-serde_json = { version = "1.0.85", features = ["preserve_order"] }
+puffin = { version = "0.16.0", features = ["serialization"] }
+rayon = "1.8.1"
+roaring = { version = "0.10.2", features = ["serde"] }
+serde = { version = "1.0.195", features = ["derive"] }
+serde_json = { version = "1.0.111", features = ["preserve_order"] }
 synchronoise = "1.0.1"
-tempfile = "3.3.0"
-thiserror = "1.0.30"
-time = { version = "0.3.7", features = ["serde-well-known", "formatting", "parsing", "macros"] }
-uuid = { version = "1.1.2", features = ["serde", "v4"] }
+tempfile = "3.9.0"
+thiserror = "1.0.56"
+time = { version = "0.3.31", features = [
+    "serde-well-known",
+    "formatting",
+    "parsing",
+    "macros",
+] }
+tracing = "0.1.40"
+ureq = "2.9.1"
+uuid = { version = "1.6.1", features = ["serde", "v4"] }

 [dev-dependencies]
 big_s = "1.0.2"
-crossbeam = "0.8.2"
-insta = { version = "1.19.1", features = ["json", "redactions"] }
+crossbeam = "0.8.4"
+insta = { version = "1.34.0", features = ["json", "redactions"] }
 meili-snap = { path = "../meili-snap" }
 nelson = { git = "https://github.com/meilisearch/nelson.git", rev = "675f13885548fb415ead8fbb447e9e6d9314000a"}
@ -25,6 +25,7 @@ enum AutobatchKind {
|
||||
primary_key: Option<String>,
|
||||
},
|
||||
DocumentDeletion,
|
||||
DocumentDeletionByFilter,
|
||||
DocumentClear,
|
||||
Settings {
|
||||
allow_index_creation: bool,
|
||||
@ -64,6 +65,9 @@ impl From<KindWithContent> for AutobatchKind {
|
||||
} => AutobatchKind::DocumentImport { method, allow_index_creation, primary_key },
|
||||
KindWithContent::DocumentDeletion { .. } => AutobatchKind::DocumentDeletion,
|
||||
KindWithContent::DocumentClear { .. } => AutobatchKind::DocumentClear,
|
||||
KindWithContent::DocumentDeletionByFilter { .. } => {
|
||||
AutobatchKind::DocumentDeletionByFilter
|
||||
}
|
||||
KindWithContent::SettingsUpdate { allow_index_creation, is_deletion, .. } => {
|
||||
AutobatchKind::Settings {
|
||||
allow_index_creation: allow_index_creation && !is_deletion,
|
||||
@ -97,6 +101,9 @@ pub enum BatchKind {
|
||||
DocumentDeletion {
|
||||
deletion_ids: Vec<TaskId>,
|
||||
},
|
||||
DocumentDeletionByFilter {
|
||||
id: TaskId,
|
||||
},
|
||||
ClearAndSettings {
|
||||
other: Vec<TaskId>,
|
||||
allow_index_creation: bool,
|
||||
@ -153,7 +160,7 @@ impl BatchKind {
|
||||
impl BatchKind {
|
||||
/// Returns a `ControlFlow::Break` if you must stop right now.
|
||||
/// The boolean tell you if an index has been created by the batched task.
|
||||
/// To ease the writting of the code. `true` can be returned when you don't need to create an index
|
||||
/// To ease the writing of the code. `true` can be returned when you don't need to create an index
|
||||
/// but false can't be returned if you needs to create an index.
|
||||
// TODO use an AutoBatchKind as input
|
||||
pub fn new(
|
||||
@ -195,6 +202,9 @@ impl BatchKind {
|
||||
K::DocumentDeletion => {
|
||||
(Continue(BatchKind::DocumentDeletion { deletion_ids: vec![task_id] }), false)
|
||||
}
|
||||
K::DocumentDeletionByFilter => {
|
||||
(Break(BatchKind::DocumentDeletionByFilter { id: task_id }), false)
|
||||
}
|
||||
K::Settings { allow_index_creation } => (
|
||||
Continue(BatchKind::Settings { allow_index_creation, settings_ids: vec![task_id] }),
|
||||
allow_index_creation,
|
||||
@ -204,7 +214,7 @@ impl BatchKind {
|
||||
|
||||
/// Returns a `ControlFlow::Break` if you must stop right now.
|
||||
/// The boolean tell you if an index has been created by the batched task.
|
||||
/// To ease the writting of the code. `true` can be returned when you don't need to create an index
|
||||
/// To ease the writing of the code. `true` can be returned when you don't need to create an index
|
||||
/// but false can't be returned if you needs to create an index.
|
||||
#[rustfmt::skip]
|
||||
fn accumulate(self, id: TaskId, kind: AutobatchKind, index_already_exists: bool, primary_key: Option<&str>) -> ControlFlow<BatchKind, BatchKind> {
|
||||
@ -212,7 +222,7 @@ impl BatchKind {
|
||||
|
||||
match (self, kind) {
|
||||
// We don't batch any of these operations
|
||||
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap) => Break(this),
|
||||
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentDeletionByFilter) => Break(this),
|
||||
// We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
|
||||
(this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
|
||||
Break(this)
|
||||
@ -508,7 +518,8 @@ impl BatchKind {
|
||||
BatchKind::IndexCreation { .. }
|
||||
| BatchKind::IndexDeletion { .. }
|
||||
| BatchKind::IndexUpdate { .. }
|
||||
| BatchKind::IndexSwap { .. },
|
||||
| BatchKind::IndexSwap { .. }
|
||||
| BatchKind::DocumentDeletionByFilter { .. },
|
||||
_,
|
||||
) => {
|
||||
unreachable!()
|
||||
|
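The autobatcher drives accumulation with `std::ops::ControlFlow`: `Continue` grows the batch, `Break` seals it, and deletion-by-filter always breaks because it is never batched with anything else. A rough, self-contained sketch of the pattern, with simplified stand-in types rather than the scheduler's real ones:

```rust
use std::ops::ControlFlow::{self, Break, Continue};

#[derive(Debug)]
struct Batch {
    deletion_ids: Vec<u32>, // task ids accumulated so far
}

// Fold one more task into the batch, or close the batch (`Break`)
// when the incoming kind must be processed alone.
fn accumulate(mut batch: Batch, task_id: u32, is_deletion_by_filter: bool) -> ControlFlow<Batch, Batch> {
    if is_deletion_by_filter {
        // Like K::DocumentDeletionByFilter above: never batched with others.
        return Break(batch);
    }
    batch.deletion_ids.push(task_id);
    Continue(batch)
}

fn main() {
    let mut batch = Batch { deletion_ids: vec![0] };
    for (id, by_filter) in [(1, false), (2, false), (3, true)] {
        match accumulate(batch, id, by_filter) {
            Continue(b) => batch = b,
            Break(b) => {
                println!("closing batch at task {id}: {b:?}");
                return;
            }
        }
    }
}
```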
@@ -19,18 +19,19 @@ one indexing operation.

 use std::collections::{BTreeSet, HashSet};
 use std::ffi::OsStr;
+use std::fmt;
 use std::fs::{self, File};
 use std::io::BufWriter;

 use dump::IndexMetadata;
-use log::{debug, error, info};
+use meilisearch_types::error::Code;
 use meilisearch_types::heed::{RoTxn, RwTxn};
 use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader};
 use meilisearch_types::milli::heed::CompactionOption;
 use meilisearch_types::milli::update::{
-    DocumentDeletionResult, IndexDocumentsConfig, IndexDocumentsMethod, Settings as MilliSettings,
+    IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig, Settings as MilliSettings,
 };
-use meilisearch_types::milli::{self, BEU32};
+use meilisearch_types::milli::{self, Filter};
 use meilisearch_types::settings::{apply_settings_to_builder, Settings, Unchecked};
 use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
 use meilisearch_types::{compression, Index, VERSION_FILE_NAME};

@@ -41,7 +42,7 @@ use uuid::Uuid;

 use crate::autobatcher::{self, BatchKind};
 use crate::utils::{self, swap_index_uid_in_task};
-use crate::{Error, IndexScheduler, ProcessingTasks, Result, TaskId};
+use crate::{Error, IndexScheduler, MustStopProcessing, ProcessingTasks, Result, TaskId};

 /// Represents a combination of tasks that can all be processed at the same time.
 ///

@@ -58,7 +59,7 @@ pub(crate) enum Batch {
         /// The list of tasks that were processing when this task cancelation appeared.
         previous_processing_tasks: RoaringBitmap,
     },
-    TaskDeletion(Task),
+    TaskDeletions(Vec<Task>),
     SnapshotCreation(Vec<Task>),
     Dump(Task),
     IndexOperation {

@@ -102,11 +103,9 @@ pub(crate) enum IndexOperation {
         operations: Vec<DocumentOperation>,
         tasks: Vec<Task>,
     },
-    DocumentDeletion {
+    IndexDocumentDeletionByFilter {
         index_uid: String,
-        // The vec associated with each document deletion tasks.
-        documents: Vec<Vec<String>>,
-        tasks: Vec<Task>,
+        task: Task,
     },
     DocumentClear {
         index_uid: String,

@@ -143,22 +142,27 @@ pub(crate) enum IndexOperation {

 impl Batch {
     /// Return the task ids associated with this batch.
-    pub fn ids(&self) -> Vec<TaskId> {
+    pub fn ids(&self) -> RoaringBitmap {
         match self {
             Batch::TaskCancelation { task, .. }
-            | Batch::TaskDeletion(task)
             | Batch::Dump(task)
             | Batch::IndexCreation { task, .. }
-            | Batch::IndexUpdate { task, .. } => vec![task.uid],
-            Batch::SnapshotCreation(tasks) | Batch::IndexDeletion { tasks, .. } => {
-                tasks.iter().map(|task| task.uid).collect()
+            | Batch::IndexUpdate { task, .. } => {
+                RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
             }
+            Batch::SnapshotCreation(tasks)
+            | Batch::TaskDeletions(tasks)
+            | Batch::IndexDeletion { tasks, .. } => {
+                RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
+            }
             Batch::IndexOperation { op, .. } => match op {
                 IndexOperation::DocumentOperation { tasks, .. }
                 | IndexOperation::DocumentDeletion { tasks, .. }
                 | IndexOperation::Settings { tasks, .. }
                 | IndexOperation::DocumentClear { tasks, .. } => {
-                    tasks.iter().map(|task| task.uid).collect()
+                    RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
                 }
+                IndexOperation::IndexDocumentDeletionByFilter { task, .. } => {
+                    RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
+                }
                 IndexOperation::SettingsAndDocumentOperation {
                     document_import_tasks: tasks,

@@ -169,9 +173,11 @@ impl Batch {
                     cleared_tasks: tasks,
                     settings_tasks: other,
                     ..
-                } => tasks.iter().chain(other).map(|task| task.uid).collect(),
+                } => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
             },
-            Batch::IndexSwap { task } => vec![task.uid],
+            Batch::IndexSwap { task } => {
+                RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
+            }
         }
     }
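`ids()` now returns a `RoaringBitmap` instead of a `Vec<TaskId>`. A minimal sketch of the two constructors used above, assuming the `roaring` crate that milli builds on: `from_sorted_iter` is cheaper but fails if the input is not strictly ascending, which is why the single-element case can safely `unwrap()`.

```rust
use roaring::RoaringBitmap;

fn main() {
    // A single task uid: a one-element iterator is trivially sorted.
    let one = RoaringBitmap::from_sorted_iter(std::iter::once(42u32)).unwrap();
    assert_eq!(one.len(), 1);

    // Arbitrary order: fall back to the FromIterator impl.
    let many: RoaringBitmap = [7u32, 3, 12].into_iter().collect();
    assert!(many.contains(3) && many.contains(12));
}
```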
@@ -180,7 +186,7 @@ impl Batch {
         use Batch::*;
         match self {
             TaskCancelation { .. }
-            | TaskDeletion(_)
+            | TaskDeletions(_)
             | SnapshotCreation(_)
             | Dump(_)
             | IndexSwap { .. } => None,

@@ -192,11 +198,34 @@ impl Batch {
         }
     }
 }
+
+impl fmt::Display for Batch {
+    /// A text used when we debug the profiling reports.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let index_uid = self.index_uid();
+        let tasks = self.ids();
+        match self {
+            Batch::TaskCancelation { .. } => f.write_str("TaskCancelation")?,
+            Batch::TaskDeletions(_) => f.write_str("TaskDeletion")?,
+            Batch::SnapshotCreation(_) => f.write_str("SnapshotCreation")?,
+            Batch::Dump(_) => f.write_str("Dump")?,
+            Batch::IndexOperation { op, .. } => write!(f, "{op}")?,
+            Batch::IndexCreation { .. } => f.write_str("IndexCreation")?,
+            Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
+            Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
+            Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
+        };
+        match index_uid {
+            Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
+            None => f.write_fmt(format_args!(" from tasks: {tasks:?}")),
+        }
+    }
+}

 impl IndexOperation {
     pub fn index_uid(&self) -> &str {
         match self {
             IndexOperation::DocumentOperation { index_uid, .. }
             | IndexOperation::DocumentDeletion { index_uid, .. }
+            | IndexOperation::IndexDocumentDeletionByFilter { index_uid, .. }
             | IndexOperation::DocumentClear { index_uid, .. }
             | IndexOperation::Settings { index_uid, .. }
             | IndexOperation::DocumentClearAndSetting { index_uid, .. }

@@ -205,6 +234,27 @@ impl IndexOperation {
     }
 }
+
+impl fmt::Display for IndexOperation {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            IndexOperation::DocumentOperation { .. } => {
+                f.write_str("IndexOperation::DocumentOperation")
+            }
+            IndexOperation::IndexDocumentDeletionByFilter { .. } => {
+                f.write_str("IndexOperation::IndexDocumentDeletionByFilter")
+            }
+            IndexOperation::DocumentClear { .. } => f.write_str("IndexOperation::DocumentClear"),
+            IndexOperation::Settings { .. } => f.write_str("IndexOperation::Settings"),
+            IndexOperation::DocumentClearAndSetting { .. } => {
+                f.write_str("IndexOperation::DocumentClearAndSetting")
+            }
+            IndexOperation::SettingsAndDocumentOperation { .. } => {
+                f.write_str("IndexOperation::SettingsAndDocumentOperation")
+            }
+        }
+    }
+}
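These `Display` impls exist mainly so profiling and logging can name a batch cheaply (see the `fields(batch = batch.to_string())` attribute later in this diff). A toy version of the same pattern:

```rust
use std::fmt;

enum Batch {
    Dump,
    IndexSwap,
}

impl fmt::Display for Batch {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Batch::Dump => f.write_str("Dump"),
            Batch::IndexSwap => f.write_str("IndexSwap"),
        }
    }
}

fn main() {
    // The scheduler does the same thing to label trace spans and profiles.
    println!("processing batch: {}", Batch::IndexSwap);
    println!("processing batch: {}", Batch::Dump);
}
```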
 impl IndexScheduler {
     /// Convert an [`BatchKind`](crate::autobatcher::BatchKind) into a [`Batch`].
     ///

@@ -227,6 +277,21 @@ impl IndexScheduler {
                 },
                 must_create_index,
             })),
+            BatchKind::DocumentDeletionByFilter { id } => {
+                let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
+                match &task.kind {
+                    KindWithContent::DocumentDeletionByFilter { index_uid, .. } => {
+                        Ok(Some(Batch::IndexOperation {
+                            op: IndexOperation::IndexDocumentDeletionByFilter {
+                                index_uid: index_uid.clone(),
+                                task,
+                            },
+                            must_create_index: false,
+                        }))
+                    }
+                    _ => unreachable!(),
+                }
+            }
             BatchKind::DocumentOperation { method, operation_ids, .. } => {
                 let tasks = self.get_existing_tasks(rtxn, operation_ids)?;
                 let primary_key = tasks

@@ -277,18 +342,27 @@ impl IndexScheduler {
             BatchKind::DocumentDeletion { deletion_ids } => {
                 let tasks = self.get_existing_tasks(rtxn, deletion_ids)?;

-                let mut documents = Vec::new();
+                let mut operations = Vec::with_capacity(tasks.len());
+                let mut documents_counts = Vec::with_capacity(tasks.len());
                 for task in &tasks {
                     match task.kind {
                         KindWithContent::DocumentDeletion { ref documents_ids, .. } => {
-                            documents.push(documents_ids.clone())
+                            operations.push(DocumentOperation::Delete(documents_ids.clone()));
+                            documents_counts.push(documents_ids.len() as u64);
                         }
                         _ => unreachable!(),
                     }
                 }

                 Ok(Some(Batch::IndexOperation {
-                    op: IndexOperation::DocumentDeletion { index_uid, documents, tasks },
+                    op: IndexOperation::DocumentOperation {
+                        index_uid,
+                        primary_key: None,
+                        method: IndexDocumentsMethod::ReplaceDocuments,
+                        documents_counts,
+                        operations,
+                        tasks,
+                    },
                     must_create_index,
                 }))
             }
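The `BatchKind::DocumentDeletion` arm above now rewrites plain deletion tasks into a single document-operation payload instead of a dedicated deletion batch. A simplified model of that fold, using a hypothetical `DocumentOperation` mirror of the enum in this diff:

```rust
enum DocumentOperation {
    Delete(Vec<String>), // external document ids to remove
}

// One entry per deletion task: the operation itself, plus how many ids
// that task provided (used later to fill in the task details).
fn fold_deletions(tasks: &[Vec<String>]) -> (Vec<DocumentOperation>, Vec<u64>) {
    let mut operations = Vec::with_capacity(tasks.len());
    let mut documents_counts = Vec::with_capacity(tasks.len());
    for documents_ids in tasks {
        operations.push(DocumentOperation::Delete(documents_ids.clone()));
        documents_counts.push(documents_ids.len() as u64);
    }
    (operations, documents_counts)
}

fn main() {
    let (ops, counts) = fold_deletions(&[vec!["a".into(), "b".into()], vec!["c".into()]]);
    assert_eq!(ops.len(), 2);
    assert_eq!(counts, vec![2, 1]);
}
```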
@@ -447,10 +521,13 @@ impl IndexScheduler {
     /// 3. We get the *next* snapshot to process.
     /// 4. We get the *next* dump to process.
     /// 5. We get the *next* tasks to process for a specific index.
+    #[tracing::instrument(level = "trace", skip(self, rtxn), target = "indexing::scheduler")]
     pub(crate) fn create_next_batch(&self, rtxn: &RoTxn) -> Result<Option<Batch>> {
         #[cfg(test)]
         self.maybe_fail(crate::tests::FailureLocation::InsideCreateBatch)?;

+        puffin::profile_function!();
+
         let enqueued = &self.get_status(rtxn, Status::Enqueued)?;
         let to_cancel = self.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;

@@ -469,9 +546,9 @@ impl IndexScheduler {

         // 2. we get the next task to delete
         let to_delete = self.get_kind(rtxn, Kind::TaskDeletion)? & enqueued;
-        if let Some(task_id) = to_delete.min() {
-            let task = self.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
-            return Ok(Some(Batch::TaskDeletion(task)));
+        if !to_delete.is_empty() {
+            let tasks = self.get_existing_tasks(rtxn, to_delete)?;
+            return Ok(Some(Batch::TaskDeletions(tasks)));
         }

         // 3. we batch the snapshot.

@@ -514,7 +591,9 @@ impl IndexScheduler {
         let index_tasks = self.index_tasks(rtxn, index_name)? & enqueued;

         // If autobatching is disabled we only take one task at a time.
-        let tasks_limit = if self.autobatching_enabled { usize::MAX } else { 1 };
+        // Otherwise, we take only a maximum of tasks to create batches.
+        let tasks_limit =
+            if self.autobatching_enabled { self.max_number_of_batched_tasks } else { 1 };

         let enqueued = index_tasks
             .into_iter()
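Task selection in `create_next_batch` is mostly bitmap algebra: a per-kind or per-index bitmap intersected with the enqueued set, then truncated to the batch limit. A sketch with the `roaring` crate (the same structure milli uses for these sets):

```rust
use roaring::RoaringBitmap;

fn main() {
    let enqueued: RoaringBitmap = (0u32..10).collect();
    let deletions: RoaringBitmap = [2u32, 5, 42].into_iter().collect();

    // Only deletion tasks that are still enqueued are candidates.
    let to_delete = &deletions & &enqueued;
    assert_eq!(to_delete.iter().collect::<Vec<_>>(), vec![2, 5]);

    // With autobatching on, cap how many tasks one batch may take.
    let max_number_of_batched_tasks = 1usize; // stand-in for the real config
    let batch: Vec<u32> = to_delete.iter().take(max_number_of_batched_tasks).collect();
    assert_eq!(batch, vec![2]);
}
```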
@@ -548,6 +627,7 @@ impl IndexScheduler {
     /// The list of tasks that were processed. The metadata of each task in the returned
     /// list is updated accordingly, with the exception of the its date fields
     /// [`finished_at`](meilisearch_types::tasks::Task::finished_at) and [`started_at`](meilisearch_types::tasks::Task::started_at).
+    #[tracing::instrument(level = "trace", skip(self, batch), target = "indexing::scheduler", fields(batch=batch.to_string()))]
     pub(crate) fn process_batch(&self, batch: Batch) -> Result<Vec<Task>> {
         #[cfg(test)]
         {

@@ -555,6 +635,9 @@ impl IndexScheduler {
             self.maybe_fail(crate::tests::FailureLocation::PanicInsideProcessBatch)?;
             self.breakpoint(crate::Breakpoint::InsideProcessBatch);
         }

+        puffin::profile_function!(batch.to_string());
+
         match batch {
             Batch::TaskCancelation { mut task, previous_started_at, previous_processing_tasks } => {
                 // 1. Retrieve the tasks that matched the query at enqueue-time.

@@ -594,9 +677,10 @@ impl IndexScheduler {
                     Ok(()) => {
                         for content_uuid in canceled_tasks_content_uuids {
                             if let Err(error) = self.delete_update_file(content_uuid) {
-                                error!(
-                                    "We failed deleting the content file indentified as {}: {}",
-                                    content_uuid, error
+                                tracing::error!(
+                                    file_content_uuid = %content_uuid,
+                                    %error,
+                                    "Failed deleting content file"
                                 )
                             }
                         }

@@ -606,31 +690,43 @@ impl IndexScheduler {

                 Ok(vec![task])
             }
-            Batch::TaskDeletion(mut task) => {
+            Batch::TaskDeletions(mut tasks) => {
                 // 1. Retrieve the tasks that matched the query at enqueue-time.
-                let matched_tasks =
+                let mut matched_tasks = RoaringBitmap::new();
+
+                for task in tasks.iter() {
                     if let KindWithContent::TaskDeletion { tasks, query: _ } = &task.kind {
-                        tasks
+                        matched_tasks |= tasks;
                     } else {
                         unreachable!()
                     }
+                }
+
+                let mut wtxn = self.env.write_txn()?;
+                let mut deleted_tasks = self.delete_matched_tasks(&mut wtxn, &matched_tasks)?;
+                wtxn.commit()?;
+
+                for task in tasks.iter_mut() {
+                    task.status = Status::Succeeded;
+                    let KindWithContent::TaskDeletion { tasks, query: _ } = &task.kind else {
+                        unreachable!()
+                    };

-                let mut wtxn = self.env.write_txn()?;
-                let deleted_tasks_count = self.delete_matched_tasks(&mut wtxn, matched_tasks)?;
+                    let deleted_tasks_count = deleted_tasks.intersection_len(tasks);
+                    deleted_tasks -= tasks;

-                task.status = Status::Succeeded;
-                match &mut task.details {
-                    Some(Details::TaskDeletion {
-                        matched_tasks: _,
-                        deleted_tasks,
-                        original_filter: _,
-                    }) => {
-                        *deleted_tasks = Some(deleted_tasks_count);
+                    match &mut task.details {
+                        Some(Details::TaskDeletion {
+                            matched_tasks: _,
+                            deleted_tasks,
+                            original_filter: _,
+                        }) => {
+                            *deleted_tasks = Some(deleted_tasks_count);
+                        }
+                        _ => unreachable!(),
                     }
-                    _ => unreachable!(),
                 }
-                wtxn.commit()?;
-                Ok(vec![task])
+                Ok(tasks)
             }
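Since one `TaskDeletions` batch now settles several deletion tasks, each task's own `deleted_tasks` count is recovered with `intersection_len` and the consumed ids are then subtracted so the same deletion is not attributed twice. The bitmap arithmetic in isolation (assuming the `roaring` crate):

```rust
use roaring::RoaringBitmap;

fn main() {
    // Everything the batch managed to delete, across all deletion tasks.
    let mut deleted_tasks: RoaringBitmap = [1u32, 2, 3, 8].into_iter().collect();

    // The ids that one particular TaskDeletion task had matched.
    let matched: RoaringBitmap = [2u32, 3, 99].into_iter().collect();

    let deleted_for_this_task = deleted_tasks.intersection_len(&matched);
    deleted_tasks -= &matched; // don't attribute 2 and 3 to the next task

    assert_eq!(deleted_for_this_task, 2);
    assert_eq!(deleted_tasks.iter().collect::<Vec<_>>(), vec![1, 8]);
}
```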
             Batch::SnapshotCreation(mut tasks) => {
                 fs::create_dir_all(&self.snapshots_path)?;

@@ -642,7 +738,7 @@ impl IndexScheduler {

                 // 2. Snapshot the index-scheduler LMDB env
                 //
-                // When we call copy_to_path, LMDB opens a read transaction by itself,
+                // When we call copy_to_file, LMDB opens a read transaction by itself,
                 // we can't provide our own. It is an issue as we would like to know
                 // the update files to copy but new ones can be enqueued between the copy
                 // of the env and the new transaction we open to retrieve the enqueued tasks.

@@ -655,7 +751,7 @@ impl IndexScheduler {
                 // 2.1 First copy the LMDB env of the index-scheduler
                 let dst = temp_snapshot_dir.path().join("tasks");
                 fs::create_dir_all(&dst)?;
-                self.env.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;
+                self.env.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;

                 // 2.2 Create a read transaction on the index-scheduler
                 let rtxn = self.env.read_txn()?;

@@ -680,7 +776,7 @@ impl IndexScheduler {
                     let index = self.index_mapper.index(&rtxn, name)?;
                     let dst = temp_snapshot_dir.path().join("indexes").join(uuid.to_string());
                     fs::create_dir_all(&dst)?;
-                    index.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;
+                    index.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;
                 }

                 drop(rtxn);

@@ -693,7 +789,7 @@ impl IndexScheduler {
                     .map_size(1024 * 1024 * 1024) // 1 GiB
                     .max_dbs(2)
                     .open(&self.auth_path)?;
-                auth.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;
+                auth.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;

                 // 5. Copy and tarball the flat snapshot
                 // 5.1 Find the original name of the database

@@ -749,6 +845,10 @@ impl IndexScheduler {
                 // 2. dump the tasks
                 let mut dump_tasks = dump.create_tasks_queue()?;
                 for ret in self.all_tasks.iter(&rtxn)? {
+                    if self.must_stop_processing.get() {
+                        return Err(Error::AbortedTask);
+                    }
+
                     let (_, mut t) = ret?;
                     let status = t.status;
                     let content_file = t.content_uuid();

@@ -769,6 +869,9 @@ impl IndexScheduler {

                     // 2.1. Dump the `content_file` associated with the task if there is one and the task is not finished yet.
                     if let Some(content_file) = content_file {
+                        if self.must_stop_processing.get() {
+                            return Err(Error::AbortedTask);
+                        }
                         if status == Status::Enqueued {
                             let content_file = self.file_store.get_update(content_file)?;

@@ -808,6 +911,9 @@ impl IndexScheduler {

                     // 3.1. Dump the documents
                     for ret in index.all_documents(&rtxn)? {
+                        if self.must_stop_processing.get() {
+                            return Err(Error::AbortedTask);
+                        }
                         let (_id, doc) = ret?;
                         let document = milli::obkv_to_json(&all_fields, &fields_ids_map, doc)?;
                         index_dumper.push_document(&document)?;

@@ -819,10 +925,17 @@ impl IndexScheduler {
                     Ok(())
                 })?;

+                // 4. Dump experimental feature settings
+                let features = self.features().runtime_features();
+                dump.create_experimental_features(features)?;
+
                 let dump_uid = started_at.format(format_description!(
                     "[year repr:full][month repr:numerical][day padding:zero]-[hour padding:zero][minute padding:zero][second padding:zero][subsecond digits:3]"
                 )).unwrap();

+                if self.must_stop_processing.get() {
+                    return Err(Error::AbortedTask);
+                }
                 let path = self.dumps_path.join(format!("{}.dump", dump_uid));
                 let file = File::create(path)?;
                 dump.persist_to(BufWriter::new(file))?;
|
||||
Ok(vec![task])
|
||||
}
|
||||
Batch::IndexOperation { op, must_create_index } => {
|
||||
let index_uid = op.index_uid();
|
||||
let index_uid = op.index_uid().to_string();
|
||||
let index = if must_create_index {
|
||||
// create the index if it doesn't already exist
|
||||
let wtxn = self.env.write_txn()?;
|
||||
self.index_mapper.create_index(wtxn, index_uid, None)?
|
||||
self.index_mapper.create_index(wtxn, &index_uid, None)?
|
||||
} else {
|
||||
let rtxn = self.env.read_txn()?;
|
||||
self.index_mapper.index(&rtxn, index_uid)?
|
||||
self.index_mapper.index(&rtxn, &index_uid)?
|
||||
};
|
||||
|
||||
// the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
|
||||
self.index_mapper
|
||||
.set_currently_updating_index(Some((index_uid.clone(), index.clone())));
|
||||
|
||||
let mut index_wtxn = index.write_txn()?;
|
||||
let tasks = self.apply_index_operation(&mut index_wtxn, &index, op)?;
|
||||
index_wtxn.commit()?;
|
||||
|
||||
// if the update processed successfully, we're going to store the new
|
||||
// stats of the index. Since the tasks have already been processed and
|
||||
// this is a non-critical operation. If it fails, we should not fail
|
||||
// the entire batch.
|
||||
let res = || -> Result<()> {
|
||||
let index_rtxn = index.read_txn()?;
|
||||
let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)?;
|
||||
let mut wtxn = self.env.write_txn()?;
|
||||
self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
|
||||
wtxn.commit()?;
|
||||
Ok(())
|
||||
}();
|
||||
|
||||
match res {
|
||||
Ok(_) => (),
|
||||
Err(e) => tracing::error!(
|
||||
error = &e as &dyn std::error::Error,
|
||||
"Could not write the stats of the index"
|
||||
),
|
||||
}
|
||||
|
||||
Ok(tasks)
|
||||
}
|
||||
Batch::IndexCreation { index_uid, primary_key, task } => {
|
||||
@ -872,14 +1010,39 @@ impl IndexScheduler {
|
||||
builder.set_primary_key(primary_key);
|
||||
let must_stop_processing = self.must_stop_processing.clone();
|
||||
builder.execute(
|
||||
|indexing_step| debug!("update: {:?}", indexing_step),
|
||||
|indexing_step| tracing::debug!(update = ?indexing_step),
|
||||
|| must_stop_processing.get(),
|
||||
)?;
|
||||
index_wtxn.commit()?;
|
||||
}
|
||||
|
||||
// drop rtxn before starting a new wtxn on the same db
|
||||
rtxn.commit()?;
|
||||
|
||||
task.status = Status::Succeeded;
|
||||
task.details = Some(Details::IndexInfo { primary_key });
|
||||
|
||||
// if the update processed successfully, we're going to store the new
|
||||
// stats of the index. Since the tasks have already been processed and
|
||||
// this is a non-critical operation. If it fails, we should not fail
|
||||
// the entire batch.
|
||||
let res = || -> Result<()> {
|
||||
let mut wtxn = self.env.write_txn()?;
|
||||
let index_rtxn = index.read_txn()?;
|
||||
let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)?;
|
||||
self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
|
||||
wtxn.commit()?;
|
||||
Ok(())
|
||||
}();
|
||||
|
||||
match res {
|
||||
Ok(_) => (),
|
||||
Err(e) => tracing::error!(
|
||||
error = &e as &dyn std::error::Error,
|
||||
"Could not write the stats of the index"
|
||||
),
|
||||
}
|
||||
|
||||
Ok(vec![task])
|
||||
}
|
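Storing index stats after a batch is deliberately non-fatal: the work runs in an immediately-invoked closure returning `Result`, and a failure is only logged because the tasks themselves have already been committed. The idiom in isolation:

```rust
fn main() {
    // An immediately-invoked closure scopes the `?` operator without
    // letting a stats failure abort the already-committed batch.
    let res = || -> Result<(), std::io::Error> {
        // stand-in for: open rtxn, compute IndexStats, store them, commit
        Ok(())
    }();

    match res {
        Ok(_) => (),
        Err(e) => eprintln!("Could not write the stats of the index: {e}"),
    }
}
```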
             Batch::IndexDeletion { index_uid, index_has_been_created, mut tasks } => {

@@ -893,7 +1056,7 @@ impl IndexScheduler {
                 }()
                 .unwrap_or_default();

-                // The write transaction is directly owned and commited inside.
+                // The write transaction is directly owned and committed inside.
                 match self.index_mapper.delete_index(wtxn, &index_uid) {
                     Ok(()) => (),
                     Err(Error::IndexNotFound(_)) if index_has_been_created => (),

@@ -972,7 +1135,7 @@ impl IndexScheduler {
         for task_id in &index_lhs_task_ids | &index_rhs_task_ids {
             let mut task = self.get_task(wtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
             swap_index_uid_in_task(&mut task, (lhs, rhs));
-            self.all_tasks.put(wtxn, &BEU32::new(task_id), &task)?;
+            self.all_tasks.put(wtxn, &task_id, &task)?;
         }

         // 4. remove the task from indexuid = before_name

@@ -996,12 +1159,19 @@ impl IndexScheduler {
     ///
     /// ## Return
     /// The list of processed tasks.
+    #[tracing::instrument(
+        level = "trace",
+        skip(self, index_wtxn, index),
+        target = "indexing::scheduler"
+    )]
     fn apply_index_operation<'i>(
         &self,
-        index_wtxn: &mut RwTxn<'i, '_>,
+        index_wtxn: &mut RwTxn<'i>,
         index: &'i Index,
         operation: IndexOperation,
     ) -> Result<Vec<Task>> {
         puffin::profile_function!();

         match operation {
             IndexOperation::DocumentClear { mut tasks, .. } => {
                 let count = milli::update::ClearDocuments::new(index_wtxn, index).execute()?;
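`apply_index_operation` gains a `#[tracing::instrument]` attribute, so every call opens a trace span under the `indexing::scheduler` target with large arguments skipped. A minimal standalone use of the same attribute; `tracing` and `tracing_subscriber` are assumed dependencies here, and the function shown is hypothetical:

```rust
use tracing::instrument;

#[instrument(level = "trace", target = "indexing::scheduler", skip(payload))]
fn apply_operation(op_name: &str, payload: &[u8]) -> usize {
    // `op_name` is recorded on the span; `payload` is skipped as too large.
    tracing::debug!(len = payload.len(), "applying operation");
    payload.len()
}

fn main() {
    tracing_subscriber::fmt().with_max_level(tracing::Level::TRACE).init();
    apply_operation("document-clear", b"...");
}
```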
@@ -1054,7 +1224,7 @@ impl IndexScheduler {
                         milli::update::Settings::new(index_wtxn, index, indexer_config);
                     builder.set_primary_key(primary_key);
                     builder.execute(
-                        |indexing_step| debug!("update: {:?}", indexing_step),
+                        |indexing_step| tracing::debug!(update = ?indexing_step),
                         || must_stop_processing.clone().get(),
                     )?;
                     primary_key_has_been_set = true;

@@ -1064,12 +1234,16 @@ impl IndexScheduler {

                 let config = IndexDocumentsConfig { update_method: method, ..Default::default() };

+                let embedder_configs = index.embedding_configs(index_wtxn)?;
+                // TODO: consider Arc'ing the map too (we only need read access + we'll be cloning it multiple times, so really makes sense)
+                let embedders = self.embedders(embedder_configs)?;
+
                 let mut builder = milli::update::IndexDocuments::new(
                     index_wtxn,
                     index,
                     indexer_config,
                     config,
-                    |indexing_step| debug!("update: {:?}", indexing_step),
+                    |indexing_step| tracing::trace!(?indexing_step, "Update"),
                     || must_stop_processing.get(),
                 )?;

@@ -1082,6 +1256,8 @@ impl IndexScheduler {
                             let (new_builder, user_result) = builder.add_documents(reader)?;
                             builder = new_builder;

+                            builder = builder.with_embedders(embedders.clone());
+
                             let received_documents =
                                 if let Some(Details::DocumentAdditionOrUpdate {
                                     received_documents,

@@ -1116,7 +1292,8 @@ impl IndexScheduler {
                             let (new_builder, user_result) =
                                 builder.remove_documents(document_ids)?;
                             builder = new_builder;

+                            // Uses Invariant: remove documents actually always returns Ok for the inner result
+                            let count = user_result.unwrap();
                             let provided_ids =
                                 if let Some(Details::DocumentDeletion { provided_ids, .. }) =
                                     task.details

@@ -1127,30 +1304,18 @@ impl IndexScheduler {
                                     unreachable!();
                                 };

-                            match user_result {
-                                Ok(count) => {
-                                    task.status = Status::Succeeded;
-                                    task.details = Some(Details::DocumentDeletion {
-                                        provided_ids,
-                                        deleted_documents: Some(count),
-                                    });
-                                }
-                                Err(e) => {
-                                    task.status = Status::Failed;
-                                    task.details = Some(Details::DocumentDeletion {
-                                        provided_ids,
-                                        deleted_documents: Some(0),
-                                    });
-                                    task.error = Some(milli::Error::from(e).into());
-                                }
-                            }
+                            task.status = Status::Succeeded;
+                            task.details = Some(Details::DocumentDeletion {
+                                provided_ids,
+                                deleted_documents: Some(count),
+                            });
                         }
                     }
                 }

                 if !tasks.iter().all(|res| res.error.is_some()) {
                     let addition = builder.execute()?;
-                    info!("document addition done: {:?}", addition);
+                    tracing::info!(indexing_result = ?addition, "document indexing done");
                 } else if primary_key_has_been_set {
                     // Everything failed but we've set a primary key.
                     // We need to remove it.
|
||||
milli::update::Settings::new(index_wtxn, index, indexer_config);
|
||||
builder.reset_primary_key();
|
||||
builder.execute(
|
||||
|indexing_step| debug!("update: {:?}", indexing_step),
|
||||
|indexing_step| tracing::trace!(update = ?indexing_step),
|
||||
|| must_stop_processing.clone().get(),
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(tasks)
|
||||
}
|
||||
IndexOperation::DocumentDeletion { index_uid: _, documents, mut tasks } => {
|
||||
let mut builder = milli::update::DeleteDocuments::new(index_wtxn, index)?;
|
||||
documents.iter().flatten().for_each(|id| {
|
||||
builder.delete_external_id(id);
|
||||
});
|
||||
IndexOperation::IndexDocumentDeletionByFilter { mut task, index_uid: _ } => {
|
||||
let filter =
|
||||
if let KindWithContent::DocumentDeletionByFilter { filter_expr, .. } =
|
||||
&task.kind
|
||||
{
|
||||
filter_expr
|
||||
} else {
|
||||
unreachable!()
|
||||
};
|
||||
let deleted_documents = delete_document_by_filter(
|
||||
index_wtxn,
|
||||
filter,
|
||||
self.index_mapper.indexer_config(),
|
||||
self.must_stop_processing.clone(),
|
||||
index,
|
||||
);
|
||||
let original_filter = if let Some(Details::DocumentDeletionByFilter {
|
||||
original_filter,
|
||||
deleted_documents: _,
|
||||
}) = task.details
|
||||
{
|
||||
original_filter
|
||||
} else {
|
||||
// In the case of a `documentDeleteByFilter` the details MUST be set
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
let DocumentDeletionResult { deleted_documents, .. } = builder.execute()?;
|
||||
|
||||
for (task, documents) in tasks.iter_mut().zip(documents) {
|
||||
task.status = Status::Succeeded;
|
||||
task.details = Some(Details::DocumentDeletion {
|
||||
provided_ids: documents.len(),
|
||||
deleted_documents: Some(deleted_documents.min(documents.len() as u64)),
|
||||
});
|
||||
match deleted_documents {
|
||||
Ok(deleted_documents) => {
|
||||
task.status = Status::Succeeded;
|
||||
task.details = Some(Details::DocumentDeletionByFilter {
|
||||
original_filter,
|
||||
deleted_documents: Some(deleted_documents),
|
||||
});
|
||||
}
|
||||
Err(e) => {
|
||||
task.status = Status::Failed;
|
||||
task.details = Some(Details::DocumentDeletionByFilter {
|
||||
original_filter,
|
||||
deleted_documents: Some(0),
|
||||
});
|
||||
task.error = Some(e.into());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(tasks)
|
||||
Ok(vec![task])
|
||||
}
|
||||
IndexOperation::Settings { index_uid: _, settings, mut tasks } => {
|
||||
let indexer_config = self.index_mapper.indexer_config();
|
||||
@ -1199,7 +1393,7 @@ impl IndexScheduler {
|
||||
|
||||
let must_stop_processing = self.must_stop_processing.clone();
|
||||
builder.execute(
|
||||
|indexing_step| debug!("update: {:?}", indexing_step),
|
||||
|indexing_step| tracing::debug!(update = ?indexing_step),
|
||||
|| must_stop_processing.get(),
|
||||
)?;
|
||||
|
||||
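Each deletion-by-filter task above settles into exactly one of two shapes: `Succeeded` with the real count, or `Failed` with a zero count and the error attached. Schematically, with stand-in types:

```rust
#[derive(Debug, PartialEq)]
enum Status {
    Succeeded,
    Failed,
}

// Map the helper's Result onto (status, deleted_documents, error).
fn settle(result: Result<u64, String>) -> (Status, u64, Option<String>) {
    match result {
        Ok(deleted) => (Status::Succeeded, deleted, None),
        Err(e) => (Status::Failed, 0, Some(e)),
    }
}

fn main() {
    assert_eq!(settle(Ok(12)), (Status::Succeeded, 12, None));
    assert_eq!(settle(Err("bad filter".into())).0, Status::Failed);
}
```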
@@ -1273,7 +1467,11 @@ impl IndexScheduler {
     /// Delete each given task from all the databases (if it is deleteable).
     ///
     /// Return the number of tasks that were actually deleted.
-    fn delete_matched_tasks(&self, wtxn: &mut RwTxn, matched_tasks: &RoaringBitmap) -> Result<u64> {
+    fn delete_matched_tasks(
+        &self,
+        wtxn: &mut RwTxn,
+        matched_tasks: &RoaringBitmap,
+    ) -> Result<RoaringBitmap> {
         // 1. Remove from this list the tasks that we are not allowed to delete
         let enqueued_tasks = self.get_status(wtxn, Status::Enqueued)?;
         let processing_tasks = &self.processing_tasks.read().unwrap().processing.clone();

@@ -1325,10 +1523,9 @@ impl IndexScheduler {
         }

         for task in to_delete_tasks.iter() {
-            self.all_tasks.delete(wtxn, &BEU32::new(task))?;
+            self.all_tasks.delete(wtxn, &task)?;
         }
         for canceled_by in affected_canceled_by {
-            let canceled_by = BEU32::new(canceled_by);
             if let Some(mut tasks) = self.canceled_by.get(wtxn, &canceled_by)? {
                 tasks -= &to_delete_tasks;
                 if tasks.is_empty() {

@@ -1339,7 +1536,7 @@ impl IndexScheduler {
             }
         }

-        Ok(to_delete_tasks.len())
+        Ok(to_delete_tasks)
     }

     /// Cancel each given task from all the databases (if it is cancelable).

@@ -1376,8 +1573,48 @@ impl IndexScheduler {
             task.details = task.details.map(|d| d.to_failed());
             self.update_task(wtxn, &task)?;
         }
-        self.canceled_by.put(wtxn, &BEU32::new(cancel_task_id), &tasks_to_cancel)?;
+        self.canceled_by.put(wtxn, &cancel_task_id, &tasks_to_cancel)?;

         Ok(content_files_to_delete)
     }
 }
+
+fn delete_document_by_filter<'a>(
+    wtxn: &mut RwTxn<'a>,
+    filter: &serde_json::Value,
+    indexer_config: &IndexerConfig,
+    must_stop_processing: MustStopProcessing,
+    index: &'a Index,
+) -> Result<u64> {
+    let filter = Filter::from_json(filter)?;
+    Ok(if let Some(filter) = filter {
+        let candidates = filter.evaluate(wtxn, index).map_err(|err| match err {
+            milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
+                Error::from(err).with_custom_error_code(Code::InvalidDocumentFilter)
+            }
+            e => e.into(),
+        })?;
+
+        let config = IndexDocumentsConfig {
+            update_method: IndexDocumentsMethod::ReplaceDocuments,
+            ..Default::default()
+        };
+
+        let mut builder = milli::update::IndexDocuments::new(
+            wtxn,
+            index,
+            indexer_config,
+            config,
+            |indexing_step| tracing::debug!(update = ?indexing_step),
+            || must_stop_processing.get(),
+        )?;
+
+        let (new_builder, count) = builder.remove_documents_from_db_no_batch(&candidates)?;
+        builder = new_builder;
+
+        let _ = builder.execute()?;
+        count
+    } else {
+        0
+    })
+}
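`delete_document_by_filter` receives the filter as raw `serde_json::Value`, and `Filter::from_json` yields `None` for an empty filter, which is why the helper falls back to `0` deleted documents. A hedged sketch of that control flow with a stand-in parser (the real parsing and evaluation live in milli):

```rust
use serde_json::{json, Value};

// Stand-in for milli's `Filter::from_json`: `Ok(None)` models an empty filter.
fn parse_filter(v: &Value) -> Result<Option<String>, String> {
    match v.as_str() {
        Some("") | None => Ok(None),
        Some(s) => Ok(Some(s.to_string())),
    }
}

fn delete_by_filter(v: &Value) -> Result<u64, String> {
    Ok(if let Some(filter) = parse_filter(v)? {
        // evaluate the filter, stage the candidates, run the indexer...
        filter.len() as u64 // stand-in for the deleted-documents count
    } else {
        0 // nothing to evaluate: no documents deleted
    })
}

fn main() {
    assert_eq!(delete_by_filter(&json!("genre = horror")).unwrap(), 14);
    assert_eq!(delete_by_filter(&json!("")).unwrap(), 0);
}
```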
@@ -46,6 +46,10 @@ impl From<DateField> for Code {
 #[allow(clippy::large_enum_variant)]
 #[derive(Error, Debug)]
 pub enum Error {
+    #[error("{1}")]
+    WithCustomErrorCode(Code, Box<Self>),
+    #[error("Received bad task id: {received} should be >= to {expected}.")]
+    BadTaskId { received: TaskId, expected: TaskId },
     #[error("Index `{0}` not found.")]
     IndexNotFound(String),
     #[error("Index `{0}` already exists.")]

@@ -61,6 +65,8 @@ pub enum Error {
     SwapDuplicateIndexesFound(Vec<String>),
     #[error("Index `{0}` not found.")]
     SwapIndexNotFound(String),
+    #[error("Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.")]
+    NoSpaceLeftInTaskQueue,
     #[error(
         "Indexes {} not found.",
         .0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")

@@ -104,6 +110,8 @@ pub enum Error {
     TaskDeletionWithEmptyQuery,
     #[error("Query parameters to filter the tasks to cancel are missing. Available query parameters are: `uids`, `indexUids`, `statuses`, `types`, `canceledBy`, `beforeEnqueuedAt`, `afterEnqueuedAt`, `beforeStartedAt`, `afterStartedAt`, `beforeFinishedAt`, `afterFinishedAt`.")]
     TaskCancelationWithEmptyQuery,
+    #[error("Aborted task")]
+    AbortedTask,

     #[error(transparent)]
     Dump(#[from] dump::Error),

@@ -119,6 +127,8 @@ pub enum Error {
     IoError(#[from] std::io::Error),
     #[error(transparent)]
     Persist(#[from] tempfile::PersistError),
+    #[error(transparent)]
+    FeatureNotEnabled(#[from] FeatureNotEnabledError),

     #[error(transparent)]
     Anyhow(#[from] anyhow::Error),

@@ -132,11 +142,73 @@ pub enum Error {
     TaskDatabaseUpdate(Box<Self>),
     #[error(transparent)]
     HeedTransaction(heed::Error),

     #[cfg(test)]
     #[error("Planned failure for tests.")]
     PlannedFailure,
 }

+#[derive(Debug, thiserror::Error)]
+#[error(
+    "{disabled_action} requires enabling the `{feature}` experimental feature. See {issue_link}"
+)]
+pub struct FeatureNotEnabledError {
+    pub disabled_action: &'static str,
+    pub feature: &'static str,
+    pub issue_link: &'static str,
+}
+
+impl Error {
+    pub fn is_recoverable(&self) -> bool {
+        match self {
+            Error::IndexNotFound(_)
+            | Error::WithCustomErrorCode(_, _)
+            | Error::BadTaskId { .. }
+            | Error::IndexAlreadyExists(_)
+            | Error::SwapDuplicateIndexFound(_)
+            | Error::SwapDuplicateIndexesFound(_)
+            | Error::SwapIndexNotFound(_)
+            | Error::NoSpaceLeftInTaskQueue
+            | Error::SwapIndexesNotFound(_)
+            | Error::CorruptedDump
+            | Error::InvalidTaskDate { .. }
+            | Error::InvalidTaskUids { .. }
+            | Error::InvalidTaskStatuses { .. }
+            | Error::InvalidTaskTypes { .. }
+            | Error::InvalidTaskCanceledBy { .. }
+            | Error::InvalidIndexUid { .. }
+            | Error::TaskNotFound(_)
+            | Error::TaskDeletionWithEmptyQuery
+            | Error::TaskCancelationWithEmptyQuery
+            | Error::AbortedTask
+            | Error::Dump(_)
+            | Error::Heed(_)
+            | Error::Milli(_)
+            | Error::ProcessBatchPanicked
+            | Error::FileStore(_)
+            | Error::IoError(_)
+            | Error::Persist(_)
+            | Error::FeatureNotEnabled(_)
+            | Error::Anyhow(_) => true,
+            Error::CreateBatch(_)
+            | Error::CorruptedTaskQueue
+            | Error::TaskDatabaseUpdate(_)
+            | Error::HeedTransaction(_) => false,
+            #[cfg(test)]
+            Error::PlannedFailure => false,
+        }
+    }
+
+    pub fn with_custom_error_code(self, code: Code) -> Self {
+        Self::WithCustomErrorCode(code, Box::new(self))
+    }
+}
+
 impl ErrorCode for Error {
     fn error_code(&self) -> Code {
         match self {
+            Error::WithCustomErrorCode(code, _) => *code,
+            Error::BadTaskId { .. } => Code::BadRequest,
             Error::IndexNotFound(_) => Code::IndexNotFound,
             Error::IndexAlreadyExists(_) => Code::IndexAlreadyExists,
             Error::SwapDuplicateIndexesFound(_) => Code::InvalidSwapDuplicateIndexFound,

@@ -152,6 +224,8 @@ impl ErrorCode for Error {
             Error::TaskNotFound(_) => Code::TaskNotFound,
             Error::TaskDeletionWithEmptyQuery => Code::MissingTaskFilters,
             Error::TaskCancelationWithEmptyQuery => Code::MissingTaskFilters,
+            // TODO: not sure of the Code to use
+            Error::NoSpaceLeftInTaskQueue => Code::NoSpaceLeftOnDevice,
             Error::Dump(e) => e.error_code(),
             Error::Milli(e) => e.error_code(),
             Error::ProcessBatchPanicked => Code::Internal,

@@ -160,6 +234,7 @@ impl ErrorCode for Error {
             Error::FileStore(e) => e.error_code(),
             Error::IoError(e) => e.error_code(),
             Error::Persist(e) => e.error_code(),
+            Error::FeatureNotEnabled(_) => Code::FeatureNotEnabled,

             // Irrecoverable errors
             Error::Anyhow(_) => Code::Internal,

@@ -167,6 +242,12 @@ impl ErrorCode for Error {
             Error::CorruptedDump => Code::Internal,
             Error::TaskDatabaseUpdate(_) => Code::Internal,
             Error::CreateBatch(_) => Code::Internal,
+
+            // This one should never be seen by the end user
+            Error::AbortedTask => Code::Internal,
+
+            #[cfg(test)]
+            Error::PlannedFailure => Code::Internal,
         }
     }
 }
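`WithCustomErrorCode` wraps any scheduler error to override its public code while keeping the original message, via the `with_custom_error_code` builder. The wrapping pattern in miniature, with stand-in `Code` and `Error` types:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Code {
    Internal,
    InvalidDocumentFilter,
}

#[derive(Debug)]
enum Error {
    WithCustomErrorCode(Code, Box<Error>),
    Milli(String),
}

impl Error {
    fn with_custom_error_code(self, code: Code) -> Self {
        Error::WithCustomErrorCode(code, Box::new(self))
    }

    fn error_code(&self) -> Code {
        match self {
            // The wrapper wins; the inner error keeps the message.
            Error::WithCustomErrorCode(code, _) => *code,
            Error::Milli(_) => Code::Internal,
        }
    }
}

fn main() {
    let err = Error::Milli("invalid filter".into())
        .with_custom_error_code(Code::InvalidDocumentFilter);
    assert_eq!(err.error_code(), Code::InvalidDocumentFilter);
}
```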
index-scheduler/src/features.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
+use std::sync::{Arc, RwLock};
+
+use meilisearch_types::features::{InstanceTogglableFeatures, RuntimeTogglableFeatures};
+use meilisearch_types::heed::types::{SerdeJson, Str};
+use meilisearch_types::heed::{Database, Env, RwTxn};
+
+use crate::error::FeatureNotEnabledError;
+use crate::Result;
+
+const EXPERIMENTAL_FEATURES: &str = "experimental-features";
+
+#[derive(Clone)]
+pub(crate) struct FeatureData {
+    persisted: Database<Str, SerdeJson<RuntimeTogglableFeatures>>,
+    runtime: Arc<RwLock<RuntimeTogglableFeatures>>,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct RoFeatures {
+    runtime: RuntimeTogglableFeatures,
+}
+
+impl RoFeatures {
+    fn new(data: &FeatureData) -> Self {
+        let runtime = data.runtime_features();
+        Self { runtime }
+    }
+
+    pub fn runtime_features(&self) -> RuntimeTogglableFeatures {
+        self.runtime
+    }
+
+    pub fn check_metrics(&self) -> Result<()> {
+        if self.runtime.metrics {
+            Ok(())
+        } else {
+            Err(FeatureNotEnabledError {
+                disabled_action: "Getting metrics",
+                feature: "metrics",
+                issue_link: "https://github.com/meilisearch/product/discussions/625",
+            }
+            .into())
+        }
+    }
+
+    pub fn check_logs_route(&self) -> Result<()> {
+        if self.runtime.logs_route {
+            Ok(())
+        } else {
+            Err(FeatureNotEnabledError {
+                disabled_action: "Modifying logs through the `/logs/*` routes",
+                feature: "logs route",
+                issue_link: "https://github.com/orgs/meilisearch/discussions/721",
+            }
+            .into())
+        }
+    }
+
+    pub fn check_vector(&self, disabled_action: &'static str) -> Result<()> {
+        if self.runtime.vector_store {
+            Ok(())
+        } else {
+            Err(FeatureNotEnabledError {
+                disabled_action,
+                feature: "vector store",
+                issue_link: "https://github.com/meilisearch/product/discussions/677",
+            }
+            .into())
+        }
+    }
+
+    pub fn check_puffin(&self) -> Result<()> {
+        if self.runtime.export_puffin_reports {
+            Ok(())
+        } else {
+            Err(FeatureNotEnabledError {
+                disabled_action: "Outputting Puffin reports to disk",
+                feature: "export puffin reports",
+                issue_link: "https://github.com/meilisearch/product/discussions/693",
+            }
+            .into())
+        }
+    }
+}
+
+impl FeatureData {
+    pub fn new(env: &Env, instance_features: InstanceTogglableFeatures) -> Result<Self> {
+        let mut wtxn = env.write_txn()?;
+        let runtime_features_db = env.create_database(&mut wtxn, Some(EXPERIMENTAL_FEATURES))?;
+        wtxn.commit()?;
+
+        let txn = env.read_txn()?;
+        let persisted_features: RuntimeTogglableFeatures =
+            runtime_features_db.get(&txn, EXPERIMENTAL_FEATURES)?.unwrap_or_default();
+        let runtime = Arc::new(RwLock::new(RuntimeTogglableFeatures {
+            metrics: instance_features.metrics || persisted_features.metrics,
+            logs_route: instance_features.logs_route || persisted_features.logs_route,
+            ..persisted_features
+        }));
+
+        Ok(Self { persisted: runtime_features_db, runtime })
+    }
+
+    pub fn put_runtime_features(
+        &self,
+        mut wtxn: RwTxn,
+        features: RuntimeTogglableFeatures,
+    ) -> Result<()> {
+        self.persisted.put(&mut wtxn, EXPERIMENTAL_FEATURES, &features)?;
+        wtxn.commit()?;
+
+        // safe to unwrap, the lock will only fail if:
+        // 1. requested by the same thread concurrently -> it is called and released in methods that don't call each other
+        // 2. there's a panic while the thread is held -> it is only used for an assignment here.
+        let mut toggled_features = self.runtime.write().unwrap();
+        *toggled_features = features;
+        Ok(())
+    }
+
+    fn runtime_features(&self) -> RuntimeTogglableFeatures {
+        // sound to unwrap, the lock will only fail if:
+        // 1. requested by the same thread concurrently -> it is called and released in methods that don't call each other
+        // 2. there's a panic while the thread is held -> it is only used for copying the data here
+        *self.runtime.read().unwrap()
+    }
+
+    pub fn features(&self) -> RoFeatures {
+        RoFeatures::new(self)
+    }
+}
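Each `check_*` method above turns a disabled experimental feature into a typed error rather than a boolean, so call sites can simply `?` it before doing the gated work. A sketch of gating a route this way, with simplified stand-in types:

```rust
#[derive(Debug)]
struct FeatureNotEnabledError {
    disabled_action: &'static str,
    feature: &'static str,
}

#[derive(Clone, Copy, Default)]
struct RoFeatures {
    metrics: bool,
}

impl RoFeatures {
    fn check_metrics(&self) -> Result<(), FeatureNotEnabledError> {
        if self.metrics {
            Ok(())
        } else {
            Err(FeatureNotEnabledError {
                disabled_action: "Getting metrics",
                feature: "metrics",
            })
        }
    }
}

// A handler only needs `features.check_metrics()?;` before doing the work.
fn metrics_route(features: RoFeatures) -> Result<&'static str, FeatureNotEnabledError> {
    features.check_metrics()?;
    Ok("# HELP ...")
}

fn main() {
    assert!(metrics_route(RoFeatures::default()).is_err());
    assert!(metrics_route(RoFeatures { metrics: true }).is_ok());
}
```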
@@ -1,11 +1,8 @@
-/// the map size to use when we don't succeed in reading it in indexes.
-const DEFAULT_MAP_SIZE: usize = 10 * 1024 * 1024 * 1024; // 10 GiB
-
 use std::collections::BTreeMap;
 use std::path::Path;
 use std::time::Duration;

-use meilisearch_types::heed::{EnvClosingEvent, EnvOpenOptions};
+use meilisearch_types::heed::{EnvClosingEvent, EnvFlags, EnvOpenOptions};
 use meilisearch_types::milli::Index;
 use time::OffsetDateTime;
 use uuid::Uuid;

@@ -53,6 +50,7 @@ pub struct IndexMap {
 pub struct ClosingIndex {
     uuid: Uuid,
     closing_event: EnvClosingEvent,
+    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }

@@ -68,6 +66,7 @@ impl ClosingIndex {
     pub fn wait_timeout(self, timeout: Duration) -> Option<ReopenableIndex> {
         self.closing_event.wait_timeout(timeout).then_some(ReopenableIndex {
             uuid: self.uuid,
+            enable_mdb_writemap: self.enable_mdb_writemap,
             map_size: self.map_size,
             generation: self.generation,
         })

@@ -76,6 +75,7 @@ impl ClosingIndex {

 pub struct ReopenableIndex {
     uuid: Uuid,
+    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }

@@ -103,7 +103,7 @@ impl ReopenableIndex {
                 return Ok(());
             }
             map.unavailable.remove(&self.uuid);
-            map.create(&self.uuid, path, None, self.map_size)?;
+            map.create(&self.uuid, path, None, self.enable_mdb_writemap, self.map_size)?;
         }
         Ok(())
     }

@@ -170,16 +170,17 @@ impl IndexMap {
         uuid: &Uuid,
         path: &Path,
         date: Option<(OffsetDateTime, OffsetDateTime)>,
+        enable_mdb_writemap: bool,
         map_size: usize,
     ) -> Result<Index> {
         if !matches!(self.get_unavailable(uuid), Missing) {
             panic!("Attempt to open an index that was unavailable");
         }
-        let index = create_or_open_index(path, date, map_size)?;
+        let index = create_or_open_index(path, date, enable_mdb_writemap, map_size)?;
         match self.available.insert(*uuid, index.clone()) {
             InsertionOutcome::InsertedNew => (),
             InsertionOutcome::Evicted(evicted_uuid, evicted_index) => {
-                self.close(evicted_uuid, evicted_index, 0);
+                self.close(evicted_uuid, evicted_index, enable_mdb_writemap, 0);
             }
             InsertionOutcome::Replaced(_) => {
                 panic!("Attempt to open an index that was already opened")

@@ -212,17 +213,32 @@ impl IndexMap {
     /// | Closing   | Closing |
     /// | Available | Closing |
     ///
-    pub fn close_for_resize(&mut self, uuid: &Uuid, map_size_growth: usize) {
-        let Some(index) = self.available.remove(uuid) else { return; };
-        self.close(*uuid, index, map_size_growth);
+    pub fn close_for_resize(
+        &mut self,
+        uuid: &Uuid,
+        enable_mdb_writemap: bool,
+        map_size_growth: usize,
+    ) {
+        let Some(index) = self.available.remove(uuid) else {
+            return;
+        };
+        self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
     }

-    fn close(&mut self, uuid: Uuid, index: Index, map_size_growth: usize) {
-        let map_size = index.map_size().unwrap_or(DEFAULT_MAP_SIZE) + map_size_growth;
+    fn close(
+        &mut self,
+        uuid: Uuid,
+        index: Index,
+        enable_mdb_writemap: bool,
+        map_size_growth: usize,
+    ) {
+        let map_size = index.map_size() + map_size_growth;
         let closing_event = index.prepare_for_closing();
         let generation = self.next_generation();
-        self.unavailable
-            .insert(uuid, Some(ClosingIndex { uuid, closing_event, map_size, generation }));
+        self.unavailable.insert(
+            uuid,
+            Some(ClosingIndex { uuid, closing_event, enable_mdb_writemap, map_size, generation }),
+        );
     }

     /// Attempts to delete and index.

@@ -282,11 +298,15 @@ impl IndexMap {
 fn create_or_open_index(
     path: &Path,
     date: Option<(OffsetDateTime, OffsetDateTime)>,
+    enable_mdb_writemap: bool,
     map_size: usize,
 ) -> Result<Index> {
     let mut options = EnvOpenOptions::new();
     options.map_size(clamp_to_page_size(map_size));
     options.max_readers(1024);
+    if enable_mdb_writemap {
+        unsafe { options.flags(EnvFlags::WRITE_MAP) };
+    }

     if let Some((created, updated)) = date {
         Ok(Index::new_with_creation_dates(options, path, created, updated)?)

@@ -364,7 +384,7 @@ mod tests {

     fn assert_index_size(index: Index, expected: usize) {
         let expected = clamp_to_page_size(expected);
-        let index_map_size = index.map_size().unwrap();
+        let index_map_size = index.map_size();
         assert_eq!(index_map_size, expected);
     }
 }
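`EnvFlags::WRITE_MAP` maps the LMDB data file writable into the process, trading some crash-safety for fewer copies, which is why heed gates the call behind `unsafe` and why the flag is threaded through every open/close path above. The conditional open in isolation; this is a heed-style sketch and exact signatures vary across heed versions:

```rust
use std::path::Path;

use heed::{Env, EnvFlags, EnvOpenOptions};

fn open_env(path: &Path, map_size: usize, enable_mdb_writemap: bool) -> heed::Result<Env> {
    let mut options = EnvOpenOptions::new();
    options.map_size(map_size);
    options.max_readers(1024);
    if enable_mdb_writemap {
        // SAFETY: WRITE_MAP exposes the map read-write; a stray write in this
        // process can corrupt the database, hence the unsafe opt-in.
        unsafe { options.flags(EnvFlags::WRITE_MAP) };
    }
    options.open(path)
}
```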
@@ -3,12 +3,13 @@ use std::sync::{Arc, RwLock};
 use std::time::Duration;
 use std::{fs, thread};

-use log::error;
-use meilisearch_types::heed::types::Str;
+use meilisearch_types::heed::types::{SerdeJson, Str};
 use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn};
 use meilisearch_types::milli::update::IndexerConfig;
-use meilisearch_types::milli::Index;
+use meilisearch_types::milli::{FieldDistribution, Index};
+use serde::{Deserialize, Serialize};
 use time::OffsetDateTime;
+use tracing::error;
 use uuid::Uuid;

 use self::index_map::IndexMap;

@@ -19,6 +20,7 @@ use crate::{Error, Result};
 mod index_map;

 const INDEX_MAPPING: &str = "index-mapping";
+const INDEX_STATS: &str = "index-stats";

 /// Structure managing meilisearch's indexes.
 ///

@@ -52,6 +54,11 @@ pub struct IndexMapper {

     /// Map an index name with an index uuid currently available on disk.
     pub(crate) index_mapping: Database<Str, UuidCodec>,
+    /// Map an index UUID with the cached stats associated to the index.
+    ///
+    /// Using an UUID forces to use the index_mapping table to recover the index behind a name, ensuring
+    /// consistency wrt index swapping.
+    pub(crate) index_stats: Database<UuidCodec, SerdeJson<IndexStats>>,

     /// Path to the folder where the LMDB environments of each index are.
     base_path: PathBuf,

@@ -59,7 +66,13 @@ pub struct IndexMapper {
     index_base_map_size: usize,
     /// The quantity by which the map size of an index is incremented upon reopening, in bytes.
     index_growth_amount: usize,
+    /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.
+    enable_mdb_writemap: bool,
     pub indexer_config: Arc<IndexerConfig>,
+
+    /// A few types of long running batches of tasks that act on a single index set this field
+    /// so that a handle to the index is available from other threads (search) in an optimized manner.
+    currently_updating_index: Arc<RwLock<Option<(String, Index)>>>,
 }

 /// Whether the index is available for use or is forbidden to be inserted back in the index map

@@ -76,6 +89,48 @@ pub enum IndexStatus {
     Available(Index),
 }

+/// The statistics that can be computed from an `Index` object.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct IndexStats {
+    /// Number of documents in the index.
+    pub number_of_documents: u64,
+    /// Size taken up by the index' DB, in bytes.
+    ///
+    /// This includes the size taken by both the used and free pages of the DB, and as the free pages
+    /// are not returned to the disk after a deletion, this number is typically larger than
+    /// `used_database_size` that only includes the size of the used pages.
+    pub database_size: u64,
+    /// Size taken by the used pages of the index' DB, in bytes.
+    ///
+    /// As the DB backend does not return to the disk the pages that are not currently used by the DB,
+    /// this value is typically smaller than `database_size`.
+    pub used_database_size: u64,
+    /// Association of every field name with the number of times it occurs in the documents.
+    pub field_distribution: FieldDistribution,
+    /// Creation date of the index.
+    pub created_at: OffsetDateTime,
+    /// Date of the last update of the index.
+    pub updated_at: OffsetDateTime,
+}
+
+impl IndexStats {
+    /// Compute the stats of an index
+    ///
+    /// # Parameters
+    ///
+    /// - rtxn: a RO transaction for the index, obtained from `Index::read_txn()`.
+    pub fn new(index: &Index, rtxn: &RoTxn) -> Result<Self> {
+        Ok(IndexStats {
+            number_of_documents: index.number_of_documents(rtxn)?,
+            database_size: index.on_disk_size()?,
+            used_database_size: index.used_size()?,
+            field_distribution: index.field_distribution(rtxn)?,
+            created_at: index.created_at(rtxn)?,
+            updated_at: index.updated_at(rtxn)?,
+        })
+    }
+}
+
 impl IndexMapper {
     pub fn new(
         env: &Env,

@@ -83,15 +138,24 @@ impl IndexMapper {
         index_base_map_size: usize,
         index_growth_amount: usize,
         index_count: usize,
+        enable_mdb_writemap: bool,
         indexer_config: IndexerConfig,
     ) -> Result<Self> {
+        let mut wtxn = env.write_txn()?;
+        let index_mapping = env.create_database(&mut wtxn, Some(INDEX_MAPPING))?;
+        let index_stats = env.create_database(&mut wtxn, Some(INDEX_STATS))?;
+        wtxn.commit()?;
+
         Ok(Self {
             index_map: Arc::new(RwLock::new(IndexMap::new(index_count))),
-            index_mapping: env.create_database(Some(INDEX_MAPPING))?,
+            index_mapping,
+            index_stats,
             base_path,
             index_base_map_size,
             index_growth_amount,
+            enable_mdb_writemap,
             indexer_config: Arc::new(indexer_config),
+            currently_updating_index: Default::default(),
         })
     }

@@ -121,6 +185,7 @@ impl IndexMapper {
             &uuid,
             &index_path,
             date,
+            self.enable_mdb_writemap,
             self.index_base_map_size,
         )?;

@@ -140,6 +205,9 @@ impl IndexMapper {
             .get(&wtxn, name)?
             .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;

+        // Not an error if the index had no stats in cache.
+        self.index_stats.delete(&mut wtxn, &uuid)?;
+
         // Once we retrieved the UUID of the index we remove it from the mapping table.
         assert!(self.index_mapping.delete(&mut wtxn, name)?);

@@ -229,13 +297,25 @@ impl IndexMapper {
             .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;

         // We remove the index from the in-memory index map.
-        self.index_map.write().unwrap().close_for_resize(&uuid, self.index_growth_amount);
+        self.index_map.write().unwrap().close_for_resize(
+            &uuid,
+            self.enable_mdb_writemap,
+            self.index_growth_amount,
+        );

         Ok(())
     }

     /// Return an index, may open it if it wasn't already opened.
     pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
+        if let Some((current_name, current_index)) =
+            self.currently_updating_index.read().unwrap().as_ref()
+        {
+            if current_name == name {
+                return Ok(current_index.clone());
+            }
+        }
+
         let uuid = self
             .index_mapping
             .get(rtxn, name)?

@@ -294,6 +374,7 @@ impl IndexMapper {
                         &uuid,
                         &index_path,
                         None,
+                        self.enable_mdb_writemap,
                         self.index_base_map_size,
                     )?;
                 }

@@ -360,6 +441,45 @@ impl IndexMapper {
         Ok(())
     }

+    /// The stats of an index.
+    ///
+    /// If available in the cache, they are directly returned.
+    /// Otherwise, the `Index` is opened to compute the stats on the fly (the result is not cached).
+    /// The stats for an index are cached after each `Index` update.
+    pub fn stats_of(&self, rtxn: &RoTxn, index_uid: &str) -> Result<IndexStats> {
+        let uuid = self
+            .index_mapping
+            .get(rtxn, index_uid)?
+            .ok_or_else(|| Error::IndexNotFound(index_uid.to_string()))?;
+
+        match self.index_stats.get(rtxn, &uuid)? {
+            Some(stats) => Ok(stats),
+            None => {
+                let index = self.index(rtxn, index_uid)?;
+                let index_rtxn = index.read_txn()?;
+                IndexStats::new(&index, &index_rtxn)
+            }
+        }
+    }
+
+    /// Stores the new stats for an index.
+    ///
+    /// Expected usage is to compute the stats the index using `IndexStats::new`, the pass it to this function.
+    pub fn store_stats_of(
+        &self,
+        wtxn: &mut RwTxn,
+        index_uid: &str,
+        stats: &IndexStats,
+    ) -> Result<()> {
+        let uuid = self
+            .index_mapping
+            .get(wtxn, index_uid)?
+            .ok_or_else(|| Error::IndexNotFound(index_uid.to_string()))?;
+
+        self.index_stats.put(wtxn, &uuid, stats)?;
+        Ok(())
+    }
+
     pub fn index_exists(&self, rtxn: &RoTxn, name: &str) -> Result<bool> {
         Ok(self.index_mapping.get(rtxn, name)?.is_some())
     }

@@ -367,4 +487,8 @@ impl IndexMapper {
     pub fn indexer_config(&self) -> &IndexerConfig {
         &self.indexer_config
     }
+
+    pub fn set_currently_updating_index(&self, index: Option<(String, Index)>) {
+        *self.currently_updating_index.write().unwrap() = index;
+    }
 }
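Stats are computed once per update and then served from the per-UUID cache, so `stats_of` rarely has to open an index. A hypothetical mirror of that cached flow, with a plain `HashMap` standing in for the LMDB database:

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct IndexStats {
    number_of_documents: u64,
    database_size: u64,
}

struct StatsCache {
    by_index: HashMap<String, IndexStats>,
}

impl StatsCache {
    // Like `store_stats_of`: called right after a successful batch.
    fn store(&mut self, uid: &str, stats: IndexStats) {
        self.by_index.insert(uid.to_string(), stats);
    }

    // Like `stats_of`: served from cache; only a miss would recompute.
    fn stats_of(&self, uid: &str) -> Option<&IndexStats> {
        self.by_index.get(uid)
    }
}

fn main() {
    let mut cache = StatsCache { by_index: HashMap::new() };
    cache.store("movies", IndexStats { number_of_documents: 31_968, database_size: 4 << 20 });
    assert!(cache.stats_of("movies").is_some());
}
```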
@ -1,7 +1,7 @@
|
||||
use std::collections::BTreeSet;
|
||||
use std::fmt::Write;
|
||||
|
||||
use meilisearch_types::heed::types::{OwnedType, SerdeBincode, SerdeJson, Str};
|
||||
use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
|
||||
use meilisearch_types::heed::{Database, RoTxn};
|
||||
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
|
||||
use meilisearch_types::tasks::{Details, Task};
|
||||
@ -15,6 +15,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
|
||||
|
||||
let IndexScheduler {
|
||||
autobatching_enabled,
|
||||
cleanup_enabled: _,
|
||||
must_stop_processing: _,
|
||||
processing_tasks,
|
||||
file_store,
|
||||
@ -28,14 +29,21 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
|
||||
started_at,
|
||||
finished_at,
|
||||
index_mapper,
|
||||
features: _,
|
||||
max_number_of_tasks: _,
|
||||
max_number_of_batched_tasks: _,
|
||||
puffin_frame: _,
|
||||
wake_up: _,
|
||||
dumps_path: _,
|
||||
snapshots_path: _,
|
||||
auth_path: _,
|
||||
version_file_path: _,
|
||||
webhook_url: _,
|
||||
webhook_authorization_header: _,
|
||||
test_breakpoint_sdr: _,
|
||||
planned_failures: _,
|
||||
run_loop_iteration: _,
|
||||
embedders: _,
|
||||
} = scheduler;
|
||||
|
||||
let rtxn = env.read_txn().unwrap();
|
||||
@ -111,7 +119,7 @@ pub fn snapshot_bitmap(r: &RoaringBitmap) -> String {
|
||||
snap
|
||||
}
|
||||
|
||||
pub fn snapshot_all_tasks(rtxn: &RoTxn, db: Database<OwnedType<BEU32>, SerdeJson<Task>>) -> String {
|
||||
pub fn snapshot_all_tasks(rtxn: &RoTxn, db: Database<BEU32, SerdeJson<Task>>) -> String {
|
||||
let mut snap = String::new();
|
||||
let iter = db.iter(rtxn).unwrap();
|
||||
for next in iter {
|
||||
@ -121,10 +129,7 @@ pub fn snapshot_all_tasks(rtxn: &RoTxn, db: Database<OwnedType<BEU32>, SerdeJson
|
||||
snap
|
||||
}
|
||||
|
||||
pub fn snapshot_date_db(
|
||||
rtxn: &RoTxn,
|
||||
db: Database<OwnedType<BEI128>, CboRoaringBitmapCodec>,
|
||||
) -> String {
|
||||
pub fn snapshot_date_db(rtxn: &RoTxn, db: Database<BEI128, CboRoaringBitmapCodec>) -> String {
|
||||
let mut snap = String::new();
|
||||
let iter = db.iter(rtxn).unwrap();
|
||||
for next in iter {
|
||||
@ -183,6 +188,9 @@ fn snapshot_details(d: &Details) -> String {
|
||||
provided_ids: received_document_ids,
|
||||
deleted_documents,
|
||||
} => format!("{{ received_document_ids: {received_document_ids}, deleted_documents: {deleted_documents:?} }}"),
|
||||
Details::DocumentDeletionByFilter { original_filter, deleted_documents } => format!(
|
||||
"{{ original_filter: {original_filter}, deleted_documents: {deleted_documents:?} }}"
|
||||
),
|
||||
Details::ClearAll { deleted_documents } => {
|
||||
format!("{{ deleted_documents: {deleted_documents:?} }}")
|
||||
},
|
||||
@ -241,10 +249,7 @@ pub fn snapshot_index_tasks(rtxn: &RoTxn, db: Database<Str, RoaringBitmapCodec>)
|
||||
}
|
||||
snap
|
||||
}
|
||||
pub fn snapshot_canceled_by(
|
||||
rtxn: &RoTxn,
|
||||
db: Database<OwnedType<BEU32>, RoaringBitmapCodec>,
|
||||
) -> String {
|
||||
pub fn snapshot_canceled_by(rtxn: &RoTxn, db: Database<BEU32, RoaringBitmapCodec>) -> String {
|
||||
let mut snap = String::new();
|
||||
let iter = db.iter(rtxn).unwrap();
|
||||
for next in iter {
|
||||
@ -254,6 +259,16 @@ pub fn snapshot_canceled_by(
|
||||
snap
|
||||
}
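// Editor's note: the signature rewrites above follow heed's newer API, in
// which fixed-size integer codecs such as `BEU32`/`BEI128` are used directly
// as key types instead of being wrapped in `OwnedType`. A hedged sketch
// (the database name is an assumption):
//
//     let db: Database<BEU32, RoaringBitmapCodec> =
//         env.create_database(&mut wtxn, Some("canceled-by"))?;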
pub fn snapshot_index_mapper(rtxn: &RoTxn, mapper: &IndexMapper) -> String {
+   let mut s = String::new();
    let names = mapper.index_names(rtxn).unwrap();
-   format!("{names:?}")
+
+   for name in names {
+       let stats = mapper.stats_of(rtxn, &name).unwrap();
+       s.push_str(&format!(
+           "{name}: {{ number_of_documents: {}, field_distribution: {:?} }}\n",
+           stats.number_of_documents, stats.field_distribution
+       ));
+   }
+
+   s
}
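// Editor's note: with the new body above, an empty mapper snapshots to an
// empty string, and a populated one to one line per index; the fixtures
// further down show exactly this shape, e.g.:
//
//     catto: { number_of_documents: 1, field_distribution: {"id": 1} }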
File diff suppressed because it is too large
@@ -1,6 +1,5 @@
---
source: index-scheduler/src/lib.rs
-assertion_line: 1755
---
### Autobatching Enabled = true
### Processing Tasks:
@@ -23,7 +22,7 @@ canceled [0,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:
-[]
+
----------------------------------------------------------------------
### Canceled By:
1 [0,]
@@ -20,7 +20,7 @@ enqueued [0,1,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:
-[]
+
----------------------------------------------------------------------
### Canceled By:

@@ -25,7 +25,9 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
-["beavero", "catto"]
+beavero: { number_of_documents: 0, field_distribution: {} }
+catto: { number_of_documents: 1, field_distribution: {"id": 1} }
+
----------------------------------------------------------------------
### Canceled By:

@@ -1,6 +1,5 @@
---
source: index-scheduler/src/lib.rs
-assertion_line: 1859
---
### Autobatching Enabled = true
### Processing Tasks:
@@ -27,7 +26,9 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
-["beavero", "catto"]
+beavero: { number_of_documents: 0, field_distribution: {} }
+catto: { number_of_documents: 1, field_distribution: {"id": 1} }
+
----------------------------------------------------------------------
### Canceled By:
3 [1,2,]
@@ -23,7 +23,8 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
-["catto"]
+catto: { number_of_documents: 1, field_distribution: {"id": 1} }
+
----------------------------------------------------------------------
### Canceled By:

@@ -25,7 +25,8 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
-["catto"]
+catto: { number_of_documents: 1, field_distribution: {"id": 1} }
+
----------------------------------------------------------------------
### Canceled By:

@@ -0,0 +1,35 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { dump_uid: None }, kind: DumpCreation { keys: [], instance_uid: None }}
----------------------------------------------------------------------
### Status:
enqueued [0,]
----------------------------------------------------------------------
### Kind:
"dumpCreation" [0,]
----------------------------------------------------------------------
### Index Tasks:
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------
@@ -0,0 +1,45 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: canceled, canceled_by: 1, details: { dump_uid: None }, kind: DumpCreation { keys: [], instance_uid: None }}
1 {uid: 1, status: succeeded, details: { matched_tasks: 1, canceled_tasks: Some(0), original_filter: "cancel dump" }, kind: TaskCancelation { query: "cancel dump", tasks: RoaringBitmap<[0]> }}
----------------------------------------------------------------------
### Status:
enqueued []
succeeded [1,]
canceled [0,]
----------------------------------------------------------------------
### Kind:
"taskCancelation" [1,]
"dumpCreation" [0,]
----------------------------------------------------------------------
### Index Tasks:
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:
1 [0,]

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------
@@ -0,0 +1,38 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[0,]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { dump_uid: None }, kind: DumpCreation { keys: [], instance_uid: None }}
1 {uid: 1, status: enqueued, details: { matched_tasks: 1, canceled_tasks: None, original_filter: "cancel dump" }, kind: TaskCancelation { query: "cancel dump", tasks: RoaringBitmap<[0]> }}
----------------------------------------------------------------------
### Status:
enqueued [0,1,]
----------------------------------------------------------------------
### Kind:
"taskCancelation" [1,]
"dumpCreation" [0,]
----------------------------------------------------------------------
### Index Tasks:
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------
Some files were not shown because too many files have changed in this diff