Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-07-18 12:20:48 +00:00)
Compare commits: v1.0.0 ... prototype- (2593 commits)
.github/dependabot.yml (vendored, 1 changed line)
@@ -2,7 +2,6 @@
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
.github/scripts/check-release.sh (vendored, 10 changed lines)
@@ -3,7 +3,7 @@
 # check_tag $current_tag $file_tag $file_name
 function check_tag {
   if [[ "$1" != "$2" ]]; then
-    echo "Error: the current tag does not match the version in $3: found $2 - expected $1"
+    echo "Error: the current tag does not match the version in Cargo.toml: found $2 - expected $1"
     ret=1
   fi
 }
@@ -11,12 +11,8 @@ function check_tag {
 ret=0
 current_tag=${GITHUB_REF#'refs/tags/v'}

-toml_files='*/Cargo.toml'
-for toml_file in $toml_files;
-do
-  file_tag="$(grep '^version = ' $toml_file | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
-  check_tag $current_tag $file_tag $toml_file
-done
+file_tag="$(grep '^version = ' Cargo.toml | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
+check_tag $current_tag $file_tag

 lock_file='Cargo.lock'
 lock_tag=$(grep -A 1 'name = "meilisearch-auth"' $lock_file | grep version | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')
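The check above keys everything off GITHUB_REF, so it can be exercised outside CI as well. A minimal sketch, assuming a meilisearch checkout and that the script finishes by exiting with $ret (that tail is not shown in this hunk):

    # Hypothetical local run, mimicking the ref GitHub Actions sets for a tag build
    GITHUB_REF='refs/tags/v1.0.0' bash .github/scripts/check-release.sh
    echo $?  # expected non-zero whenever Cargo.toml or Cargo.lock disagrees with the tag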
.github/uffizzi/Dockerfile (vendored, new file, 48 lines)
@@ -0,0 +1,48 @@
# Compile
FROM rust:alpine3.16 AS compiler

RUN apk add -q --update-cache --no-cache build-base openssl-dev

WORKDIR /meilisearch

ARG COMMIT_SHA
ARG COMMIT_DATE
ARG GIT_TAG
ENV COMMIT_SHA=${COMMIT_SHA} COMMIT_DATE=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
ENV RUSTFLAGS="-C target-feature=-crt-static"

COPY . .
RUN set -eux; \
    apkArch="$(apk --print-arch)"; \
    if [ "$apkArch" = "aarch64" ]; then \
        export JEMALLOC_SYS_WITH_LG_PAGE=16; \
    fi && \
    cargo build --release

# Run
FROM uffizzi/ttyd:alpine

ENV MEILI_HTTP_ADDR 0.0.0.0:7700
ENV MEILI_SERVER_PROVIDER docker
ENV MEILI_NO_ANALYTICS true

RUN apk update --quiet \
    && apk add -q --no-cache libgcc tini curl

# add meilisearch to the `/bin` so you can run it from anywhere and it's easy
# to find.
COPY --from=compiler /meilisearch/target/release/meilisearch /bin/meilisearch
# To stay compatible with the older version of the container (pre v0.27.0) we're
# going to symlink the meilisearch binary in the path to `/meilisearch`
RUN ln -s /bin/meilisearch /meilisearch

# This directory should hold all the data related to meilisearch so we're going
# to move our PWD in there.
# We don't want to put the meilisearch binary
WORKDIR /meili_data


EXPOSE 7700/tcp

ENTRYPOINT ["tini", "--"]
CMD ["ttyd", "/bin/zsh"]
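For reference, a hedged sketch of building this image by hand from the repository root; the tag name is made up, and the build args mirror the ones the publish-docker-images workflow further down passes:

    docker build -f .github/uffizzi/Dockerfile \
      --build-arg COMMIT_SHA="$(git rev-parse HEAD)" \
      --build-arg COMMIT_DATE="$(git show -s --format=%cd --date=iso-strict HEAD)" \
      --build-arg GIT_TAG="$(git describe --tags)" \
      -t meilisearch-uffizzi:local .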
.github/uffizzi/docker-compose.uffizzi.yml (vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
version: "3"

x-uffizzi:
  ingress:
    service: nginx
    port: 8081

services:
  meilisearch:
    image: "${MEILISEARCH_IMAGE}"
    restart: unless-stopped
    ports:
      - "7681:7681"
      - "7700:7700"
    deploy:
      resources:
        limits:
          memory: 500M

  nginx:
    image: nginx:alpine
    restart: unless-stopped
    ports:
      - "8081:8081"
    volumes:
      - ./.github/uffizzi/nginx:/etc/nginx
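A usage sketch for this compose file; MEILISEARCH_IMAGE is normally injected by the Uffizzi environment, so the image tag below is only an example:

    MEILISEARCH_IMAGE=getmeili/meilisearch:v1.0.0 \
      docker compose -f .github/uffizzi/docker-compose.uffizzi.yml up -d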
.github/uffizzi/nginx/nginx.conf (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@

events {
  worker_connections 4096; ## Default: 1024
}

http {
  map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
  }

  server {
    listen 8081;

    location / {
      proxy_pass http://localhost:7681;
      proxy_http_version 1.1;
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection $connection_upgrade;
    }

    location /meilisearch/ {
      # rewrite /meilisearch/(.*) /$1 break;
      proxy_pass http://localhost:7700/;
    }
  }
}
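With the stack above running, the two proxy routes can be smoke-tested roughly as follows (a sketch; /health is Meilisearch's standard liveness endpoint, and ttyd answers on the root location):

    curl -i http://localhost:8081/                    # ttyd web terminal (WebSocket-upgrade aware)
    curl -i http://localhost:8081/meilisearch/health  # forwarded to Meilisearch on port 7700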
@@ -1,6 +1,7 @@
 name: Create issue to upgrade dependencies
 on:
   schedule:
     # Run the first of the month, every 3 month
     - cron: '0 0 1 */3 *'
   workflow_dispatch:

@@ -15,9 +16,13 @@ jobs:
         github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
         title: Upgrade dependencies
         body: |
-          We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the engine-team repositories that Meilisearch depends on (milli, charabia, heed...).
+          This issue is about updating Meilisearch dependencies:
+          - [ ] Cargo toml dependencies of Meilisearch; but also the main engine-team repositories that Meilisearch depends on (charabia, heed...)
+          - [ ] If new Rust versions have been released, update the Rust version in the Clippy job of this [GitHub Action file](./.github/workflows/rust.yml)

-          ⚠️ This issue should only be done at the beginning of the sprint!
+          ⚠️ To avoid last minute bugs, this issue should only be done at the beginning of the sprint!
+
+          The GitHub action dependencies are managed by [Dependabot](./.github/dependabot.yml)
         labels: |
           dependencies
           maintenance
.github/workflows/flaky.yml (vendored, 10 changed lines)
@@ -22,5 +22,11 @@ jobs:
           override: true
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
-      - name: Run cargo flaky 100 times
-        run: cargo flaky -i 100 --release
+      - name: Run cargo flaky in the dumps
+        run: cd dump; cargo flaky -i 100 --release
+      - name: Run cargo flaky in the index-scheduler
+        run: cd index-scheduler; cargo flaky -i 100 --release
+      - name: Run cargo flaky in the auth
+        run: cd meilisearch-auth; cargo flaky -i 100 --release
+      - name: Run cargo flaky in meilisearch
+        run: cd meilisearch; cargo flaky -i 100 --release
.github/workflows/manual_benchmarks.yml (vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
name: Benchmarks

on:
  workflow_dispatch:
    inputs:
      dataset_name:
        description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
        required: false
        default: 'search_songs'

env:
  BENCH_NAME: ${{ github.event.inputs.dataset_name }}

jobs:
  benchmarks:
    name: Run and upload benchmarks
    runs-on: benchmarks
    timeout-minutes: 4320 # 72h
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      # Set variables
      - name: Set current branch name
        shell: bash
        run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
        id: current_branch
      - name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
        shell: bash
        run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
        id: normalized_current_branch
      - name: Set shorter commit SHA
        shell: bash
        run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
        id: commit_sha
      - name: Set file basename with format "dataset_branch_commitSHA"
        shell: bash
        run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
        id: file

      # Run benchmarks
      - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
        run: |
          cd benchmarks
          cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

      # Generate critcmp files
      - name: Install critcmp
        uses: taiki-e/install-action@v2
        with:
          tool: critcmp
      - name: Export cripcmp file
        run: |
          critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json

      # Upload benchmarks
      - name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
        uses: BetaHuhn/do-spaces-action@v2
        with:
          access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
          secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
          space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
          space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
          source: ${{ steps.file.outputs.basename }}.json
          out_dir: critcmp_results

      # Helper
      - name: 'README: compare with another benchmark'
        run: |
          echo "${{ steps.file.outputs.basename }}.json has just been pushed."
          echo 'How to compare this benchmark with another one?'
          echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
          echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
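Spelled out, the comparison flow the helper step prints looks like this; the baseline file names are placeholders, and the workflow's last echo misspells the path that its list.sh line gives correctly as ./benchmarks/scripts/:

    ./benchmarks/scripts/list.sh
    ./benchmarks/scripts/compare.sh search_songs_main_aaaaaaaa.json search_songs_my-branch_bbbbbbbb.json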
.github/workflows/publish-binaries.yml (vendored, 8 changed lines)
@@ -54,7 +54,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/meilisearch
@@ -87,7 +87,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/${{ matrix.artifact_name }}
@@ -123,7 +123,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
@@ -183,7 +183,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
.github/workflows/publish-deb-brew-pkg.yml (vendored, 2 changed lines)
@@ -35,7 +35,7 @@ jobs:
       - name: Build deb package
         run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb
.github/workflows/publish-docker-images.yml (vendored, 6 changed lines)
@@ -52,9 +52,6 @@ jobs:
       - name: Set build-args for Docker buildx
        id: build-metadata
        run: |
-          # Define ownership
-          git config --global --add safe.directory /home/meili/actions-runner/_work/meilisearch/meilisearch
-
          # Extract commit date
          commit_date=$(git show -s --format=%cd --date=iso-strict ${{ github.sha }})

@@ -87,7 +84,7 @@ jobs:
            type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}

       - name: Build and push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           push: true
           platforms: linux/amd64,linux/arm64
@@ -95,6 +92,7 @@ jobs:
           build-args: |
             COMMIT_SHA=${{ github.sha }}
             COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
+            GIT_TAG=${{ github.ref_name }}

       # /!\ Don't touch this without checking with Cloud team
       - name: Send CI information to Cloud team
79
.github/workflows/push_benchmarks_indexing.yml
vendored
Normal file
@@ -0,0 +1,79 @@
name: Benchmarks indexing (push)

on:
push:
branches:
- main

env:
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
BENCH_NAME: "indexing"

jobs:
benchmarks:
name: Run and upload benchmarks
runs-on: benchmarks
timeout-minutes: 4320 # 72h
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
id: current_branch
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
id: normalized_current_branch
- name: Set shorter commit SHA
shell: bash
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
id: commit_sha
- name: Set file basename with format "dataset_branch_commitSHA"
shell: bash
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
id: file

# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files
- name: Install critcmp
uses: taiki-e/install-action@v2
with:
tool: critcmp
- name: Export critcmp file
run: |
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json

# Upload benchmarks
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
uses: BetaHuhn/do-spaces-action@v2
with:
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
source: ${{ steps.file.outputs.basename }}.json
out_dir: critcmp_results

# Upload benchmarks to influxdb
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
echo " - Run the following command: ./benchmarks/scripts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
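The "Set variables" steps above assemble the report name from the dataset, the normalized branch name, and a short commit SHA. A local approximation, useful to predict what a run will be called (a sketch, assuming a git checkout):

```bash
# Reproduce the "dataset_branch_commitSHA" basename locally (sketch).
BENCH_NAME="indexing"
branch=$(git rev-parse --abbrev-ref HEAD | tr '/' '_')  # `/` -> `_`, as in the workflow, to avoid S3 issues
short=$(git rev-parse HEAD | cut -c1-8)                 # shorter commit SHA
echo "${BENCH_NAME}_${branch}_${short}.json"
```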
78
.github/workflows/push_benchmarks_search_geo.yml
vendored
Normal file
@@ -0,0 +1,78 @@
name: Benchmarks search geo (push)

on:
push:
branches:
- main

env:
BENCH_NAME: "search_geo"
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}

jobs:
benchmarks:
name: Run and upload benchmarks
runs-on: benchmarks
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
id: current_branch
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
id: normalized_current_branch
- name: Set shorter commit SHA
shell: bash
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
id: commit_sha
- name: Set file basename with format "dataset_branch_commitSHA"
shell: bash
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
id: file

# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files
- name: Install critcmp
uses: taiki-e/install-action@v2
with:
tool: critcmp
- name: Export critcmp file
run: |
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json

# Upload benchmarks
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
uses: BetaHuhn/do-spaces-action@v2
with:
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
source: ${{ steps.file.outputs.basename }}.json
out_dir: critcmp_results

# Upload benchmarks to influxdb
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
echo " - Run the following command: ./benchmarks/scripts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
78
.github/workflows/push_benchmarks_search_songs.yml
vendored
Normal file
@@ -0,0 +1,78 @@
name: Benchmarks search songs (push)

on:
push:
branches:
- main

env:
BENCH_NAME: "search_songs"
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}

jobs:
benchmarks:
name: Run and upload benchmarks
runs-on: benchmarks
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
id: current_branch
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
id: normalized_current_branch
- name: Set shorter commit SHA
shell: bash
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
id: commit_sha
- name: Set file basename with format "dataset_branch_commitSHA"
shell: bash
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
id: file

# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files
- name: Install critcmp
uses: taiki-e/install-action@v2
with:
tool: critcmp
- name: Export critcmp file
run: |
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json

# Upload benchmarks
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
uses: BetaHuhn/do-spaces-action@v2
with:
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
source: ${{ steps.file.outputs.basename }}.json
out_dir: critcmp_results

# Upload benchmarks to influxdb
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
echo " - Run the following command: ./benchmarks/scripts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
78
.github/workflows/push_benchmarks_search_wiki.yml
vendored
Normal file
@@ -0,0 +1,78 @@
name: Benchmarks search wikipedia articles (push)

on:
push:
branches:
- main

env:
BENCH_NAME: "search_wiki"
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}

jobs:
benchmarks:
name: Run and upload benchmarks
runs-on: benchmarks
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true

# Set variables
- name: Set current branch name
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
id: current_branch
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
shell: bash
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
id: normalized_current_branch
- name: Set shorter commit SHA
shell: bash
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
id: commit_sha
- name: Set file basename with format "dataset_branch_commitSHA"
shell: bash
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
id: file

# Run benchmarks
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
run: |
cd benchmarks
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

# Generate critcmp files
- name: Install critcmp
uses: taiki-e/install-action@v2
with:
tool: critcmp
- name: Export critcmp file
run: |
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json

# Upload benchmarks
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
uses: BetaHuhn/do-spaces-action@v2
with:
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
source: ${{ steps.file.outputs.basename }}.json
out_dir: critcmp_results

# Upload benchmarks to influxdb
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

# Helper
- name: 'README: compare with another benchmark'
run: |
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
echo 'How to compare this benchmark with another one?'
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
echo " - Run the following command: ./benchmarks/scripts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
54
.github/workflows/rust.yml
vendored
@@ -2,6 +2,9 @@ name: Rust

on:
workflow_dispatch:
schedule:
# Everyday at 5:00am
- cron: '0 5 * * *'
pull_request:
push:
# trying and staging branches are for Bors config
@@ -27,22 +30,31 @@ jobs:
run: |
apt-get update && apt-get install -y curl
apt-get install build-essential -y
- uses: actions-rs/toolchain@v1
- name: Run test with Rust stable
if: github.event_name != 'schedule'
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.2.0
- name: Run test with Rust nightly
if: github.event_name == 'schedule'
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
# Disable cache due to disk space issues with Windows workers in CI
# - name: Cache dependencies
# uses: Swatinem/rust-cache@v2.2.0
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:
command: build
args: --locked --release --no-default-features
args: --locked --release --no-default-features --all
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
args: --locked --release
args: --locked --release --all

test-others:
name: Tests on ${{ matrix.os }}
@@ -53,18 +65,18 @@ jobs:
os: [macos-12, windows-2022]
steps:
- uses: actions/checkout@v3
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.2.0
# - name: Cache dependencies
# uses: Swatinem/rust-cache@v2.2.0
- name: Run cargo check without any default features
uses: actions-rs/cargo@v1
with:
command: build
args: --locked --release --no-default-features
args: --locked --release --no-default-features --all
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
args: --locked --release
args: --locked --release --all

# We run tests in debug also, to make sure that the debug_assertions are hit
test-debug:
@@ -83,13 +95,13 @@ jobs:
with:
toolchain: stable
override: true
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.2.0
# - name: Cache dependencies
# uses: Swatinem/rust-cache@v2.2.0
- name: Run tests in debug
uses: actions-rs/cargo@v1
with:
command: test
args: --locked
args: --locked --all

clippy:
name: Run Clippy
@@ -99,15 +111,16 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
toolchain: 1.67.0
override: true
components: clippy
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.2.0
# - name: Cache dependencies
# uses: Swatinem/rust-cache@v2.2.0
- name: Run cargo clippy
uses: actions-rs/cargo@v1
with:
command: clippy
# allow uninlined_format_args https://github.com/rust-lang/rust-clippy/issues/10087
args: --all-targets -- --deny warnings --allow clippy::uninlined_format_args

fmt:
@@ -121,7 +134,12 @@ jobs:
toolchain: nightly
override: true
components: rustfmt
- name: Cache dependencies
uses: Swatinem/rust-cache@v2.2.0
# - name: Cache dependencies
# uses: Swatinem/rust-cache@v2.2.0
- name: Run cargo fmt
run: cargo fmt --all -- --check
# Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
# Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
# we are going to create an empty file where rustfmt expects it.
run: |
echo -ne "\n" > benchmarks/benches/datasets_paths.rs
cargo fmt --all -- --check
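For local reproduction, the checks this workflow performs boil down to the following sketch (flags copied from the job definitions above, including the rustfmt placeholder trick):

```bash
# Roughly what the Rust CI runs, in order.
cargo build --locked --release --no-default-features --all
cargo test --locked --release --all
cargo test --locked --all   # debug build too, so debug_assertions are hit
cargo clippy --all-targets -- --deny warnings --allow clippy::uninlined_format_args
echo -ne "\n" > benchmarks/benches/datasets_paths.rs  # placeholder for the file build.rs would generate
cargo fmt --all -- --check
```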
100
.github/workflows/uffizzi-build.yml
vendored
Normal file
@@ -0,0 +1,100 @@
name: Uffizzi - Build PR Image
on:
pull_request:
types: [opened,synchronize,reopened,closed]

jobs:
build-meilisearch:
name: Build and push `meilisearch`
runs-on: ubuntu-latest
outputs:
tags: ${{ steps.meta.outputs.tags }}
if: ${{ github.event.action != 'closed' }}
steps:
- name: checkout
uses: actions/checkout@v3

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Generate UUID image name
id: uuid
run: echo "UUID_TAG=$(uuidgen)" >> $GITHUB_ENV

- name: Docker metadata
id: meta
uses: docker/metadata-action@v3
with:
images: registry.uffizzi.com/${{ env.UUID_TAG }}
tags: |
type=raw,value=60d

- name: Build Image
uses: docker/build-push-action@v3
with:
context: ./
file: .github/uffizzi/Dockerfile
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
push: true
cache-from: type=gha
cache-to: type=gha,mode=max

render-compose-file:
name: Render Docker Compose File
# Pass output of this workflow to another triggered by `workflow_run` event.
runs-on: ubuntu-latest
needs:
- build-meilisearch
outputs:
compose-file-cache-key: ${{ env.COMPOSE_FILE_HASH }}
steps:
- name: Checkout git repo
uses: actions/checkout@v3
- name: Render Compose File
run: |
MEILISEARCH_IMAGE=$(echo ${{ needs.build-meilisearch.outputs.tags }})
export MEILISEARCH_IMAGE
# Render simple template from environment variables.
envsubst < .github/uffizzi/docker-compose.uffizzi.yml > docker-compose.rendered.yml
cat docker-compose.rendered.yml
- name: Upload Rendered Compose File as Artifact
uses: actions/upload-artifact@v3
with:
name: preview-spec
path: docker-compose.rendered.yml
retention-days: 2
- name: Serialize PR Event to File
run: |
cat << EOF > event.json
${{ toJSON(github.event) }}

EOF
- name: Upload PR Event as Artifact
uses: actions/upload-artifact@v3
with:
name: preview-spec
path: event.json
retention-days: 2

delete-preview:
name: Call for Preview Deletion
runs-on: ubuntu-latest
if: ${{ github.event.action == 'closed' }}
steps:
# If this PR is closing, we will not render a compose file nor pass it to the next workflow.
- name: Serialize PR Event to File
run: |
cat << EOF > event.json
${{ toJSON(github.event) }}

EOF
- name: Upload PR Event as Artifact
uses: actions/upload-artifact@v3
with:
name: preview-spec
path: event.json
retention-days: 2
103
.github/workflows/uffizzi-preview-deploy.yml
vendored
Normal file
@@ -0,0 +1,103 @@
name: Uffizzi - Deploy Preview

on:
workflow_run:
workflows:
- "Uffizzi - Build PR Image"
types:
- completed

jobs:
cache-compose-file:
name: Cache Compose File
runs-on: ubuntu-latest
if: ${{ github.event.workflow_run.conclusion == 'success' }}
outputs:
compose-file-cache-key: ${{ env.COMPOSE_FILE_HASH }}
pr-number: ${{ env.PR_NUMBER }}
expected-url: ${{ env.EXPECTED_URL }}
steps:
- name: 'Download artifacts'
# Fetch output (zip archive) from the workflow run that triggered this workflow.
uses: actions/github-script@v6
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "preview-spec"
})[0];
let download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
let fs = require('fs');
fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/preview-spec.zip`, Buffer.from(download.data));

- name: 'Unzip artifact'
run: unzip preview-spec.zip

- name: Read Event into ENV
run: |
echo 'EVENT_JSON<<EOF' >> $GITHUB_ENV
cat event.json >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV

- name: Hash Rendered Compose File
id: hash
# If the previous workflow was triggered by a PR close event, we will not have a compose file artifact.
if: ${{ fromJSON(env.EVENT_JSON).action != 'closed' }}
run: echo "COMPOSE_FILE_HASH=$(md5sum docker-compose.rendered.yml | awk '{ print $1 }')" >> $GITHUB_ENV

- name: Cache Rendered Compose File
if: ${{ fromJSON(env.EVENT_JSON).action != 'closed' }}
uses: actions/cache@v3
with:
path: docker-compose.rendered.yml
key: ${{ env.COMPOSE_FILE_HASH }}

- name: Read PR Number From Event Object
id: pr
run: echo "PR_NUMBER=${{ fromJSON(env.EVENT_JSON).number }}" >> $GITHUB_ENV

- name: DEBUG - Print Job Outputs
if: ${{ runner.debug }}
run: |
echo "PR number: ${{ env.PR_NUMBER }}"
echo "Compose file hash: ${{ env.COMPOSE_FILE_HASH }}"
cat event.json

- name: Add expected URL env var
if: ${{ runner.debug }}
run: |
REPO=$(echo ${{ github.repository }} | sed 's/\./+/g')
echo "EXPECTED_URL=${{ inputs.server }}/github.com/$REPO/pull/${{ env.PR_NUMBER }}" >> $GITHUB_ENV

deploy-uffizzi-preview:
name: Use Remote Workflow to Preview on Uffizzi
needs:
- cache-compose-file
uses: UffizziCloud/preview-action/.github/workflows/reusable.yaml@desc
with:
# If this workflow was triggered by a PR close event, cache-key will be an empty string
# and this reusable workflow will delete the preview deployment.
compose-file-cache-key: ${{ needs.cache-compose-file.outputs.compose-file-cache-key }}
compose-file-cache-path: docker-compose.rendered.yml
server: https://app.uffizzi.com
pr-number: ${{ needs.cache-compose-file.outputs.pr-number }}
description: |
The meilisearch preview environment contains a web terminal from where you can run the
`meilisearch` command. You should be able to access this instance of meilisearch running in
the preview from the Meilisearch Endpoint link given below.

Web Terminal Endpoint : ${{ needs.cache-compose-file.outputs.expected-url }}
Meilisearch Endpoint : ${{ needs.cache-compose-file.outputs.expected-url }}/meilisearch
permissions:
contents: read
pull-requests: write
id-token: write
@@ -13,7 +13,6 @@ env:
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

jobs:

update-version-cargo-toml:
name: Update version in Cargo.toml files
runs-on: ubuntu-latest
@@ -30,7 +29,7 @@ jobs:
run: |
raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2)
new_string="version = \"$raw_new_version\""
sd '^version = "\d+.\d+.\w+"$' "$new_string" */Cargo.toml
sd '^version = "\d+.\d+.\w+"$' "$new_string" Cargo.toml
- name: Build Meilisearch to update Cargo.lock
run: cargo build
- name: Commit and push the changes to the ${{ env.NEW_BRANCH }} branch
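The `sd` substitution above behaves like this minimal sketch (the version value is illustrative; `sd` must be installed):

```bash
NEW_VERSION="v1.0.1"                                    # hypothetical input
raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2)  # -> 1.0.1
new_string="version = \"$raw_new_version\""
sd '^version = "\d+.\d+.\w+"$' "$new_string" Cargo.toml
```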
6
.gitignore
vendored
@@ -1,3 +1,5 @@
.idea/
.vscode/
/target
**/*.csv
**/*.json_lines
@@ -8,9 +10,11 @@
/snapshots
/dumps


# Snapshots
## ... large
*.full.snap
## ... unreviewed
*.snap.new

# Fuzzcheck data for the facet indexing fuzz test
milli/fuzz/update::facet::incremental::fuzz::fuzz/
@@ -52,6 +52,23 @@ cargo test

This command will be triggered for each PR as a requirement for merging it.

#### Snapshot-based tests

We are using [insta](https://insta.rs) to perform snapshot-based testing.
We recommend using the insta tooling (such as `cargo-insta`) to update the snapshots if they change following a PR.

New tests should use insta where possible rather than manual `assert` statements.

Furthermore, we provide some macros on top of insta, notably a way to use snapshot hashes instead of inline snapshots, saving a lot of space in the repository.

To effectively debug snapshot-based hashes, we recommend you export the `MEILI_TEST_FULL_SNAPS` environment variable so that snapshots are fully created locally:

```
export MEILI_TEST_FULL_SNAPS=true # add this to your .bashrc, .zshrc, ...
```
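A typical local loop with the `cargo-insta` tooling mentioned above might look like this sketch (assumes `cargo install cargo-insta`):

```bash
export MEILI_TEST_FULL_SNAPS=true  # full snapshots instead of hashes, easier to debug
cargo insta test                   # run the suite and collect pending snapshots
cargo insta review                 # accept or reject the *.snap.new files
```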
#### Test troubleshooting

If you get a "Too many open files" error you might want to increase the open file limit using this command:

```bash
@@ -99,6 +116,34 @@ _[Read more about this](https://github.com/meilisearch/integration-guides/blob/m

The full Meilisearch release process is described in [this guide](https://github.com/meilisearch/engine-team/blob/main/resources/meilisearch-release.md). Please follow it carefully before doing any release.

### How to publish a prototype

Depending on the developed feature, you might need to provide a prototyped version of Meilisearch to make it easier for users to test.

The prototype name must follow this convention: `prototype-X-Y` where
- `X` is the feature name formatted in `kebab-case`. It should not end with a single number.
- `Y` is the version of the prototype, starting from `0`.

✅ Example: `prototype-auto-resize-0`. </br>
❌ Bad example: `auto-resize-0`: lacks the `prototype` prefix. </br>
❌ Bad example: `prototype-auto-resize`: lacks the version suffix. </br>
❌ Bad example: `prototype-auto-resize-0-0`: feature name ends with a single number.

Steps to create a prototype (a worked example follows the list):

1. In your terminal, go to the last commit of your branch (the one you want to provide as a prototype).
2. Create a tag following the convention: `git tag prototype-X-Y`
3. Run Meilisearch and check that its launch summary features a line: `Prototype: prototype-X-Y` (you may need to switch branches and back after tagging for this to work).
4. Push the tag: `git push origin prototype-X-Y`
5. Check the [Docker CI](https://github.com/meilisearch/meilisearch/actions/workflows/publish-docker-images.yml) is now running.
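Putting the steps together for the example name above (the branch name is illustrative, and step 3 assumes you run Meilisearch with `cargo run --release`):

```bash
git checkout my-feature-branch           # 1. be on the commit to ship
git tag prototype-auto-resize-0          # 2. tag it following the convention
cargo run --release                      # 3. check the launch summary shows the prototype line
git push origin prototype-auto-resize-0  # 4. push the tag to trigger the Docker CI
```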
🐳 Once the CI has finished running (~1h30), a Docker image named `prototype-X-Y` will be available on [DockerHub](https://hub.docker.com/repository/docker/getmeili/meilisearch/general). People can use it with the following command: `docker run -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-X-Y`. <br>
More information about [how to run Meilisearch with Docker](https://docs.meilisearch.com/learn/cookbooks/docker.html#download-meilisearch-with-docker).

⚙️ However, no binaries will be created. If the users do not use Docker, they can go to the `prototype-X-Y` tag in the Meilisearch repository and compile from the source code.

⚠️ When sharing a prototype with users, remind them not to use it in production. Prototypes are solely for testing purposes.

### Release assets

For each release, the following assets are created:
347
Cargo.lock
generated
@@ -36,9 +36,9 @@ dependencies = [

[[package]]
name = "actix-http"
version = "3.2.2"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c83abf9903e1f0ad9973cc4f7b9767fd5a03a583f51a5b7a339e07987cd2724"
checksum = "0070905b2c4a98d184c4e81025253cb192aa8a73827553f38e9410801ceb35bb"
dependencies = [
"actix-codec",
"actix-rt",
@@ -46,7 +46,7 @@ dependencies = [
"actix-tls",
"actix-utils",
"ahash",
"base64 0.13.1",
"base64 0.21.0",
"bitflags",
"brotli",
"bytes",
@@ -68,7 +68,10 @@ dependencies = [
"rand",
"sha1",
"smallvec",
"tokio",
"tokio-util",
"tracing",
"zstd 0.12.3+zstd.1.5.2",
]

[[package]]
@@ -164,9 +167,9 @@ dependencies = [

[[package]]
name = "actix-web"
version = "4.2.1"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d48f7b6534e06c7bfc72ee91db7917d4af6afe23e7d223b51e68fffbb21e96b9"
checksum = "464e0fddc668ede5f26ec1f9557a8d44eda948732f40c6b0ad79126930eb775f"
dependencies = [
"actix-codec",
"actix-http",
@@ -289,6 +292,12 @@ dependencies = [
"alloc-no-stdlib",
]

[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"

[[package]]
name = "anyhow"
version = "1.0.68"
@@ -377,7 +386,7 @@ dependencies = [
"cfg-if",
"libc",
"miniz_oxide",
"object",
"object 0.30.2",
"rustc-demangle",
]

@@ -399,6 +408,25 @@ version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"

[[package]]
name = "benchmarks"
version = "1.0.0"
dependencies = [
"anyhow",
"bytes",
"convert_case 0.6.0",
"criterion",
"csv",
"flate2",
"milli",
"mimalloc",
"rand",
"rand_chacha",
"reqwest",
"roaring",
"serde_json",
]

[[package]]
name = "big_s"
version = "1.0.2"
@@ -423,6 +451,12 @@ dependencies = [
"serde",
]

[[package]]
name = "bit-vec"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"

[[package]]
name = "bitflags"
version = "1.3.2"
@@ -575,14 +609,20 @@ dependencies = [

[[package]]
name = "cargo_toml"
version = "0.13.3"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497049e9477329f8f6a559972ee42e117487d01d1e8c2cc9f836ea6fa23a9e1a"
checksum = "2bfbc36312494041e2cdd5f06697b7e89d4b76f42773a0b5556ac290ff22acc2"
dependencies = [
"serde",
"toml",
]

[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "cc"
version = "1.0.78"
@@ -635,9 +675,37 @@ dependencies = [
"slice-group-by",
"unicode-normalization",
"unicode-segmentation",
"wana_kana",
"whatlang",
]

[[package]]
name = "ciborium"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]

[[package]]
name = "ciborium-io"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369"

[[package]]
name = "ciborium-ll"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b"
dependencies = [
"ciborium-io",
"half",
]

[[package]]
name = "cipher"
version = "0.3.0"
@@ -823,6 +891,42 @@ dependencies = [
"cfg-if",
]

[[package]]
name = "criterion"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb"
dependencies = [
"anes",
"atty",
"cast",
"ciborium",
"clap 3.2.23",
"criterion-plot",
"itertools",
"lazy_static",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]

[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools",
]

[[package]]
name = "critical-section"
version = "1.1.1"
@@ -1009,20 +1113,26 @@ dependencies = [

[[package]]
name = "deserr"
version = "0.3.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28380303ca15ec07e1d5b079baf19cf849b09edad5cab219c1c51b2bd07523de"
checksum = "c71c14985c842bf1e520b1ebcd22daff6aeece32f510e11f063cecf9b308c04b"
dependencies = [
"actix-http",
"actix-utils",
"actix-web",
"deserr-internal",
"futures",
"serde-cs",
"serde_json",
"serde_urlencoded",
"strsim",
]

[[package]]
name = "deserr-internal"
version = "0.3.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "860928cd8af78d223a3d70dd581f21d7c3de8aa2eecd938e0c0a399ded7c1451"
checksum = "cae1c51b191528c9e4e5d6cff671de94f61fcda1c206cc891251e0cf438c941a"
dependencies = [
"convert_case 0.5.0",
"proc-macro2",
@@ -1300,9 +1410,9 @@ dependencies = [

[[package]]
name = "filter-parser"
version = "0.41.1"
source = "git+https://github.com/meilisearch/milli.git?tag=v0.41.1#758b4acea7cecd689650bee65949b49cf09ddaa3"
version = "1.0.0"
dependencies = [
"insta",
"nom",
"nom_locate",
]

@@ -1314,14 +1424,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
dependencies = [
"crc32fast",
"libz-sys",
"miniz_oxide",
]

[[package]]
name = "flatten-serde-json"
version = "0.41.1"
source = "git+https://github.com/meilisearch/milli.git?tag=v0.41.1#758b4acea7cecd689650bee65949b49cf09ddaa3"
version = "1.0.0"
dependencies = [
"criterion",
"serde_json",
]

@@ -1435,6 +1546,50 @@ dependencies = [
"slab",
]

[[package]]
name = "fuzzcheck"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee76e8096c3fcd82ab23177edddcc9b81b72c123caab54bb1e2dc19fd09d2dec"
dependencies = [
"ahash",
"bit-vec",
"cc",
"cfg-if",
"fastrand",
"flate2",
"fuzzcheck_common",
"fuzzcheck_mutators_derive",
"getopts",
"libc",
"md5",
"nu-ansi-term",
"object 0.27.1",
"regex-syntax",
"serde",
"serde_json",
]

[[package]]
name = "fuzzcheck_common"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dde06f8d25b14a35d43eb2d3dbace3b9193424460b10ad4ccf1b3d542d48f06f"
dependencies = [
"getopts",
]

[[package]]
name = "fuzzcheck_mutators_derive"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30ce01e8bbb3e7e0758dcf907fe799f5998a54368963f766ae94b84624ba60c8"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

[[package]]
name = "fxhash"
version = "0.2.1"
@@ -1460,6 +1615,15 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36d244a08113319b5ebcabad2b8b7925732d15eec46d7e7ac3c11734f3b7a6ad"

[[package]]
name = "getopts"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
dependencies = [
"unicode-width",
]

[[package]]
name = "getrandom"
version = "0.2.8"
@@ -1538,6 +1702,12 @@ dependencies = [
"tracing",
]

[[package]]
name = "half"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"

[[package]]
name = "hash32"
version = "0.2.1"
@@ -1577,8 +1747,8 @@ checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"

[[package]]
name = "heed"
version = "0.12.4"
source = "git+https://github.com/meilisearch/heed?tag=v0.12.4#7a4542bc72dd60ef0f508c89900ea292218223fb"
version = "0.12.5"
source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
dependencies = [
"byteorder",
"heed-traits",
@@ -1595,12 +1765,12 @@ dependencies = [
[[package]]
name = "heed-traits"
version = "0.7.0"
source = "git+https://github.com/meilisearch/heed?tag=v0.12.4#7a4542bc72dd60ef0f508c89900ea292218223fb"
source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"

[[package]]
name = "heed-types"
version = "0.7.2"
source = "git+https://github.com/meilisearch/heed?tag=v0.12.4#7a4542bc72dd60ef0f508c89900ea292218223fb"
source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
dependencies = [
"bincode",
"heed-traits",
@@ -1884,9 +2054,9 @@ dependencies = [

[[package]]
name = "json-depth-checker"
version = "0.41.1"
source = "git+https://github.com/meilisearch/milli.git?tag=v0.41.1#758b4acea7cecd689650bee65949b49cf09ddaa3"
version = "1.0.0"
dependencies = [
"criterion",
"serde_json",
]

@@ -1933,9 +2103,9 @@ checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"

[[package]]
name = "libgit2-sys"
version = "0.14.1+1.5.0"
version = "0.14.2+1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a07fb2692bc3593bda59de45a502bb3071659f2c515e28c71e728306b038e17"
checksum = "7f3d95f6b51075fe9810a7ae22c7095f12b98005ab364d8544797a825ce946a4"
dependencies = [
"cc",
"libc",
@@ -2366,6 +2536,7 @@ dependencies = [
"base64 0.13.1",
"enum-iterator",
"hmac",
"maplit",
"meilisearch-types",
"rand",
"roaring",
@@ -2433,9 +2604,9 @@ dependencies = [

[[package]]
name = "milli"
version = "0.41.1"
source = "git+https://github.com/meilisearch/milli.git?tag=v0.41.1#758b4acea7cecd689650bee65949b49cf09ddaa3"
version = "1.0.0"
dependencies = [
"big_s",
"bimap",
"bincode",
"bstr 1.1.0",
@@ -2449,19 +2620,24 @@ dependencies = [
"filter-parser",
"flatten-serde-json",
"fst",
"fuzzcheck",
"fxhash",
"geoutils",
"grenad",
"heed",
"insta",
"itertools",
"json-depth-checker",
"levenshtein_automata",
"log",
"logging_timer",
"maplit",
"md5",
"memmap2",
"obkv",
"once_cell",
"ordered-float",
"rand",
"rayon",
"roaring",
"rstar",
@@ -2564,6 +2740,16 @@ dependencies = [
"winapi",
]

[[package]]
name = "nu-ansi-term"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e62e2187cbceeafee9fb7b5e5e182623e0628ebf430a479df4487beb8f92fd7a"
dependencies = [
"overload",
"winapi",
]

[[package]]
name = "num-bigint"
version = "0.4.3"
@@ -2605,6 +2791,15 @@ dependencies = [
"libc",
]

[[package]]
name = "object"
version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9"
dependencies = [
"memchr",
]

[[package]]
name = "object"
version = "0.30.2"
@@ -2626,6 +2821,12 @@ version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"

[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"

[[package]]
name = "opaque-debug"
version = "0.3.0"
@@ -2647,6 +2848,12 @@ version = "6.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"

[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

[[package]]
name = "page_size"
version = "0.4.2"
@@ -2872,6 +3079,34 @@ dependencies = [
"dirs-next",
]

[[package]]
name = "plotters"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]

[[package]]
name = "plotters-backend"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142"

[[package]]
name = "plotters-svg"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f"
dependencies = [
"plotters-backend",
]

[[package]]
name = "ppv-lite86"
version = "0.2.17"
@@ -3615,6 +3850,16 @@ dependencies = [
"time-core",
]

[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]

[[package]]
name = "tinyvec"
version = "1.6.0"
@@ -3632,9 +3877,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"

[[package]]
name = "tokio"
version = "1.24.1"
version = "1.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae"
checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb"
dependencies = [
"autocfg",
"bytes",
@@ -3787,6 +4032,12 @@ version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a"

[[package]]
name = "unicode-width"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"

[[package]]
name = "unicode-xid"
version = "0.2.4"
@@ -3880,6 +4131,17 @@ dependencies = [
"winapi-util",
]

[[package]]
name = "wana_kana"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5770f395a1c9d6d64bee602f0a36763d7861ef5715f9d4f707cb0086f82dba54"
dependencies = [
"fnv",
"itertools",
"lazy_static",
]

[[package]]
name = "want"
version = "0.3.0"
@@ -4170,7 +4432,7 @@ dependencies = [
"pbkdf2",
"sha1",
"time",
"zstd",
"zstd 0.11.2+zstd.1.5.2",
]

[[package]]
@@ -4179,7 +4441,16 @@ version = "0.11.2+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
dependencies = [
"zstd-safe",
"zstd-safe 5.0.2+zstd.1.5.2",
]

[[package]]
name = "zstd"
version = "0.12.3+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806"
dependencies = [
"zstd-safe 6.0.4+zstd.1.5.4",
]

[[package]]
@@ -4193,10 +4464,20 @@ dependencies = [
]

[[package]]
name = "zstd-sys"
version = "2.0.5+zstd.1.5.2"
name = "zstd-safe"
version = "6.0.4+zstd.1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edc50ffce891ad571e9f9afe5039c4837bede781ac4bb13052ed7ae695518596"
checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543"
dependencies = [
"libc",
"zstd-sys",
]

[[package]]
name = "zstd-sys"
version = "2.0.7+zstd.1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5"
dependencies = [
"cc",
"libc",
39
Cargo.toml
@@ -9,13 +9,50 @@ members = [
"dump",
"file-store",
"permissive-json-pointer",
"milli",
"filter-parser",
"flatten-serde-json",
"json-depth-checker",
"benchmarks"
]

[workspace.package]
version = "1.0.0"
authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
description = "Meilisearch HTTP server"
homepage = "https://meilisearch.com"
readme = "README.md"
edition = "2021"
license = "MIT"

[profile.release]
codegen-units = 1

[profile.dev.package.flate2]
opt-level = 3

[profile.dev.package.milli]
[profile.dev.package.grenad]
opt-level = 3
[profile.dev.package.roaring]
opt-level = 3

[profile.dev.package.lindera-ipadic-builder]
opt-level = 3
[profile.dev.package.encoding]
opt-level = 3
[profile.dev.package.yada]
opt-level = 3

[profile.release.package.lindera-ipadic-builder]
opt-level = 3
[profile.release.package.encoding]
opt-level = 3
[profile.release.package.yada]
opt-level = 3

[profile.bench.package.lindera-ipadic-builder]
opt-level = 3
[profile.bench.package.encoding]
opt-level = 3
[profile.bench.package.yada]
opt-level = 3
17
Dockerfile
@@ -1,31 +1,30 @@
# Compile
FROM rust:alpine3.16 AS compiler

RUN apk add -q --update-cache --no-cache build-base openssl-dev
FROM rust:bullseye AS compiler

WORKDIR /meilisearch

ARG COMMIT_SHA
ARG COMMIT_DATE
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE}
ARG GIT_TAG
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
ENV RUSTFLAGS="-C target-feature=-crt-static"

COPY . .
RUN set -eux; \
apkArch="$(apk --print-arch)"; \
if [ "$apkArch" = "aarch64" ]; then \
arch="$(dpkg --print-architecture)"; \
if [ "$arch" = "aarch64" ]; then \
export JEMALLOC_SYS_WITH_LG_PAGE=16; \
fi && \
cargo build --release

# Run
FROM alpine:3.16
FROM debian:11.6

ENV MEILI_HTTP_ADDR 0.0.0.0:7700
ENV MEILI_SERVER_PROVIDER docker

RUN apk update --quiet \
&& apk add -q --no-cache libgcc tini curl
RUN apt update -q \
&& apt install -q -y tini

# add meilisearch to the `/bin` so you can run it from anywhere and it's easy
# to find.
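A rough local equivalent of what the Docker CI builds, for experimentation (the image tag is illustrative; multi-arch pushes additionally need a configured buildx builder and `--push`):

```bash
docker buildx build \
  --build-arg COMMIT_SHA="$(git rev-parse HEAD)" \
  --build-arg COMMIT_DATE="$(git show -s --format=%cd --date=iso-strict HEAD)" \
  --build-arg GIT_TAG="$(git describe --tags --abbrev=0)" \
  -t meilisearch:local .
```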
15
README.md
@@ -13,7 +13,6 @@
</h4>

<p align="center">
<a href="https://github.com/meilisearch/meilisearch/actions"><img src="https://github.com/meilisearch/meilisearch/workflows/Cargo%20test/badge.svg" alt="Build Status"></a>
<a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
<a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
<a href="https://app.bors.tech/repositories/26457"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
@@ -69,7 +68,7 @@ Install one of our SDKs in your project for seamless integration between Meilise

Take a look at the complete [Meilisearch integration list](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html).

![Logo gallery of the Meilisearch integrations](assets/integrations.png)
[![Logo gallery of the Meilisearch integrations](assets/integrations.png)](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html)

## ⚙️ Advanced usage

@@ -101,3 +100,15 @@ Meilisearch is a search engine created by [Meili](https://www.welcometothejungle
- For everything else, please check [this page listing some of the other places where you can find us](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html)

Thank you for your support!

## 👩‍💻 Contributing

Meilisearch is, and will always be, open-source! If you want to contribute to the project, please take a look at [our contribution guidelines](CONTRIBUTING.md).

## 📦 Versioning

Meilisearch releases and their associated binaries are available [in this GitHub page](https://github.com/meilisearch/meilisearch/releases).

The binaries are versioned following [SemVer conventions](https://semver.org/). To know more, read our [versioning policy](https://github.com/meilisearch/engine-team/blob/main/resources/versioning-policy.md).

Unlike the binaries, crates in this repository are not currently available on [crates.io](https://crates.io/) and do not follow [SemVer conventions](https://semver.org).
6
assets/milli-logo.svg
Normal file
@@ -0,0 +1,6 @@
<svg width="277" height="236" viewBox="0 0 277 236" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M213.085 190L242.907 86H276.196L246.375 190H213.085Z" fill="#494949"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M0 190L29.8215 86H63.1111L33.2896 190H0Z" fill="#494949"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M124.986 0L57.5772 235.083L60.7752 236H90.6038L158.276 0H124.986Z" fill="#494949"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M195.273 0L127.601 236H160.891L228.563 0H195.273Z" fill="#494949"/>
</svg>
1
benchmarks/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
benches/datasets_paths.rs
54
benchmarks/Cargo.toml
Normal file
@@ -0,0 +1,54 @@
[package]
name = "benchmarks"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
anyhow = "1.0.65"
csv = "1.1.6"
milli = { path = "../milli", default-features = false }
mimalloc = { version = "0.1.29", default-features = false }
serde_json = { version = "1.0.85", features = ["preserve_order"] }

[dev-dependencies]
criterion = { version = "0.4.0", features = ["html_reports"] }
rand = "0.8.5"
rand_chacha = "0.3.1"
roaring = "0.10.1"

[build-dependencies]
anyhow = "1.0.65"
bytes = "1.2.1"
convert_case = "0.6.0"
flate2 = "1.0.24"
reqwest = { version = "0.11.12", features = ["blocking", "rustls-tls"], default-features = false }

[features]
default = ["milli/default"]

[[bench]]
name = "search_songs"
harness = false

[[bench]]
name = "search_wiki"
harness = false

[[bench]]
name = "search_geo"
harness = false

[[bench]]
name = "indexing"
harness = false

[[bench]]
name = "formatting"
harness = false
138  benchmarks/README.md  Normal file
@ -0,0 +1,138 @@
Benchmarks
==========

## TOC

- [Run the benchmarks](#run-the-benchmarks)
- [Comparison between benchmarks](#comparison-between-benchmarks)
- [Datasets](#datasets)

## Run the benchmarks

### On our private server

The Meili team self-hosts its own GitHub runner to run benchmarks on our dedicated bare metal server.

To trigger the benchmark workflow:
- Go to the `Actions` tab of this repository.
- Select the `Benchmarks` workflow on the left.
- Click on `Run workflow` in the blue banner.
- Select the branch on which you want to run the benchmarks and select the dataset you want (default: `songs`).
- Finally, click on `Run workflow`.

This GitHub workflow will run the benchmarks and push the `critcmp` report to a DigitalOcean Space (= S3).

The name of the uploaded file is displayed in the workflow.

_[More about critcmp](https://github.com/BurntSushi/critcmp)._

💡 To compare the just-uploaded benchmark with another one, check out the [next section](#comparison-between-benchmarks).

### On your machine

To run all the benchmarks (~5h):

```bash
cargo bench
```

To run only the `search_songs` (~1h), `search_wiki` (~3h), `search_geo` (~20m) or `indexing` (~2h) benchmark:

```bash
cargo bench --bench <dataset name>
```

By default, the datasets will be downloaded and uncompressed automatically in the target directory.<br>
If you don't want to download the datasets every time you update something in the code, you can specify a custom directory with the environment variable `MILLI_BENCH_DATASETS_PATH`:

```bash
mkdir ~/datasets
MILLI_BENCH_DATASETS_PATH=~/datasets cargo bench --bench search_songs # the four datasets are downloaded
touch build.rs
MILLI_BENCH_DATASETS_PATH=~/datasets cargo bench --bench search_songs # the code is compiled again but the datasets are not downloaded
```

## Comparison between benchmarks

The benchmark reports we push are generated with `critcmp`. Thus, we use `critcmp` to show the result of a benchmark, or to compare results between multiple benchmarks.

We provide a script to download and display the comparison report.

Requirements:
- `grep`
- `curl`
- [`critcmp`](https://github.com/BurntSushi/critcmp)

List the available files in the DO Space:

```bash
./benchmarks/scripts/list.sh
```
```bash
songs_main_09a4321.json
songs_geosearch_24ec456.json
search_songs_main_cb45a10b.json
```

Run the comparison script:

```bash
# we get the result of ONE benchmark, this gives you an idea of how much time an operation took
./benchmarks/scripts/compare.sh songs_geosearch_24ec456.json
# we compare two benchmarks
./benchmarks/scripts/compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json
# we compare three benchmarks
./benchmarks/scripts/compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json search_songs_main_cb45a10b.json
```
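
If you want to compare two local runs without uploading anything, `critcmp` can also diff Criterion baselines directly. This is standard Criterion/`critcmp` usage rather than something provided by this repository, so take it as a sketch:

```bash
cargo bench --bench search_songs -- --save-baseline main
# switch branch or change the code, then:
cargo bench --bench search_songs -- --save-baseline pr
critcmp main pr
```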

## Datasets

The benchmarks use the following datasets:
- `smol-songs`
- `smol-wiki`
- `movies`
- `smol-all-countries`

### Songs

`smol-songs` is a subset of the [`songs.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/songs.csv.gz).

It was generated with this command:

```bash
xsv sample --seed 42 1000000 songs.csv -o smol-songs.csv
```

_[Download the generated `smol-songs` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-songs.csv.gz)._

### Wiki

`smol-wiki` is a subset of the [`wikipedia-articles.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/wiki-articles.csv.gz).

It was generated with the following command:

```bash
xsv sample --seed 42 500000 wiki-articles.csv -o smol-wiki-articles.csv
```

_[Download the `smol-wiki` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-wiki-articles.csv.gz)._

### Movies

`movies` is a really small dataset we use as our example in the [getting started guide](https://docs.meilisearch.com/learn/getting_started/).

_[Download the `movies` dataset](https://docs.meilisearch.com/movies.json)._

### All Countries

`smol-all-countries` is a subset of the [`all-countries.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/all-countries.csv.gz).
It has been converted to JSON Lines and then edited so it matches our format for the `_geo` field.

It was generated with the following command:
```bash
bat all-countries.csv.gz | gunzip | xsv sample --seed 42 1000000 | csv2json-lite | sd '"latitude":"(.*?)","longitude":"(.*?)"' '"_geo": { "lat": $1, "lng": $2 }' | sd '\[|\]|,$' '' | gzip > smol-all-countries.jsonl.gz
```

_[Download the `smol-all-countries` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-all-countries.jsonl.gz)._
67  benchmarks/benches/formatting.rs  Normal file
@ -0,0 +1,67 @@
use std::rc::Rc;

use criterion::{criterion_group, criterion_main};
use milli::tokenizer::TokenizerBuilder;
use milli::{FormatOptions, MatcherBuilder, MatchingWord, MatchingWords};

#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

struct Conf<'a> {
    name: &'a str,
    text: &'a str,
    matching_words: MatcherBuilder<'a, Vec<u8>>,
}

fn bench_formatting(c: &mut criterion::Criterion) {
    #[rustfmt::skip]
    let confs = &[
        Conf {
            name: "'the door d'",
            text: r#"He used to do the door sounds in "Star Trek" with his mouth, phssst, phssst. The MD-11 passenger and cargo doors also tend to behave like electromagnetic apertures, because the doors do not have continuous electrical contact with the door frames around the door perimeter. But Theodor said that the doors don't work."#,
            matching_words: MatcherBuilder::new(MatchingWords::new(vec![
                (vec![Rc::new(MatchingWord::new("t".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("he".to_string(), 0, false).unwrap())], vec![0]),
                (vec![Rc::new(MatchingWord::new("the".to_string(), 0, false).unwrap())], vec![0]),
                (vec![Rc::new(MatchingWord::new("door".to_string(), 1, false).unwrap())], vec![1]),
                (vec![Rc::new(MatchingWord::new("do".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("or".to_string(), 0, false).unwrap())], vec![0]),
                (vec![Rc::new(MatchingWord::new("thedoor".to_string(), 1, false).unwrap())], vec![0, 1]),
                (vec![Rc::new(MatchingWord::new("d".to_string(), 0, true).unwrap())], vec![2]),
                (vec![Rc::new(MatchingWord::new("thedoord".to_string(), 1, true).unwrap())], vec![0, 1, 2]),
                (vec![Rc::new(MatchingWord::new("doord".to_string(), 1, true).unwrap())], vec![1, 2]),
            ]), TokenizerBuilder::default().build()),
        },
    ];

    let format_options = &[
        FormatOptions { highlight: false, crop: None },
        FormatOptions { highlight: true, crop: None },
        FormatOptions { highlight: false, crop: Some(10) },
        FormatOptions { highlight: true, crop: Some(10) },
        FormatOptions { highlight: false, crop: Some(20) },
        FormatOptions { highlight: true, crop: Some(20) },
    ];

    for option in format_options {
        let highlight = if option.highlight { "highlight" } else { "no-highlight" };

        let name = match option.crop {
            Some(size) => format!("{}-crop({})", highlight, size),
            None => format!("{}-no-crop", highlight),
        };

        let mut group = c.benchmark_group(&name);
        for conf in confs {
            group.bench_function(conf.name, |b| {
                b.iter(|| {
                    let mut matcher = conf.matching_words.build(conf.text);
                    matcher.format(*option);
                })
            });
        }
        group.finish();
    }
}

criterion_group!(benches, bench_formatting);
criterion_main!(benches);
1380  benchmarks/benches/indexing.rs  Normal file
File diff suppressed because it is too large
122  benchmarks/benches/search_geo.rs  Normal file
@ -0,0 +1,122 @@
mod datasets_paths;
mod utils;

use criterion::{criterion_group, criterion_main};
use milli::update::Settings;
use utils::Conf;

#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

fn base_conf(builder: &mut Settings) {
    let displayed_fields =
        ["geonameid", "name", "asciiname", "alternatenames", "_geo", "population"]
            .iter()
            .map(|s| s.to_string())
            .collect();
    builder.set_displayed_fields(displayed_fields);

    let searchable_fields =
        ["name", "alternatenames", "elevation"].iter().map(|s| s.to_string()).collect();
    builder.set_searchable_fields(searchable_fields);

    let filterable_fields =
        ["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
    builder.set_filterable_fields(filterable_fields);

    let sortable_fields =
        ["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
    builder.set_sortable_fields(sortable_fields);
}

#[rustfmt::skip]
const BASE_CONF: Conf = Conf {
    dataset: datasets_paths::SMOL_ALL_COUNTRIES,
    dataset_format: "jsonl",
    queries: &[
        "",
    ],
    configure: base_conf,
    primary_key: Some("geonameid"),
    ..Conf::BASE
};

fn bench_geo(c: &mut criterion::Criterion) {
    #[rustfmt::skip]
    let confs = &[
        // A basic placeholder with no geo
        utils::Conf {
            group_name: "placeholder with no geo",
            ..BASE_CONF
        },
        // Medium agglomeration: probably the most common use case
        utils::Conf {
            group_name: "asc sort from Lille",
            sort: Some(vec!["_geoPoint(50.62999333378238, 3.086269263384099):asc"]),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "desc sort from Lille",
            sort: Some(vec!["_geoPoint(50.62999333378238, 3.086269263384099):desc"]),
            ..BASE_CONF
        },
        // Big agglomeration: a lot of documents close to our point
        utils::Conf {
            group_name: "asc sort from Tokyo",
            sort: Some(vec!["_geoPoint(35.749512532692144, 139.61664952543356):asc"]),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "desc sort from Tokyo",
            sort: Some(vec!["_geoPoint(35.749512532692144, 139.61664952543356):desc"]),
            ..BASE_CONF
        },
        // The furthest point from any civilization
        utils::Conf {
            group_name: "asc sort from Point Nemo",
            sort: Some(vec!["_geoPoint(-48.87561645055408, -123.39275749319793):asc"]),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "desc sort from Point Nemo",
            sort: Some(vec!["_geoPoint(-48.87561645055408, -123.39275749319793):desc"]),
            ..BASE_CONF
        },
        // Filters
        utils::Conf {
            group_name: "filter of 100km from Lille",
            filter: Some("_geoRadius(50.62999333378238, 3.086269263384099, 100000)"),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "filter of 1km from Lille",
            filter: Some("_geoRadius(50.62999333378238, 3.086269263384099, 1000)"),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "filter of 100km from Tokyo",
            filter: Some("_geoRadius(35.749512532692144, 139.61664952543356, 100000)"),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "filter of 1km from Tokyo",
            filter: Some("_geoRadius(35.749512532692144, 139.61664952543356, 1000)"),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "filter of 100km from Point Nemo",
            filter: Some("_geoRadius(-48.87561645055408, -123.39275749319793, 100000)"),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "filter of 1km from Point Nemo",
            filter: Some("_geoRadius(-48.87561645055408, -123.39275749319793, 1000)"),
            ..BASE_CONF
        },
    ];

    utils::run_benches(c, confs);
}

criterion_group!(benches, bench_geo);
criterion_main!(benches);
196  benchmarks/benches/search_songs.rs  Normal file
@ -0,0 +1,196 @@
mod datasets_paths;
mod utils;

use criterion::{criterion_group, criterion_main};
use milli::update::Settings;
use utils::Conf;

#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

fn base_conf(builder: &mut Settings) {
    let displayed_fields =
        ["id", "title", "album", "artist", "genre", "country", "released", "duration"]
            .iter()
            .map(|s| s.to_string())
            .collect();
    builder.set_displayed_fields(displayed_fields);

    let searchable_fields = ["title", "album", "artist"].iter().map(|s| s.to_string()).collect();
    builder.set_searchable_fields(searchable_fields);

    let faceted_fields = ["released-timestamp", "duration-float", "genre", "country", "artist"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    builder.set_filterable_fields(faceted_fields);
}

#[rustfmt::skip]
const BASE_CONF: Conf = Conf {
    dataset: datasets_paths::SMOL_SONGS,
    queries: &[
        "john ",             // 9097
        "david ",            // 4794
        "charles ",          // 1957
        "david bowie ",      // 1200
        "michael jackson ",  // 600
        "thelonious monk ",  // 303
        "charles mingus ",   // 142
        "marcus miller ",    // 60
        "tamo ",             // 13
        "Notstandskomitee ", // 4
    ],
    configure: base_conf,
    primary_key: Some("id"),
    ..Conf::BASE
};

fn bench_songs(c: &mut criterion::Criterion) {
    let default_criterion: Vec<String> =
        milli::default_criteria().iter().map(|criteria| criteria.to_string()).collect();
    let default_criterion = default_criterion.iter().map(|s| s.as_str());
    let asc_default: Vec<&str> =
        std::iter::once("released-timestamp:asc").chain(default_criterion.clone()).collect();
    let desc_default: Vec<&str> =
        std::iter::once("released-timestamp:desc").chain(default_criterion.clone()).collect();

    let basic_with_quote: Vec<String> = BASE_CONF
        .queries
        .iter()
        .map(|s| {
            s.trim().split(' ').map(|s| format!(r#""{}""#, s)).collect::<Vec<String>>().join(" ")
        })
        .collect();
    let basic_with_quote: &[&str] =
        &basic_with_quote.iter().map(|s| s.as_str()).collect::<Vec<&str>>();

    #[rustfmt::skip]
    let confs = &[
        /* first we bench each criterion alone */
        utils::Conf {
            group_name: "proximity",
            queries: &[
                "black saint sinner lady ",
                "les dangeureuses 1960 ",
                "The Disneyland Sing-Along Chorus ",
                "Under Great Northern Lights ",
                "7000 Danses Un Jour Dans Notre Vie ",
            ],
            criterion: Some(&["proximity"]),
            optional_words: false,
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "typo",
            queries: &[
                "mongus ",
                "thelonius monk ",
                "Disnaylande ",
                "the white striper ",
                "indochie ",
                "indochien ",
                "klub des loopers ",
                "fear of the duck ",
                "michel depech ",
                "stromal ",
                "dire straights ",
                "Arethla Franklin ",
            ],
            criterion: Some(&["typo"]),
            optional_words: false,
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "words",
            queries: &[
                "the black saint and the sinner lady and the good doggo ", // four words to pop
                "les liaisons dangeureuses 1793 ",                         // one word to pop
                "The Disneyland Children's Sing-Alone song ",              // two words to pop
                "seven nation mummy ",                                     // one word to pop
                "7000 Danses / Le Baiser / je me trompe de mots ",         // four words to pop
                "Bring Your Daughter To The Slaughter but now this is not part of the title ", // nine words to pop
                "whathavenotnsuchforth and a good amount of words to pop to match the first one ", // 13
            ],
            criterion: Some(&["words"]),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "asc",
            criterion: Some(&["released-timestamp:asc"]),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "desc",
            criterion: Some(&["released-timestamp:desc"]),
            ..BASE_CONF
        },

        /* then we bench the asc and desc criterion on top of the default criterion */
        utils::Conf {
            group_name: "asc + default",
            criterion: Some(&asc_default[..]),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "desc + default",
            criterion: Some(&desc_default[..]),
            ..BASE_CONF
        },

        /* we bench the filters with the default request */
        utils::Conf {
            group_name: "basic filter: <=",
            filter: Some("released-timestamp <= 946728000"), // year 2000
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "basic filter: TO",
            filter: Some("released-timestamp 946728000 TO 1262347200"), // year 2000 to 2010
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "big filter",
            filter: Some("released-timestamp != 1262347200 AND (NOT (released-timestamp = 946728000)) AND (duration-float = 1 OR (duration-float 1.1 TO 1.5 AND released-timestamp > 315576000))"),
            ..BASE_CONF
        },

        /* then we bench some global / normal search with all the default criterion in the default
         * order */
        utils::Conf {
            group_name: "basic placeholder",
            queries: &[""],
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "basic without quote",
            queries: &BASE_CONF
                .queries
                .iter()
                .map(|s| s.trim()) // we remove the space at the end of each request
                .collect::<Vec<&str>>(),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "basic with quote",
            queries: basic_with_quote,
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "prefix search",
            queries: &[
                "s", // 500k+ results
                "a", //
                "b", //
                "i", //
                "x", // only 7k results
            ],
            ..BASE_CONF
        },
    ];

    utils::run_benches(c, confs);
}

criterion_group!(benches, bench_songs);
criterion_main!(benches);
129  benchmarks/benches/search_wiki.rs  Normal file
@ -0,0 +1,129 @@
mod datasets_paths;
mod utils;

use criterion::{criterion_group, criterion_main};
use milli::update::Settings;
use utils::Conf;

#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

fn base_conf(builder: &mut Settings) {
    let displayed_fields = ["title", "body", "url"].iter().map(|s| s.to_string()).collect();
    builder.set_displayed_fields(displayed_fields);

    let searchable_fields = ["title", "body"].iter().map(|s| s.to_string()).collect();
    builder.set_searchable_fields(searchable_fields);
}

#[rustfmt::skip]
const BASE_CONF: Conf = Conf {
    dataset: datasets_paths::SMOL_WIKI_ARTICLES,
    queries: &[
        "mingus ",        // 46 candidates
        "miles davis ",   // 159
        "rock and roll ", // 1007
        "machine ",       // 3448
        "spain ",         // 7002
        "japan ",         // 10.593
        "france ",        // 17.616
        "film ",          // 24.959
    ],
    configure: base_conf,
    ..Conf::BASE
};

fn bench_songs(c: &mut criterion::Criterion) {
    let basic_with_quote: Vec<String> = BASE_CONF
        .queries
        .iter()
        .map(|s| {
            s.trim().split(' ').map(|s| format!(r#""{}""#, s)).collect::<Vec<String>>().join(" ")
        })
        .collect();
    let basic_with_quote: &[&str] =
        &basic_with_quote.iter().map(|s| s.as_str()).collect::<Vec<&str>>();

    #[rustfmt::skip]
    let confs = &[
        /* first we bench each criterion alone */
        utils::Conf {
            group_name: "proximity",
            queries: &[
                "herald sings ",
                "april paris ",
                "tea two ",
                "diesel engine ",
            ],
            criterion: Some(&["proximity"]),
            optional_words: false,
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "typo",
            queries: &[
                "migrosoft ",
                "linax ",
                "Disnaylande ",
                "phytogropher ",
                "nympalidea ",
                "aritmetric ",
                "the fronce ",
                "sisan ",
            ],
            criterion: Some(&["typo"]),
            optional_words: false,
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "words",
            queries: &[
                "the black saint and the sinner lady and the good doggo ", // four words to pop, 27 results
                "Kameya Tokujirō mingus monk ",                            // two words to pop, 55
                "Ulrich Hensel meilisearch milli ",                        // two words to pop, 306
                "Idaho Bellevue pizza ",                                   // one word to pop, 800
                "Abraham machin ",                                         // one word to pop, 1141
            ],
            criterion: Some(&["words"]),
            ..BASE_CONF
        },
        /* then we bench some global / normal search with all the default criterion in the default
         * order */
        utils::Conf {
            group_name: "basic placeholder",
            queries: &[""],
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "basic without quote",
            queries: &BASE_CONF
                .queries
                .iter()
                .map(|s| s.trim()) // we remove the space at the end of each request
                .collect::<Vec<&str>>(),
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "basic with quote",
            queries: basic_with_quote,
            ..BASE_CONF
        },
        utils::Conf {
            group_name: "prefix search",
            queries: &[
                "t", // 453k results
                "c", // 405k
                "g", // 318k
                "j", // 227k
                "q", // 71k
                "x", // 17k
            ],
            ..BASE_CONF
        },
    ];

    utils::run_benches(c, confs);
}

criterion_group!(benches, bench_songs);
criterion_main!(benches);
256  benchmarks/benches/utils.rs  Normal file
@ -0,0 +1,256 @@
#![allow(dead_code)]

use std::fs::{create_dir_all, remove_dir_all, File};
use std::io::{self, BufRead, BufReader, Cursor, Read, Seek};
use std::num::ParseFloatError;
use std::path::Path;
use std::str::FromStr;

use criterion::BenchmarkId;
use milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
use milli::heed::EnvOpenOptions;
use milli::update::{
    IndexDocuments, IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig, Settings,
};
use milli::{Criterion, Filter, Index, Object, TermsMatchingStrategy};
use serde_json::Value;

pub struct Conf<'a> {
    /// where we are going to create our database.mmdb directory;
    /// each benchmark will first try to delete it and then recreate it
    pub database_name: &'a str,
    /// the dataset to be used; it must be an uncompressed file in the format given by `dataset_format`
    pub dataset: &'a str,
    /// The format of the dataset
    pub dataset_format: &'a str,
    pub group_name: &'a str,
    pub queries: &'a [&'a str],
    /// here you can change which criteria are used and in which order.
    /// - if you specify something, all of the base configuration will be thrown out
    /// - if you don't specify anything (None), the default configuration will be kept
    pub criterion: Option<&'a [&'a str]>,
    /// the last chance to configure your database as you want
    pub configure: fn(&mut Settings),
    pub filter: Option<&'a str>,
    pub sort: Option<Vec<&'a str>>,
    /// enable or disable the optional words on the query
    pub optional_words: bool,
    /// primary key; if it is None, we'll auto-generate docids for every document
    pub primary_key: Option<&'a str>,
}

impl Conf<'_> {
    pub const BASE: Self = Conf {
        database_name: "benches.mmdb",
        dataset_format: "csv",
        dataset: "",
        group_name: "",
        queries: &[],
        criterion: None,
        configure: |_| (),
        filter: None,
        sort: None,
        optional_words: true,
        primary_key: None,
    };
}
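
// Illustration only (not part of the original file): the `search_*` benches build
// their configurations from `Conf::BASE` and override just the fields they need, e.g.:
//
//     const SONGS_TYPO: Conf = Conf {
//         dataset: "path/to/smol-songs.csv", // hypothetical path
//         group_name: "typo",
//         queries: &["mongus "],
//         criterion: Some(&["typo"]),
//         ..Conf::BASE
//     };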

pub fn base_setup(conf: &Conf) -> Index {
    match remove_dir_all(conf.database_name) {
        Ok(_) => (),
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => (),
        Err(e) => panic!("{}", e),
    }
    create_dir_all(conf.database_name).unwrap();

    let mut options = EnvOpenOptions::new();
    options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
    options.max_readers(10);
    let index = Index::new(options, conf.database_name).unwrap();

    let config = IndexerConfig::default();
    let mut wtxn = index.write_txn().unwrap();
    let mut builder = Settings::new(&mut wtxn, &index, &config);

    if let Some(primary_key) = conf.primary_key {
        builder.set_primary_key(primary_key.to_string());
    }

    if let Some(criterion) = conf.criterion {
        builder.reset_filterable_fields();
        builder.reset_criteria();
        builder.reset_stop_words();

        let criterion = criterion.iter().map(|s| Criterion::from_str(s).unwrap()).collect();
        builder.set_criteria(criterion);
    }

    (conf.configure)(&mut builder);

    builder.execute(|_| (), || false).unwrap();
    wtxn.commit().unwrap();

    let config = IndexerConfig::default();
    let mut wtxn = index.write_txn().unwrap();
    let indexing_config = IndexDocumentsConfig {
        autogenerate_docids: conf.primary_key.is_none(),
        update_method: IndexDocumentsMethod::ReplaceDocuments,
        ..Default::default()
    };
    let builder =
        IndexDocuments::new(&mut wtxn, &index, &config, indexing_config, |_| (), || false).unwrap();
    let documents = documents_from(conf.dataset, conf.dataset_format);
    let (builder, user_error) = builder.add_documents(documents).unwrap();
    user_error.unwrap();
    builder.execute().unwrap();
    wtxn.commit().unwrap();

    index
}

pub fn run_benches(c: &mut criterion::Criterion, confs: &[Conf]) {
    for conf in confs {
        let index = base_setup(conf);

        let file_name = Path::new(conf.dataset).file_name().and_then(|f| f.to_str()).unwrap();
        let name = format!("{}: {}", file_name, conf.group_name);
        let mut group = c.benchmark_group(&name);

        for &query in conf.queries {
            group.bench_with_input(BenchmarkId::from_parameter(query), &query, |b, &query| {
                b.iter(|| {
                    let rtxn = index.read_txn().unwrap();
                    let mut search = index.search(&rtxn);
                    search.query(query).terms_matching_strategy(TermsMatchingStrategy::default());
                    if let Some(filter) = conf.filter {
                        let filter = Filter::from_str(filter).unwrap().unwrap();
                        search.filter(filter);
                    }
                    if let Some(sort) = &conf.sort {
                        let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
                        search.sort_criteria(sort);
                    }
                    let _ids = search.execute().unwrap();
                });
            });
        }
        group.finish();

        index.prepare_for_closing().wait();
    }
}

pub fn documents_from(filename: &str, filetype: &str) -> DocumentsBatchReader<impl BufRead + Seek> {
    let reader = File::open(filename)
        .unwrap_or_else(|_| panic!("could not find the dataset in: {}", filename));
    let reader = BufReader::new(reader);
    let documents = match filetype {
        "csv" => documents_from_csv(reader).unwrap(),
        "json" => documents_from_json(reader).unwrap(),
        "jsonl" => documents_from_jsonl(reader).unwrap(),
        otherwise => panic!("invalid update format {:?}", otherwise),
    };
    DocumentsBatchReader::from_reader(Cursor::new(documents)).unwrap()
}

fn documents_from_jsonl(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
    let mut documents = DocumentsBatchBuilder::new(Vec::new());

    for result in serde_json::Deserializer::from_reader(reader).into_iter::<Object>() {
        let object = result?;
        documents.append_json_object(&object)?;
    }

    documents.into_inner().map_err(Into::into)
}

fn documents_from_json(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
    let mut documents = DocumentsBatchBuilder::new(Vec::new());

    documents.append_json_array(reader)?;

    documents.into_inner().map_err(Into::into)
}

fn documents_from_csv(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
    let csv = csv::Reader::from_reader(reader);

    let mut documents = DocumentsBatchBuilder::new(Vec::new());
    documents.append_csv(csv)?;

    documents.into_inner().map_err(Into::into)
}

enum AllowedType {
    String,
    Number,
}

fn parse_csv_header(header: &str) -> (String, AllowedType) {
    // if there are several separators we only split on the last one.
    match header.rsplit_once(':') {
        Some((field_name, field_type)) => match field_type {
            "string" => (field_name.to_string(), AllowedType::String),
            "number" => (field_name.to_string(), AllowedType::Number),
            // we may return an error in this case.
            _otherwise => (header.to_string(), AllowedType::String),
        },
        None => (header.to_string(), AllowedType::String),
    }
}
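
// Illustration only (not part of the original file): because we split on the *last* `:`,
// `parse_csv_header("price:number")` yields ("price", Number), while
// `parse_csv_header("a:b:string")` yields ("a:b", String); a header with no type
// annotation, such as `parse_csv_header("title")`, defaults to ("title", String).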

struct CSVDocumentDeserializer<R>
where
    R: Read,
{
    documents: csv::StringRecordsIntoIter<R>,
    headers: Vec<(String, AllowedType)>,
}

impl<R: Read> CSVDocumentDeserializer<R> {
    fn from_reader(reader: R) -> io::Result<Self> {
        let mut records = csv::Reader::from_reader(reader);

        let headers = records.headers()?.into_iter().map(parse_csv_header).collect();

        Ok(Self { documents: records.into_records(), headers })
    }
}

impl<R: Read> Iterator for CSVDocumentDeserializer<R> {
    type Item = anyhow::Result<Object>;

    fn next(&mut self) -> Option<Self::Item> {
        let csv_document = self.documents.next()?;

        match csv_document {
            Ok(csv_document) => {
                let mut document = Object::new();

                for ((field_name, field_type), value) in
                    self.headers.iter().zip(csv_document.into_iter())
                {
                    let parsed_value: Result<Value, ParseFloatError> = match field_type {
                        AllowedType::Number => {
                            value.parse::<f64>().map(Value::from).map_err(Into::into)
                        }
                        AllowedType::String => Ok(Value::String(value.to_string())),
                    };

                    match parsed_value {
                        Ok(value) => drop(document.insert(field_name.to_string(), value)),
                        Err(_e) => {
                            return Some(Err(anyhow::anyhow!(
                                "Value '{}' is not a valid number",
                                value
                            )))
                        }
                    }
                }

                Some(Ok(document))
            }
            Err(e) => Some(Err(anyhow::anyhow!("Error parsing csv document: {}", e))),
        }
    }
}
115  benchmarks/build.rs  Normal file
@ -0,0 +1,115 @@
use std::fs::File;
use std::io::{Cursor, Read, Seek, Write};
use std::path::{Path, PathBuf};
use std::{env, fs};

use bytes::Bytes;
use convert_case::{Case, Casing};
use flate2::read::GzDecoder;
use reqwest::IntoUrl;

const BASE_URL: &str = "https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets";

const DATASET_SONGS: (&str, &str) = ("smol-songs", "csv");
const DATASET_SONGS_1_2: (&str, &str) = ("smol-songs-1_2", "csv");
const DATASET_SONGS_3_4: (&str, &str) = ("smol-songs-3_4", "csv");
const DATASET_SONGS_4_4: (&str, &str) = ("smol-songs-4_4", "csv");
const DATASET_WIKI: (&str, &str) = ("smol-wiki-articles", "csv");
const DATASET_WIKI_1_2: (&str, &str) = ("smol-wiki-articles-1_2", "csv");
const DATASET_WIKI_3_4: (&str, &str) = ("smol-wiki-articles-3_4", "csv");
const DATASET_WIKI_4_4: (&str, &str) = ("smol-wiki-articles-4_4", "csv");
const DATASET_MOVIES: (&str, &str) = ("movies", "json");
const DATASET_MOVIES_1_2: (&str, &str) = ("movies-1_2", "json");
const DATASET_MOVIES_3_4: (&str, &str) = ("movies-3_4", "json");
const DATASET_MOVIES_4_4: (&str, &str) = ("movies-4_4", "json");
const DATASET_NESTED_MOVIES: (&str, &str) = ("nested_movies", "json");
const DATASET_GEO: (&str, &str) = ("smol-all-countries", "jsonl");

const ALL_DATASETS: &[(&str, &str)] = &[
    DATASET_SONGS,
    DATASET_SONGS_1_2,
    DATASET_SONGS_3_4,
    DATASET_SONGS_4_4,
    DATASET_WIKI,
    DATASET_WIKI_1_2,
    DATASET_WIKI_3_4,
    DATASET_WIKI_4_4,
    DATASET_MOVIES,
    DATASET_MOVIES_1_2,
    DATASET_MOVIES_3_4,
    DATASET_MOVIES_4_4,
    DATASET_NESTED_MOVIES,
    DATASET_GEO,
];

/// The name of the environment variable used to select the path
/// of the directory containing the datasets
const BASE_DATASETS_PATH_KEY: &str = "MILLI_BENCH_DATASETS_PATH";

fn main() -> anyhow::Result<()> {
    let out_dir = PathBuf::from(env::var(BASE_DATASETS_PATH_KEY).unwrap_or(env::var("OUT_DIR")?));

    let benches_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?).join("benches");
    let mut manifest_paths_file = File::create(benches_dir.join("datasets_paths.rs"))?;
    write!(
        manifest_paths_file,
        r#"//! This file is generated by the build script.
//! Do not modify by hand, use the build.rs file.
#![allow(dead_code)]
"#
    )?;
    writeln!(manifest_paths_file)?;

    for (dataset, extension) in ALL_DATASETS {
        let out_path = out_dir.join(dataset);
        let out_file = out_path.with_extension(extension);

        writeln!(
            &mut manifest_paths_file,
            r#"pub const {}: &str = {:?};"#,
            dataset.to_case(Case::ScreamingSnake),
            out_file.display(),
        )?;

        if out_file.exists() {
            eprintln!(
                "The dataset {} already exists on the file system and will not be downloaded again",
                out_path.display(),
            );
            continue;
        }
        let url = format!("{}/{}.{}.gz", BASE_URL, dataset, extension);
        eprintln!("downloading: {}", url);
        let bytes = retry(|| download_dataset(url.clone()), 10)?;
        eprintln!("{} downloaded successfully", url);
        eprintln!("uncompressing in {}", out_file.display());
        uncompress_in_file(bytes, &out_file)?;
    }

    Ok(())
}

fn retry<Ok, Err>(fun: impl Fn() -> Result<Ok, Err>, times: usize) -> Result<Ok, Err> {
    for _ in 0..times {
        if let ok @ Ok(_) = fun() {
            return ok;
        }
    }
    fun()
}

fn download_dataset<U: IntoUrl>(url: U) -> anyhow::Result<Cursor<Bytes>> {
    let bytes =
        reqwest::blocking::Client::builder().timeout(None).build()?.get(url).send()?.bytes()?;
    Ok(Cursor::new(bytes))
}

fn uncompress_in_file<R: Read + Seek, P: AsRef<Path>>(bytes: R, path: P) -> anyhow::Result<()> {
    let path = path.as_ref();
    let mut gz = GzDecoder::new(bytes);
    let mut dataset = Vec::new();
    gz.read_to_end(&mut dataset)?;

    fs::write(path, dataset)?;
    Ok(())
}
38  benchmarks/scripts/compare.sh  Executable file
@ -0,0 +1,38 @@
#!/usr/bin/env bash

# Requirements:
# - critcmp. See: https://github.com/BurntSushi/critcmp
# - curl

# Usage
# $ bash compare.sh json_file1 json_file2
# ex: bash compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json

# Checking that critcmp is installed
command -v critcmp > /dev/null 2>&1
if [[ "$?" -ne 0 ]]; then
    echo 'You must install critcmp to make this script work.'
    echo 'See: https://github.com/BurntSushi/critcmp'
    echo ' $ cargo install critcmp'
    exit 1
fi

s3_url='https://milli-benchmarks.fra1.digitaloceanspaces.com/critcmp_results'

for file in $@
do
    file_s3_url="$s3_url/$file"
    file_local_path="/tmp/$file"

    if [[ ! -f $file_local_path ]]; then
        curl $file_s3_url --output $file_local_path --silent
        if [[ "$?" -ne 0 ]]; then
            echo 'curl command failed.'
            exit 1
        fi
    fi
done

# prefix each file name with /tmp/, where the reports were downloaded
path_list=$(echo " $@" | sed 's/ / \/tmp\//g')

critcmp $path_list
14  benchmarks/scripts/list.sh  Executable file
@ -0,0 +1,14 @@
#!/usr/bin/env bash

# Requirements:
# - curl
# - grep

res=$(curl -s https://milli-benchmarks.fra1.digitaloceanspaces.com | grep -o '<Key>[^<]\+' | cut -c 5- | grep critcmp_results/ | cut -c 18-)

for pattern in "$@"
do
    res=$(echo "$res" | grep $pattern)
done

echo "$res"
5  benchmarks/src/lib.rs  Normal file
@ -0,0 +1,5 @@
//! This library is only used to isolate the benchmarks
//! from the original milli library.
//!
//! It does not include functions that are interesting for milli library
//! users, only for milli contributors.
@ -45,7 +45,7 @@ log_level = "INFO"

dump_dir = "dumps/"
# Sets the directory where Meilisearch will create dump files.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#dumps-destination
# https://docs.meilisearch.com/learn/configuration/instance_options.html#dump-directory

# import_dump = "./path/to/my/file.dump"
# Imports the dump file located at the specified path. Path must point to a .dump file.
@ -1,7 +1,14 @@
[package]
name = "dump"
version = "1.0.0"
edition = "2021"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
anyhow = "1.0.65"
@ -203,12 +203,11 @@ pub(crate) mod test {

use big_s::S;
use maplit::btreeset;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::keys::{Action, Key};
use meilisearch_types::milli::update::Setting;
use meilisearch_types::milli::{self};
use meilisearch_types::settings::{Checked, Settings};
use meilisearch_types::star_or::StarOr;
use meilisearch_types::tasks::{Details, Status};
use serde_json::{json, Map, Value};
use time::macros::datetime;
@ -341,7 +340,7 @@ pub(crate) mod test {
name: Some(S("doggos_key")),
uid: Uuid::from_str("9f8a34da-b6b2-42f0-939b-dbd4c3448655").unwrap(),
actions: vec![Action::DocumentsAll],
indexes: vec![StarOr::Other(IndexUid::from_str("doggos").unwrap())],
indexes: vec![IndexUidPattern::from_str("doggos").unwrap()],
expires_at: Some(datetime!(4130-03-14 12:21 UTC)),
created_at: datetime!(1960-11-15 0:00 UTC),
updated_at: datetime!(2022-11-10 0:00 UTC),
@ -351,7 +350,7 @@ pub(crate) mod test {
name: Some(S("master_key")),
uid: Uuid::from_str("4622f717-1c00-47bb-a494-39d76a49b591").unwrap(),
actions: vec![Action::All],
indexes: vec![StarOr::Star],
indexes: vec![IndexUidPattern::all()],
expires_at: None,
created_at: datetime!(0000-01-01 00:01 UTC),
updated_at: datetime!(1964-05-04 17:25 UTC),
@ -181,10 +181,8 @@ impl CompatV5ToV6 {
.indexes
.into_iter()
.map(|index| match index {
v5::StarOr::Star => v6::StarOr::Star,
v5::StarOr::Other(uid) => {
    v6::StarOr::Other(v6::IndexUid::new_unchecked(uid.as_str()))
}
v5::StarOr::Star => v6::IndexUidPattern::all(),
v5::StarOr::Other(uid) => v6::IndexUidPattern::new_unchecked(uid.as_str()),
})
.collect(),
expires_at: key.expires_at,
@ -112,8 +112,11 @@ impl V3Reader {
pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V3IndexReader>> + '_> {
Ok(self.index_uuid.iter().map(|index| -> Result<_> {
V3IndexReader::new(
index.uid.clone(),
&self.dump.path().join("indexes").join(index.uuid.to_string()),
index,
BufReader::new(
    File::open(self.dump.path().join("updates").join("data.jsonl")).unwrap(),
),
)
}))
}
@ -155,16 +158,42 @@ pub struct V3IndexReader {
}

impl V3IndexReader {
pub fn new(name: String, path: &Path) -> Result<Self> {
pub fn new(path: &Path, index_uuid: &IndexUuid, tasks: BufReader<File>) -> Result<Self> {
let meta = File::open(path.join("meta.json"))?;
let meta: DumpMeta = serde_json::from_reader(meta)?;

let mut created_at = None;
let mut updated_at = None;

for line in tasks.lines() {
    let task: Task = serde_json::from_str(&line?)?;

    if !(task.uuid == index_uuid.uuid && task.is_finished()) {
        continue;
    }

    let new_created_at = match task.update.meta() {
        Kind::DocumentAddition { .. } | Kind::Settings(_) => task.update.finished_at(),
        _ => None,
    };
    let new_updated_at = task.update.finished_at();

    if created_at.is_none() || created_at > new_created_at {
        created_at = new_created_at;
    }

    if updated_at.is_none() || updated_at < new_updated_at {
        updated_at = new_updated_at;
    }
}

let current_time = OffsetDateTime::now_utc();

let metadata = IndexMetadata {
uid: name,
uid: index_uuid.uid.clone(),
primary_key: meta.primary_key,
// FIXME: Iterate over the whole task queue to find the creation and last update date.
created_at: OffsetDateTime::now_utc(),
updated_at: OffsetDateTime::now_utc(),
created_at: created_at.unwrap_or(current_time),
updated_at: updated_at.unwrap_or(current_time),
};

let ret = V3IndexReader {
@ -263,12 +292,12 @@ pub(crate) mod test {
assert!(indexes.is_empty());

// products
insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
insta::assert_json_snapshot!(products.metadata(), @r###"
{
  "uid": "products",
  "primaryKey": "sku",
  "createdAt": "[now]",
  "updatedAt": "[now]"
  "createdAt": "2022-10-07T11:38:54.74389899Z",
  "updatedAt": "2022-10-07T11:38:55.963185778Z"
}
"###);

@ -278,12 +307,12 @@ pub(crate) mod test {
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

// movies
insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
insta::assert_json_snapshot!(movies.metadata(), @r###"
{
  "uid": "movies",
  "primaryKey": "id",
  "createdAt": "[now]",
  "updatedAt": "[now]"
  "createdAt": "2022-10-07T11:38:54.026649575Z",
  "updatedAt": "2022-10-07T11:39:04.188852537Z"
}
"###);

@ -308,12 +337,12 @@ pub(crate) mod test {
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

// spells
insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
insta::assert_json_snapshot!(spells.metadata(), @r###"
{
  "uid": "dnd_spells",
  "primaryKey": "index",
  "createdAt": "[now]",
  "updatedAt": "[now]"
  "createdAt": "2022-10-07T11:38:56.265951133Z",
  "updatedAt": "2022-10-07T11:38:56.521004328Z"
}
"###);
@ -74,6 +74,26 @@ impl UpdateStatus {
_ => None,
}
}

pub fn enqueued_at(&self) -> Option<OffsetDateTime> {
    match self {
        UpdateStatus::Processing(u) => Some(u.from.enqueued_at),
        UpdateStatus::Enqueued(u) => Some(u.enqueued_at),
        UpdateStatus::Processed(u) => Some(u.from.from.enqueued_at),
        UpdateStatus::Aborted(u) => Some(u.from.enqueued_at),
        UpdateStatus::Failed(u) => Some(u.from.from.enqueued_at),
    }
}

pub fn finished_at(&self) -> Option<OffsetDateTime> {
    match self {
        UpdateStatus::Processing(_) => None,
        UpdateStatus::Enqueued(_) => None,
        UpdateStatus::Processed(u) => Some(u.processed_at),
        UpdateStatus::Aborted(_) => None,
        UpdateStatus::Failed(u) => Some(u.failed_at),
    }
}
}

#[derive(Debug, Deserialize, Clone)]
@ -34,8 +34,7 @@ pub type PaginationSettings = meilisearch_types::settings::PaginationSettings;

// everything related to the api keys
pub type Action = meilisearch_types::keys::Action;
pub type StarOr<T> = meilisearch_types::star_or::StarOr<T>;
pub type IndexUid = meilisearch_types::index_uid::IndexUid;
pub type IndexUidPattern = meilisearch_types::index_uid_pattern::IndexUidPattern;

// everything related to the errors
pub type ResponseError = meilisearch_types::error::ResponseError;
@ -1,7 +1,14 @@
[package]
name = "file-store"
version = "1.0.0"
edition = "2021"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
tempfile = "3.3.0"
19  filter-parser/Cargo.toml  Normal file
@ -0,0 +1,19 @@
[package]
name = "filter-parser"
description = "The parser for the Meilisearch filter syntax"
publish = false

version.workspace = true
authors.workspace = true
# description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
nom = "7.1.1"
nom_locate = "4.0.0"

[dev-dependencies]
insta = "1.21.0"
36  filter-parser/README.md  Normal file
@ -0,0 +1,36 @@
# Filter parser

This workspace is dedicated to the parsing of the Meilisearch filters.

Most of the code and explanations are in [`lib.rs`](./src/lib.rs); in particular, the BNF of the filters is at the top of that file.

The parser uses [nom](https://docs.rs/nom/) to do most of its work and [nom-locate](https://docs.rs/nom_locate/) to keep track of what we were doing when we encountered an error.

## Cli
A simple main is provided to quickly test whether a filter can be parsed, without pulling in milli.
It takes one argument and tries to parse it.
```
cargo run -- 'field = value' # success
cargo run -- 'field = "doggo' # error => missing closing delimiter "
```
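
The same parser can also be driven from Rust. This is a minimal sketch based on the public `FilterCondition::parse` entry point used by the fuzz target in this workspace; rendering errors via `Display` is an assumption:

```rust
use filter_parser::FilterCondition;

fn main() {
    // the Ok payload is the parsed filter AST
    match FilterCondition::parse("channel = ponce AND subscribers > 1000") {
        Ok(condition) => println!("parsed: {:?}", condition),
        Err(e) => println!("invalid filter: {}", e),
    }
}
```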

## Fuzz
The workspace has been fuzzed with [cargo-fuzz](https://rust-fuzz.github.io/book/cargo-fuzz.html).

### Setup
You'll need rust-nightly to execute the fuzzer.

```
cargo install cargo-fuzz
```

### Run
When the filter parser is executed by the fuzzer it triggers a stack overflow really fast. We can avoid this problem by limiting the `max_len` of [libfuzzer](https://llvm.org/docs/LibFuzzer.html) to 500 characters.
```
cargo fuzz run parse -- -max_len=500
```

## What to do if you find a bug in the parser

- Write a test at the end of [`lib.rs`](./src/lib.rs) to ensure it never happens again.
- Add a file in [the corpus directory](./fuzz/corpus/parse/) with your filter to help the fuzzer find new bugs. Since this directory is going to be heavily polluted by the execution of the fuzzer, it's in the gitignore and you'll need to force-push your new test.
3  filter-parser/fuzz/.gitignore  vendored  Normal file
@ -0,0 +1,3 @@
/corpus/
/artifacts/
/target/
25  filter-parser/fuzz/Cargo.toml  Normal file
@ -0,0 +1,25 @@
[package]
name = "filter-parser-fuzz"
version = "0.0.0"
authors = ["Automatically generated"]
publish = false
edition = "2018"

[package.metadata]
cargo-fuzz = true

[dependencies]
libfuzzer-sys = "0.4"

[dependencies.filter-parser]
path = ".."

# Prevent this from interfering with workspaces
[workspace]
members = ["."]

[[bin]]
name = "parse"
path = "fuzz_targets/parse.rs"
test = false
doc = false
1
filter-parser/fuzz/corpus/parse/test_1
Normal file
1
filter-parser/fuzz/corpus/parse/test_1
Normal file
@ -0,0 +1 @@
|
||||
channel = Ponce
|
1
filter-parser/fuzz/corpus/parse/test_10
Normal file
1
filter-parser/fuzz/corpus/parse/test_10
Normal file
@ -0,0 +1 @@
|
||||
channel != ponce
|
1
filter-parser/fuzz/corpus/parse/test_11
Normal file
1
filter-parser/fuzz/corpus/parse/test_11
Normal file
@ -0,0 +1 @@
|
||||
NOT channel = ponce
|
1
filter-parser/fuzz/corpus/parse/test_12
Normal file
1
filter-parser/fuzz/corpus/parse/test_12
Normal file
@ -0,0 +1 @@
|
||||
subscribers < 1000
|
1
filter-parser/fuzz/corpus/parse/test_13
Normal file
1
filter-parser/fuzz/corpus/parse/test_13
Normal file
@ -0,0 +1 @@
|
||||
subscribers > 1000
|
1
filter-parser/fuzz/corpus/parse/test_14
Normal file
1
filter-parser/fuzz/corpus/parse/test_14
Normal file
@ -0,0 +1 @@
|
||||
subscribers <= 1000
|
1
filter-parser/fuzz/corpus/parse/test_15
Normal file
1
filter-parser/fuzz/corpus/parse/test_15
Normal file
@ -0,0 +1 @@
|
||||
subscribers >= 1000
|
1
filter-parser/fuzz/corpus/parse/test_16
Normal file
1
filter-parser/fuzz/corpus/parse/test_16
Normal file
@ -0,0 +1 @@
|
||||
NOT subscribers < 1000
|
1
filter-parser/fuzz/corpus/parse/test_17
Normal file
1
filter-parser/fuzz/corpus/parse/test_17
Normal file
@ -0,0 +1 @@
|
||||
NOT subscribers > 1000
|
1
filter-parser/fuzz/corpus/parse/test_18
Normal file
1
filter-parser/fuzz/corpus/parse/test_18
Normal file
@ -0,0 +1 @@
|
||||
NOT subscribers <= 1000
|
1
filter-parser/fuzz/corpus/parse/test_19
Normal file
1
New fuzz corpus seeds under filter-parser/fuzz/corpus/parse/ (each is a new normal file containing a single filter; hunk @@ -0,0 +1 @@):

test_19: NOT subscribers >= 1000
test_2: subscribers = 12
test_20: subscribers 100 TO 1000
test_21: NOT subscribers 100 TO 1000
test_22: _geoRadius(12, 13, 14)
test_23: NOT _geoRadius(12, 13, 14)
test_24: channel = ponce AND 'dog race' != 'bernese mountain'
test_25: channel = ponce OR 'dog race' != 'bernese mountain'
test_26: channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000
test_27: channel = ponce AND ( 'dog race' != 'bernese mountain' OR subscribers > 1000 )
test_28: (channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000) AND _geoRadius(12, 13, 14)
test_29: channel = Ponce = 12
test_3: channel = 'Mister Mv'
test_30: channel =
test_31: channel = 🐻
test_32: OR
test_33: AND
test_34: channel Ponce
test_35: channel = Ponce OR
test_36: _geoRadius
test_37: _geoRadius = 12
test_38: _geoPoint(12, 13, 14)
test_39: position <= _geoPoint(12, 13, 14)
test_4: channel = "Mister Mv"
test_40: position <= _geoRadius(12, 13, 14)
test_41: channel = 'ponce
test_42: channel = "ponce
test_43: channel = mv OR (followers >= 1000
test_5: 'dog race' = Borzoi
test_6: "dog race" = Chusky
test_7: "dog race" = "Bernese Mountain"
test_8: 'dog race' = 'Bernese Mountain'
test_9: "dog race" = 'Bernese Mountain'
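Taken together, these one-line corpus files cover both valid filters and deliberately malformed ones (unbalanced quotes and parentheses, bare keywords, misused geo expressions). They presumably act as the seed inputs that libFuzzer mutates when driving the parse target defined just below.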
filter-parser/fuzz/fuzz_targets/parse.rs (new normal file, 18 lines)
@@ -0,0 +1,18 @@
#![no_main]
use filter_parser::{ErrorKind, FilterCondition};
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    if let Ok(s) = std::str::from_utf8(data) {
        // When we are fuzzing the parser we can get a stack overflow very easily.
        // But since this doesn't happen with a normal build we are just going to limit the fuzzer to 500 characters.
        if s.len() < 500 {
            match FilterCondition::parse(s) {
                Err(e) if matches!(e.kind(), ErrorKind::InternalError(_)) => {
                    panic!("Found an internal error: `{:?}`", e)
                }
                _ => (),
            }
        }
    }
});
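With the usual cargo-fuzz layout this target would be run with something like `cargo +nightly fuzz run parse` (a hypothetical invocation, not part of the diff). Note that it only panics when the parser reports an `InternalError`; every other `Err` counts as an expected rejection, and the 500-character cap works around stack overflows that only show up under the fuzzer's instrumented build.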
filter-parser/src/condition.rs (new normal file, 67 lines)
@@ -0,0 +1,67 @@
//! BNF grammar:
//!
//! ```text
//! condition = value ("=" | "!=" | ">" | ">=" | "<" | "<=") value
//! to = value value TO value
//! ```

use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::character::complete::multispace1;
use nom::combinator::cut;
use nom::sequence::{terminated, tuple};
use Condition::*;

use crate::{parse_value, FilterCondition, IResult, Span, Token};

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Condition<'a> {
    GreaterThan(Token<'a>),
    GreaterThanOrEqual(Token<'a>),
    Equal(Token<'a>),
    NotEqual(Token<'a>),
    Exists,
    LowerThan(Token<'a>),
    LowerThanOrEqual(Token<'a>),
    Between { from: Token<'a>, to: Token<'a> },
}

/// condition = value ("=" | "!=" | ">" | ">=" | "<" | "<=") value
pub fn parse_condition(input: Span) -> IResult<FilterCondition> {
    let operator = alt((tag("<="), tag(">="), tag("!="), tag("<"), tag(">"), tag("=")));
    let (input, (fid, op, value)) = tuple((parse_value, operator, cut(parse_value)))(input)?;

    let condition = match *op.fragment() {
        "<=" => FilterCondition::Condition { fid, op: LowerThanOrEqual(value) },
        ">=" => FilterCondition::Condition { fid, op: GreaterThanOrEqual(value) },
        "!=" => FilterCondition::Condition { fid, op: NotEqual(value) },
        "<" => FilterCondition::Condition { fid, op: LowerThan(value) },
        ">" => FilterCondition::Condition { fid, op: GreaterThan(value) },
        "=" => FilterCondition::Condition { fid, op: Equal(value) },
        _ => unreachable!(),
    };

    Ok((input, condition))
}

/// exists = value "EXISTS"
pub fn parse_exists(input: Span) -> IResult<FilterCondition> {
    let (input, key) = terminated(parse_value, tag("EXISTS"))(input)?;

    Ok((input, FilterCondition::Condition { fid: key, op: Exists }))
}

/// not_exists = value "NOT" WS+ "EXISTS"
pub fn parse_not_exists(input: Span) -> IResult<FilterCondition> {
    let (input, key) = parse_value(input)?;

    let (input, _) = tuple((tag("NOT"), multispace1, tag("EXISTS")))(input)?;
    Ok((input, FilterCondition::Not(Box::new(FilterCondition::Condition { fid: key, op: Exists }))))
}

/// to = value value "TO" WS+ value
pub fn parse_to(input: Span) -> IResult<FilterCondition> {
    let (input, (key, from, _, _, to)) =
        tuple((parse_value, parse_value, tag("TO"), multispace1, cut(parse_value)))(input)?;

    Ok((input, FilterCondition::Condition { fid: key, op: Between { from, to } }))
}
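As a minimal, hypothetical usage sketch (not part of this changeset): a comparison such as `subscribers >= 1000` comes back from the public API in lib.rs below as a `Condition` variant carrying the field and value tokens.

use filter_parser::{Condition, FilterCondition};

fn main() {
    // `parse` returns `Ok(None)` for an all-whitespace filter, hence the double unwrap.
    let filter = FilterCondition::parse("subscribers >= 1000").unwrap().unwrap();
    match filter {
        FilterCondition::Condition { fid, op: Condition::GreaterThanOrEqual(value) } => {
            assert_eq!(fid.value(), "subscribers");
            assert_eq!(value.value(), "1000");
        }
        other => panic!("unexpected parse result: {other:?}"),
    }
}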
filter-parser/src/error.rs (new normal file, 206 lines)
@@ -0,0 +1,206 @@
use std::fmt::Display;

use nom::error::{self, ParseError};
use nom::Parser;

use crate::{IResult, Span};

pub trait NomErrorExt<E> {
    fn is_failure(&self) -> bool;
    fn map_err<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E>;
    fn map_fail<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E>;
}

impl<E> NomErrorExt<E> for nom::Err<E> {
    fn is_failure(&self) -> bool {
        matches!(self, Self::Failure(_))
    }

    fn map_err<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E> {
        match self {
            e @ Self::Failure(_) => e,
            e => e.map(op),
        }
    }

    fn map_fail<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E> {
        match self {
            e @ Self::Error(_) => e,
            e => e.map(op),
        }
    }
}

/// cut a parser and map the error
pub fn cut_with_err<'a, O>(
    mut parser: impl FnMut(Span<'a>) -> IResult<'a, O>,
    mut with: impl FnMut(Error<'a>) -> Error<'a>,
) -> impl FnMut(Span<'a>) -> IResult<O> {
    move |input| match parser.parse(input) {
        Err(nom::Err::Error(e)) => Err(nom::Err::Failure(with(e))),
        rest => rest,
    }
}

#[derive(Debug)]
pub struct Error<'a> {
    context: Span<'a>,
    kind: ErrorKind<'a>,
}

#[derive(Debug)]
pub enum ExpectedValueKind {
    ReservedKeyword,
    Other,
}

#[derive(Debug)]
pub enum ErrorKind<'a> {
    ReservedGeo(&'a str),
    GeoRadius,
    GeoBoundingBox,
    MisusedGeoRadius,
    MisusedGeoBoundingBox,
    InvalidPrimary,
    ExpectedEof,
    ExpectedValue(ExpectedValueKind),
    MalformedValue,
    InOpeningBracket,
    InClosingBracket,
    NonFiniteFloat,
    InExpectedValue(ExpectedValueKind),
    ReservedKeyword(String),
    MissingClosingDelimiter(char),
    Char(char),
    InternalError(error::ErrorKind),
    DepthLimitReached,
    External(String),
}

impl<'a> Error<'a> {
    pub fn kind(&self) -> &ErrorKind<'a> {
        &self.kind
    }

    pub fn context(&self) -> &Span<'a> {
        &self.context
    }

    pub fn new_from_kind(context: Span<'a>, kind: ErrorKind<'a>) -> Self {
        Self { context, kind }
    }

    pub fn new_from_external(context: Span<'a>, error: impl std::error::Error) -> Self {
        Self::new_from_kind(context, ErrorKind::External(error.to_string()))
    }

    pub fn char(self) -> char {
        match self.kind {
            ErrorKind::Char(c) => c,
            error => panic!("Internal filter parser error: {:?}", error),
        }
    }
}

impl<'a> ParseError<Span<'a>> for Error<'a> {
    fn from_error_kind(input: Span<'a>, kind: error::ErrorKind) -> Self {
        let kind = match kind {
            error::ErrorKind::Eof => ErrorKind::ExpectedEof,
            kind => ErrorKind::InternalError(kind),
        };
        Self { context: input, kind }
    }

    fn append(_input: Span<'a>, _kind: error::ErrorKind, other: Self) -> Self {
        other
    }

    fn from_char(input: Span<'a>, c: char) -> Self {
        Self { context: input, kind: ErrorKind::Char(c) }
    }
}

impl<'a> Display for Error<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let input = self.context.fragment();
        // When printing our error message we want to escape all `\n` to be sure we keep our format with the
        // first line being the diagnostic and the second line being the incriminated filter.
        let escaped_input = input.escape_debug();

        match &self.kind {
            ErrorKind::ExpectedValue(_) if input.trim().is_empty() => {
                writeln!(f, "Was expecting a value but instead got nothing.")?
            }
            ErrorKind::ExpectedValue(ExpectedValueKind::ReservedKeyword) => {
                writeln!(f, "Was expecting a value but instead got `{escaped_input}`, which is a reserved keyword. To use `{escaped_input}` as a field name or a value, surround it by quotes.")?
            }
            ErrorKind::ExpectedValue(ExpectedValueKind::Other) => {
                writeln!(f, "Was expecting a value but instead got `{}`.", escaped_input)?
            }
            ErrorKind::MalformedValue => {
                writeln!(f, "Malformed value: `{}`.", escaped_input)?
            }
            ErrorKind::MissingClosingDelimiter(c) => {
                writeln!(f, "Expression `{}` is missing the following closing delimiter: `{}`.", escaped_input, c)?
            }
            ErrorKind::InvalidPrimary if input.trim().is_empty() => {
                writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` but instead got nothing.")?
            }
            ErrorKind::InvalidPrimary => {
                writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `{}`.", escaped_input)?
            }
            ErrorKind::ExpectedEof => {
                writeln!(f, "Found unexpected characters at the end of the filter: `{}`. You probably forgot an `OR` or an `AND` rule.", escaped_input)?
            }
            ErrorKind::GeoRadius => {
                writeln!(f, "The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.")?
            }
            ErrorKind::GeoBoundingBox => {
                writeln!(f, "The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.")?
            }
            ErrorKind::ReservedGeo(name) => {
                writeln!(f, "`{}` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance), or _geoBoundingBox([latitude, longitude], [latitude, longitude]) built-in rules to filter on `_geo` coordinates.", name.escape_debug())?
            }
            ErrorKind::MisusedGeoRadius => {
                writeln!(f, "The `_geoRadius` filter is an operation and can't be used as a value.")?
            }
            ErrorKind::MisusedGeoBoundingBox => {
                writeln!(f, "The `_geoBoundingBox` filter is an operation and can't be used as a value.")?
            }
            ErrorKind::ReservedKeyword(word) => {
                writeln!(f, "`{word}` is a reserved keyword and thus cannot be used as a field name unless it is put inside quotes. Use \"{word}\" or \'{word}\' instead.")?
            }
            ErrorKind::InOpeningBracket => {
                writeln!(f, "Expected `[` after `IN` keyword.")?
            }
            ErrorKind::InClosingBracket => {
                writeln!(f, "Expected matching `]` after the list of field names given to `IN[`")?
            }
            ErrorKind::NonFiniteFloat => {
                writeln!(f, "Non finite floats are not supported")?
            }
            ErrorKind::InExpectedValue(ExpectedValueKind::ReservedKeyword) => {
                writeln!(f, "Expected only comma-separated field names inside `IN[..]` but instead found `{escaped_input}`, which is a keyword. To use `{escaped_input}` as a field name or a value, surround it by quotes.")?
            }
            ErrorKind::InExpectedValue(ExpectedValueKind::Other) => {
                writeln!(f, "Expected only comma-separated field names inside `IN[..]` but instead found `{escaped_input}`.")?
            }
            ErrorKind::Char(c) => {
                panic!("Tried to display a char error with `{}`", c)
            }
            ErrorKind::DepthLimitReached => writeln!(
                f,
                "The filter exceeded the maximum depth limit. Try rewriting the filter so that it contains fewer nested conditions."
            )?,
            ErrorKind::InternalError(kind) => writeln!(
                f,
                "Encountered an internal `{:?}` error while parsing your filter. Please fill an issue", kind
            )?,
            ErrorKind::External(ref error) => writeln!(f, "{}", error)?,
        }
        let base_column = self.context.get_utf8_column();
        let size = self.context.fragment().chars().count();

        write!(f, "{}:{} {}", base_column, base_column + size, self.context.extra)
    }
}
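A small hypothetical sketch (not in the diff) of the two-line `Display` format this module produces: the diagnostic on the first line, then `start:end` UTF-8 columns followed by the original filter, which the parser stores in the span's `extra` field.

use filter_parser::FilterCondition;

fn main() {
    let err = FilterCondition::parse("channel Ponce").unwrap_err();
    // Per the snapshot tests in lib.rs below, this prints the diagnostic line
    // "Was expecting an operation `=`, `!=`, ... at `channel Ponce`."
    // followed by the location line:
    // 1:14 channel Ponce
    print!("{err}");
}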
filter-parser/src/lib.rs (new normal file, 814 lines)
@@ -0,0 +1,814 @@
//! BNF grammar:
//!
//! ```text
//! filter = expression EOF
//! expression = or
//! or = and ("OR" WS+ and)*
//! and = not ("AND" WS+ not)*
//! not = ("NOT" WS+ not) | primary
//! primary = (WS* "(" WS* expression WS* ")" WS*) | geoRadius | geoBoundingBox | in | condition | exists | not_exists | to
//! in = value "IN" WS* "[" value_list "]"
//! condition = value ("=" | "!=" | ">" | ">=" | "<" | "<=") value
//! exists = value "EXISTS"
//! not_exists = value "NOT" WS+ "EXISTS"
//! to = value value "TO" WS+ value
//! value = WS* ( word | singleQuoted | doubleQuoted) WS+
//! value_list = (value ("," value)* ","?)?
//! singleQuoted = "'" .* all but quotes "'"
//! doubleQuoted = "\"" .* all but double quotes "\""
//! word = (alphanumeric | _ | - | .)+
//! geoRadius = "_geoRadius(" WS* float WS* "," WS* float WS* "," float WS* ")"
//! geoBoundingBox = "_geoBoundingBox([" WS* float WS* "," WS* float WS* "], [" WS* float WS* "," WS* float WS* "]")
//! ```
//!
//! Other BNF grammar used to handle some specific errors:
//! ```text
//! geoPoint = WS* "_geoPoint(" (float ",")* ")"
//! ```
//!
//! Specific errors:
//! ================
//! - If a user tries to use a geoPoint, as a primary OR as a value, we must throw an error.
//! ```text
//! field = _geoPoint(12, 13, 14)
//! field < 12 AND _geoPoint(1, 2)
//! ```
//!
//! - If a user tries to use a geoRadius as a value we must throw an error.
//! ```text
//! field = _geoRadius(12, 13, 14)
//! ```
//!

mod condition;
mod error;
mod value;

use std::fmt::Debug;

pub use condition::{parse_condition, parse_to, Condition};
use condition::{parse_exists, parse_not_exists};
use error::{cut_with_err, ExpectedValueKind, NomErrorExt};
pub use error::{Error, ErrorKind};
use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::character::complete::{char, multispace0};
use nom::combinator::{cut, eof, map, opt};
use nom::multi::{many0, separated_list1};
use nom::number::complete::recognize_float;
use nom::sequence::{delimited, preceded, terminated, tuple};
use nom::Finish;
use nom_locate::LocatedSpan;
pub(crate) use value::parse_value;
use value::word_exact;

pub type Span<'a> = LocatedSpan<&'a str, &'a str>;

type IResult<'a, Ret> = nom::IResult<Span<'a>, Ret, Error<'a>>;

const MAX_FILTER_DEPTH: usize = 200;

#[derive(Debug, Clone, Eq)]
pub struct Token<'a> {
    /// The token in the original input; it should be used when possible.
    span: Span<'a>,
    /// If you need to modify the original input you can use the `value` field
    /// to store your modified input.
    value: Option<String>,
}

impl<'a> PartialEq for Token<'a> {
    fn eq(&self, other: &Self) -> bool {
        self.span.fragment() == other.span.fragment()
    }
}

impl<'a> Token<'a> {
    pub fn new(span: Span<'a>, value: Option<String>) -> Self {
        Self { span, value }
    }

    /// Returns the string contained in the span of the `Token`.
    /// This is only useful in the tests. You should always use
    /// the value.
    #[cfg(test)]
    pub fn lexeme(&self) -> &str {
        &self.span
    }

    /// Return the string contained in the token.
    pub fn value(&self) -> &str {
        self.value.as_ref().map_or(&self.span, |value| value)
    }

    pub fn as_external_error(&self, error: impl std::error::Error) -> Error<'a> {
        Error::new_from_external(self.span, error)
    }

    /// Returns a copy of the span this token was created with.
    pub fn original_span(&self) -> Span<'a> {
        self.span
    }

    pub fn parse_finite_float(&self) -> Result<f64, Error> {
        let value: f64 = self.value().parse().map_err(|e| self.as_external_error(e))?;
        if value.is_finite() {
            Ok(value)
        } else {
            Err(Error::new_from_kind(self.span, ErrorKind::NonFiniteFloat))
        }
    }
}

impl<'a> From<Span<'a>> for Token<'a> {
    fn from(span: Span<'a>) -> Self {
        Self { span, value: None }
    }
}

/// Allow [Token] to be constructed from &[str]
impl<'a> From<&'a str> for Token<'a> {
    fn from(s: &'a str) -> Self {
        Token::from(Span::new_extra(s, s))
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FilterCondition<'a> {
    Not(Box<Self>),
    Condition { fid: Token<'a>, op: Condition<'a> },
    In { fid: Token<'a>, els: Vec<Token<'a>> },
    Or(Vec<Self>),
    And(Vec<Self>),
    GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a> },
    GeoBoundingBox { top_left_point: [Token<'a>; 2], bottom_right_point: [Token<'a>; 2] },
}

impl<'a> FilterCondition<'a> {
    /// Returns the first token found at the specified depth, `None` if no token at this depth.
    pub fn token_at_depth(&self, depth: usize) -> Option<&Token> {
        match self {
            FilterCondition::Condition { fid, .. } if depth == 0 => Some(fid),
            FilterCondition::Or(subfilters) => {
                let depth = depth.saturating_sub(1);
                for f in subfilters.iter() {
                    if let Some(t) = f.token_at_depth(depth) {
                        return Some(t);
                    }
                }
                None
            }
            FilterCondition::And(subfilters) => {
                let depth = depth.saturating_sub(1);
                for f in subfilters.iter() {
                    if let Some(t) = f.token_at_depth(depth) {
                        return Some(t);
                    }
                }
                None
            }
            FilterCondition::GeoLowerThan { point: [point, _], .. } if depth == 0 => Some(point),
            _ => None,
        }
    }

    pub fn parse(input: &'a str) -> Result<Option<Self>, Error> {
        if input.trim().is_empty() {
            return Ok(None);
        }
        let span = Span::new_extra(input, input);
        parse_filter(span).finish().map(|(_rem, output)| Some(output))
    }
}

/// Remove OPTIONAL whitespace before AND after the provided parser.
fn ws<'a, O>(
    inner: impl FnMut(Span<'a>) -> IResult<'a, O>,
) -> impl FnMut(Span<'a>) -> IResult<'a, O> {
    delimited(multispace0, inner, multispace0)
}

/// value_list = (value ("," value)* ","?)?
fn parse_value_list(input: Span) -> IResult<Vec<Token>> {
    let (input, first_value) = opt(parse_value)(input)?;
    if let Some(first_value) = first_value {
        let value_list_el_parser = preceded(ws(tag(",")), parse_value);

        let (input, mut values) = many0(value_list_el_parser)(input)?;
        let (input, _) = opt(ws(tag(",")))(input)?;
        values.insert(0, first_value);

        Ok((input, values))
    } else {
        Ok((input, vec![]))
    }
}

/// "IN" WS* "[" value_list "]"
fn parse_in_body(input: Span) -> IResult<Vec<Token>> {
    let (input, _) = ws(word_exact("IN"))(input)?;

    // everything after `IN` can be a failure
    let (input, _) =
        cut_with_err(tag("["), |_| Error::new_from_kind(input, ErrorKind::InOpeningBracket))(
            input,
        )?;

    let (input, content) = cut(parse_value_list)(input)?;

    // everything after `IN` can be a failure
    let (input, _) = cut_with_err(ws(tag("]")), |_| {
        if eof::<_, ()>(input).is_ok() {
            Error::new_from_kind(input, ErrorKind::InClosingBracket)
        } else {
            let expected_value_kind = match parse_value(input) {
                Err(nom::Err::Error(e)) => match e.kind() {
                    ErrorKind::ReservedKeyword(_) => ExpectedValueKind::ReservedKeyword,
                    _ => ExpectedValueKind::Other,
                },
                _ => ExpectedValueKind::Other,
            };
            Error::new_from_kind(input, ErrorKind::InExpectedValue(expected_value_kind))
        }
    })(input)?;

    Ok((input, content))
}

/// in = value "IN" "[" value_list "]"
fn parse_in(input: Span) -> IResult<FilterCondition> {
    let (input, value) = parse_value(input)?;
    let (input, content) = parse_in_body(input)?;

    let filter = FilterCondition::In { fid: value, els: content };
    Ok((input, filter))
}

/// not_in = value "NOT" WS* "IN" "[" value_list "]"
fn parse_not_in(input: Span) -> IResult<FilterCondition> {
    let (input, value) = parse_value(input)?;
    let (input, _) = word_exact("NOT")(input)?;
    let (input, content) = parse_in_body(input)?;

    let filter = FilterCondition::Not(Box::new(FilterCondition::In { fid: value, els: content }));
    Ok((input, filter))
}

/// or = and ("OR" WS+ and)*
fn parse_or(input: Span, depth: usize) -> IResult<FilterCondition> {
    if depth > MAX_FILTER_DEPTH {
        return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
    }
    let (input, first_filter) = parse_and(input, depth + 1)?;
    // if we found an `OR` then we MUST find something next
    let (input, mut ors) =
        many0(preceded(ws(word_exact("OR")), cut(|input| parse_and(input, depth + 1))))(input)?;

    let filter = if ors.is_empty() {
        first_filter
    } else {
        ors.insert(0, first_filter);
        FilterCondition::Or(ors)
    };

    Ok((input, filter))
}

/// and = not ("AND" WS+ not)*
fn parse_and(input: Span, depth: usize) -> IResult<FilterCondition> {
    if depth > MAX_FILTER_DEPTH {
        return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
    }
    let (input, first_filter) = parse_not(input, depth + 1)?;
    // if we found an `AND` then we MUST find something next
    let (input, mut ands) =
        many0(preceded(ws(word_exact("AND")), cut(|input| parse_not(input, depth + 1))))(input)?;

    let filter = if ands.is_empty() {
        first_filter
    } else {
        ands.insert(0, first_filter);
        FilterCondition::And(ands)
    };

    Ok((input, filter))
}

/// not = ("NOT" WS+ not) | primary
/// We can have multiple consecutive not, e.g.: `NOT NOT channel = mv`.
/// If we parse a `NOT` we MUST parse something behind.
fn parse_not(input: Span, depth: usize) -> IResult<FilterCondition> {
    if depth > MAX_FILTER_DEPTH {
        return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
    }
    alt((
        map(
            preceded(ws(word_exact("NOT")), cut(|input| parse_not(input, depth + 1))),
            |e| match e {
                FilterCondition::Not(e) => *e,
                _ => FilterCondition::Not(Box::new(e)),
            },
        ),
        |input| parse_primary(input, depth + 1),
    ))(input)
}

/// geoRadius = WS* "_geoRadius(float WS* "," WS* float WS* "," WS* float)
/// If we parse `_geoRadius` we MUST parse the rest of the expression.
fn parse_geo_radius(input: Span) -> IResult<FilterCondition> {
    // we want to allow space BEFORE the _geoRadius but not after
    let parsed = preceded(
        tuple((multispace0, word_exact("_geoRadius"))),
        // if we were able to parse `_geoRadius` and can't parse the rest of the input we return a failure
        cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
    )(input)
    .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::GeoRadius)));

    let (input, args) = parsed?;

    if args.len() != 3 {
        return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::GeoRadius)));
    }

    let res = FilterCondition::GeoLowerThan {
        point: [args[0].into(), args[1].into()],
        radius: args[2].into(),
    };
    Ok((input, res))
}

/// geoBoundingBox = WS* "_geoBoundingBox([float WS* "," WS* float WS* "], [float WS* "," WS* float WS* "]")
/// If we parse `_geoBoundingBox` we MUST parse the rest of the expression.
fn parse_geo_bounding_box(input: Span) -> IResult<FilterCondition> {
    // we want to allow space BEFORE the _geoBoundingBox but not after
    let parsed = preceded(
        tuple((multispace0, word_exact("_geoBoundingBox"))),
        // if we were able to parse `_geoBoundingBox` and can't parse the rest of the input we return a failure
        cut(delimited(
            char('('),
            separated_list1(
                tag(","),
                ws(delimited(char('['), separated_list1(tag(","), ws(recognize_float)), char(']'))),
            ),
            char(')'),
        )),
    )(input)
    .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::GeoBoundingBox)));

    let (input, args) = parsed?;

    if args.len() != 2 || args[0].len() != 2 || args[1].len() != 2 {
        return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::GeoBoundingBox)));
    }

    let res = FilterCondition::GeoBoundingBox {
        top_left_point: [args[0][0].into(), args[0][1].into()],
        bottom_right_point: [args[1][0].into(), args[1][1].into()],
    };
    Ok((input, res))
}

/// geoPoint = WS* "_geoPoint(float WS* "," WS* float WS* "," WS* float)
fn parse_geo_point(input: Span) -> IResult<FilterCondition> {
    // we want to forbid space BEFORE the _geoPoint but not after
    tuple((
        multispace0,
        tag("_geoPoint"),
        // if we were able to parse `_geoPoint` we are going to return a Failure whatever happens next.
        cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
    ))(input)
    .map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoPoint"))))?;
    // if we succeeded we still return a `Failure` because geoPoints are not allowed
    Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoPoint"))))
}

fn parse_error_reserved_keyword(input: Span) -> IResult<FilterCondition> {
    match parse_condition(input) {
        Ok(result) => Ok(result),
        Err(nom::Err::Error(inner) | nom::Err::Failure(inner)) => match inner.kind() {
            ErrorKind::ExpectedValue(ExpectedValueKind::ReservedKeyword) => {
                Err(nom::Err::Failure(inner))
            }
            _ => Err(nom::Err::Error(inner)),
        },
        Err(e) => Err(e),
    }
}

/// primary = (WS* "(" WS* expression WS* ")" WS*) | geoRadius | geoBoundingBox | in | not_in | condition | exists | not_exists | to
fn parse_primary(input: Span, depth: usize) -> IResult<FilterCondition> {
    if depth > MAX_FILTER_DEPTH {
        return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
    }
    alt((
        // if we find a first parenthesis, then we must parse an expression and find the closing parenthesis
        delimited(
            ws(char('(')),
            cut(|input| parse_expression(input, depth + 1)),
            cut_with_err(ws(char(')')), |c| {
                Error::new_from_kind(input, ErrorKind::MissingClosingDelimiter(c.char()))
            }),
        ),
        parse_geo_radius,
        parse_geo_bounding_box,
        parse_in,
        parse_not_in,
        parse_condition,
        parse_exists,
        parse_not_exists,
        parse_to,
        // the next lines are only for error handling and are written at the end to have the least possible performance impact
        parse_geo_point,
        parse_error_reserved_keyword,
    ))(input)
    // if the inner parsers did not match enough information to return an accurate error
    .map_err(|e| e.map_err(|_| Error::new_from_kind(input, ErrorKind::InvalidPrimary)))
}

/// expression = or
pub fn parse_expression(input: Span, depth: usize) -> IResult<FilterCondition> {
    parse_or(input, depth)
}

/// filter = expression EOF
pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
    terminated(|input| parse_expression(input, 0), eof)(input)
}

#[cfg(test)]
pub mod tests {
    use super::*;

    /// Create a raw [Token]. You must specify the string that appears BEFORE your element, followed by your element.
    pub fn rtok<'a>(before: &'a str, value: &'a str) -> Token<'a> {
        // if the string is empty we still need to return 1 for the line number
        let lines = before.is_empty().then_some(1).unwrap_or_else(|| before.lines().count());
        let offset = before.chars().count();
        // the extra field is not checked in the tests so we can set it to nothing
        unsafe { Span::new_from_raw_offset(offset, lines as u32, value, "") }.into()
    }

    #[test]
    fn parse() {
        use FilterCondition as Fc;

        fn p(s: &str) -> impl std::fmt::Display + '_ {
            Fc::parse(s).unwrap().unwrap()
        }

        // Test equal
        insta::assert_display_snapshot!(p("channel = Ponce"), @"{channel} = {Ponce}");
        insta::assert_display_snapshot!(p("subscribers = 12"), @"{subscribers} = {12}");
        insta::assert_display_snapshot!(p("channel = 'Mister Mv'"), @"{channel} = {Mister Mv}");
        insta::assert_display_snapshot!(p("channel = \"Mister Mv\""), @"{channel} = {Mister Mv}");
        insta::assert_display_snapshot!(p("'dog race' = Borzoi"), @"{dog race} = {Borzoi}");
        insta::assert_display_snapshot!(p("\"dog race\" = Chusky"), @"{dog race} = {Chusky}");
        insta::assert_display_snapshot!(p("\"dog race\" = \"Bernese Mountain\""), @"{dog race} = {Bernese Mountain}");
        insta::assert_display_snapshot!(p("'dog race' = 'Bernese Mountain'"), @"{dog race} = {Bernese Mountain}");
        insta::assert_display_snapshot!(p("\"dog race\" = 'Bernese Mountain'"), @"{dog race} = {Bernese Mountain}");

        // Test IN
        insta::assert_display_snapshot!(p("colour IN[]"), @"{colour} IN[]");
        insta::assert_display_snapshot!(p("colour IN[green]"), @"{colour} IN[{green}, ]");
        insta::assert_display_snapshot!(p("colour IN[green,]"), @"{colour} IN[{green}, ]");
        insta::assert_display_snapshot!(p("colour NOT IN[green,blue]"), @"NOT ({colour} IN[{green}, {blue}, ])");
        insta::assert_display_snapshot!(p(" colour IN [ green , blue , ]"), @"{colour} IN[{green}, {blue}, ]");

        // Test IN + OR/AND/()
        insta::assert_display_snapshot!(p(" colour IN [green, blue] AND color = green "), @"AND[{colour} IN[{green}, {blue}, ], {color} = {green}, ]");
        insta::assert_display_snapshot!(p("NOT (colour IN [green, blue]) AND color = green "), @"AND[NOT ({colour} IN[{green}, {blue}, ]), {color} = {green}, ]");
        insta::assert_display_snapshot!(p("x = 1 OR NOT (colour IN [green, blue] OR color = green) "), @"OR[{x} = {1}, NOT (OR[{colour} IN[{green}, {blue}, ], {color} = {green}, ]), ]");

        // Test whitespace start/end
        insta::assert_display_snapshot!(p(" colour = green "), @"{colour} = {green}");
        insta::assert_display_snapshot!(p(" (colour = green OR colour = red) "), @"OR[{colour} = {green}, {colour} = {red}, ]");
        insta::assert_display_snapshot!(p(" colour IN [green, blue] AND color = green "), @"AND[{colour} IN[{green}, {blue}, ], {color} = {green}, ]");
        insta::assert_display_snapshot!(p(" colour NOT IN [green, blue] "), @"NOT ({colour} IN[{green}, {blue}, ])");
        insta::assert_display_snapshot!(p(" colour IN [green, blue] "), @"{colour} IN[{green}, {blue}, ]");

        // Test conditions
        insta::assert_display_snapshot!(p("channel != ponce"), @"{channel} != {ponce}");
        insta::assert_display_snapshot!(p("NOT channel = ponce"), @"NOT ({channel} = {ponce})");
        insta::assert_display_snapshot!(p("subscribers < 1000"), @"{subscribers} < {1000}");
        insta::assert_display_snapshot!(p("subscribers > 1000"), @"{subscribers} > {1000}");
        insta::assert_display_snapshot!(p("subscribers <= 1000"), @"{subscribers} <= {1000}");
        insta::assert_display_snapshot!(p("subscribers >= 1000"), @"{subscribers} >= {1000}");
        insta::assert_display_snapshot!(p("subscribers <= 1000"), @"{subscribers} <= {1000}");
        insta::assert_display_snapshot!(p("subscribers 100 TO 1000"), @"{subscribers} {100} TO {1000}");

        // Test NOT + EXISTS
        insta::assert_display_snapshot!(p("subscribers EXISTS"), @"{subscribers} EXISTS");
        insta::assert_display_snapshot!(p("NOT subscribers < 1000"), @"NOT ({subscribers} < {1000})");
        insta::assert_display_snapshot!(p("NOT subscribers EXISTS"), @"NOT ({subscribers} EXISTS)");
        insta::assert_display_snapshot!(p("subscribers NOT EXISTS"), @"NOT ({subscribers} EXISTS)");
        insta::assert_display_snapshot!(p("NOT subscribers NOT EXISTS"), @"{subscribers} EXISTS");
        insta::assert_display_snapshot!(p("subscribers NOT EXISTS"), @"NOT ({subscribers} EXISTS)");
        insta::assert_display_snapshot!(p("NOT subscribers 100 TO 1000"), @"NOT ({subscribers} {100} TO {1000})");

        // Test nested NOT
        insta::assert_display_snapshot!(p("NOT NOT NOT NOT x = 5"), @"{x} = {5}");
        insta::assert_display_snapshot!(p("NOT NOT (NOT NOT x = 5)"), @"{x} = {5}");

        // Test geo radius
        insta::assert_display_snapshot!(p("_geoRadius(12, 13, 14)"), @"_geoRadius({12}, {13}, {14})");
        insta::assert_display_snapshot!(p("NOT _geoRadius(12, 13, 14)"), @"NOT (_geoRadius({12}, {13}, {14}))");
        insta::assert_display_snapshot!(p("_geoRadius(12,13,14)"), @"_geoRadius({12}, {13}, {14})");

        // Test geo bounding box
        insta::assert_display_snapshot!(p("_geoBoundingBox([12, 13], [14, 15])"), @"_geoBoundingBox([{12}, {13}], [{14}, {15}])");
        insta::assert_display_snapshot!(p("NOT _geoBoundingBox([12, 13], [14, 15])"), @"NOT (_geoBoundingBox([{12}, {13}], [{14}, {15}]))");
        insta::assert_display_snapshot!(p("_geoBoundingBox([12,13],[14,15])"), @"_geoBoundingBox([{12}, {13}], [{14}, {15}])");

        // Test OR + AND
        insta::assert_display_snapshot!(p("channel = ponce AND 'dog race' != 'bernese mountain'"), @"AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
        insta::assert_display_snapshot!(p("channel = ponce OR 'dog race' != 'bernese mountain'"), @"OR[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
        insta::assert_display_snapshot!(p("channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000"), @"OR[AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ], {subscribers} > {1000}, ]");
        insta::assert_display_snapshot!(
            p("channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000 OR colour = red OR colour = blue AND size = 7"),
            @"OR[AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ], {subscribers} > {1000}, {colour} = {red}, AND[{colour} = {blue}, {size} = {7}, ], ]"
        );

        // Test parentheses
        insta::assert_display_snapshot!(p("channel = ponce AND ( 'dog race' != 'bernese mountain' OR subscribers > 1000 )"), @"AND[{channel} = {ponce}, OR[{dog race} != {bernese mountain}, {subscribers} > {1000}, ], ]");
        insta::assert_display_snapshot!(p("(channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000) AND _geoRadius(12, 13, 14)"), @"AND[OR[AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ], {subscribers} > {1000}, ], _geoRadius({12}, {13}, {14}), ]");

        // Test recursion
        // This is the most that is allowed
        insta::assert_display_snapshot!(
            p("(((((((((((((((((((((((((((((((((((((((((((((((((x = 1)))))))))))))))))))))))))))))))))))))))))))))))))"),
            @"{x} = {1}"
        );
        insta::assert_display_snapshot!(
            p("NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT x = 1"),
            @"NOT ({x} = {1})"
        );

        // Confusing keywords
        insta::assert_display_snapshot!(p(r#"NOT "OR" EXISTS AND "EXISTS" NOT EXISTS"#), @"AND[NOT ({OR} EXISTS), NOT ({EXISTS} EXISTS), ]");
    }

    #[test]
    fn error() {
        use FilterCondition as Fc;

        fn p(s: &str) -> impl std::fmt::Display + '_ {
            Fc::parse(s).unwrap_err().to_string()
        }

        insta::assert_display_snapshot!(p("channel = Ponce = 12"), @r###"
        Found unexpected characters at the end of the filter: `= 12`. You probably forgot an `OR` or an `AND` rule.
        17:21 channel = Ponce = 12
        "###);

        insta::assert_display_snapshot!(p("channel = "), @r###"
        Was expecting a value but instead got nothing.
        14:14 channel =
        "###);

        insta::assert_display_snapshot!(p("channel = 🐻"), @r###"
        Was expecting a value but instead got `🐻`.
        11:12 channel = 🐻
        "###);

        insta::assert_display_snapshot!(p("channel = 🐻 AND followers < 100"), @r###"
        Was expecting a value but instead got `🐻`.
        11:12 channel = 🐻 AND followers < 100
        "###);

        insta::assert_display_snapshot!(p("'OR'"), @r###"
        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `\'OR\'`.
        1:5 'OR'
        "###);

        insta::assert_display_snapshot!(p("OR"), @r###"
        Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
        1:3 OR
        "###);

        insta::assert_display_snapshot!(p("channel Ponce"), @r###"
        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `channel Ponce`.
        1:14 channel Ponce
        "###);

        insta::assert_display_snapshot!(p("channel = Ponce OR"), @r###"
        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` but instead got nothing.
        19:19 channel = Ponce OR
        "###);

        insta::assert_display_snapshot!(p("_geoRadius"), @r###"
        The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
        1:11 _geoRadius
        "###);

        insta::assert_display_snapshot!(p("_geoRadius = 12"), @r###"
        The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
        1:16 _geoRadius = 12
        "###);

        insta::assert_display_snapshot!(p("_geoBoundingBox"), @r###"
        The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
        1:16 _geoBoundingBox
        "###);

        insta::assert_display_snapshot!(p("_geoBoundingBox = 12"), @r###"
        The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
        1:21 _geoBoundingBox = 12
        "###);

        insta::assert_display_snapshot!(p("_geoBoundingBox(1.0, 1.0)"), @r###"
        The `_geoBoundingBox` filter expects two pairs of arguments: `_geoBoundingBox([latitude, longitude], [latitude, longitude])`.
        1:26 _geoBoundingBox(1.0, 1.0)
        "###);

        insta::assert_display_snapshot!(p("_geoPoint(12, 13, 14)"), @r###"
        `_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance), or _geoBoundingBox([latitude, longitude], [latitude, longitude]) built-in rules to filter on `_geo` coordinates.
        1:22 _geoPoint(12, 13, 14)
        "###);

        insta::assert_display_snapshot!(p("position <= _geoPoint(12, 13, 14)"), @r###"
        `_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance), or _geoBoundingBox([latitude, longitude], [latitude, longitude]) built-in rules to filter on `_geo` coordinates.
        13:34 position <= _geoPoint(12, 13, 14)
        "###);

        insta::assert_display_snapshot!(p("position <= _geoRadius(12, 13, 14)"), @r###"
        The `_geoRadius` filter is an operation and can't be used as a value.
        13:35 position <= _geoRadius(12, 13, 14)
        "###);

        insta::assert_display_snapshot!(p("channel = 'ponce"), @r###"
        Expression `\'ponce` is missing the following closing delimiter: `'`.
        11:17 channel = 'ponce
        "###);

        insta::assert_display_snapshot!(p("channel = \"ponce"), @r###"
        Expression `\"ponce` is missing the following closing delimiter: `"`.
        11:17 channel = "ponce
        "###);

        insta::assert_display_snapshot!(p("channel = mv OR (followers >= 1000"), @r###"
        Expression `(followers >= 1000` is missing the following closing delimiter: `)`.
        17:35 channel = mv OR (followers >= 1000
        "###);

        insta::assert_display_snapshot!(p("channel = mv OR followers >= 1000)"), @r###"
        Found unexpected characters at the end of the filter: `)`. You probably forgot an `OR` or an `AND` rule.
        34:35 channel = mv OR followers >= 1000)
        "###);

        insta::assert_display_snapshot!(p("colour NOT EXIST"), @r###"
        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `colour NOT EXIST`.
        1:17 colour NOT EXIST
        "###);

        insta::assert_display_snapshot!(p("subscribers 100 TO1000"), @r###"
        Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `_geoRadius`, or `_geoBoundingBox` at `subscribers 100 TO1000`.
        1:23 subscribers 100 TO1000
        "###);

        insta::assert_display_snapshot!(p("channel = ponce ORdog != 'bernese mountain'"), @r###"
        Found unexpected characters at the end of the filter: `ORdog != \'bernese mountain\'`. You probably forgot an `OR` or an `AND` rule.
        17:44 channel = ponce ORdog != 'bernese mountain'
        "###);

        insta::assert_display_snapshot!(p("colour IN blue, green]"), @r###"
        Expected `[` after `IN` keyword.
        11:23 colour IN blue, green]
        "###);

        insta::assert_display_snapshot!(p("colour IN [blue, green, 'blue' > 2]"), @r###"
        Expected only comma-separated field names inside `IN[..]` but instead found `> 2]`.
        32:36 colour IN [blue, green, 'blue' > 2]
        "###);

        insta::assert_display_snapshot!(p("colour IN [blue, green, AND]"), @r###"
        Expected only comma-separated field names inside `IN[..]` but instead found `AND]`.
        25:29 colour IN [blue, green, AND]
        "###);

        insta::assert_display_snapshot!(p("colour IN [blue, green"), @r###"
        Expected matching `]` after the list of field names given to `IN[`
        23:23 colour IN [blue, green
        "###);

        insta::assert_display_snapshot!(p("colour IN ['blue, green"), @r###"
        Expression `\'blue, green` is missing the following closing delimiter: `'`.
        12:24 colour IN ['blue, green
        "###);

        insta::assert_display_snapshot!(p("x = EXISTS"), @r###"
        Was expecting a value but instead got `EXISTS`, which is a reserved keyword. To use `EXISTS` as a field name or a value, surround it by quotes.
        5:11 x = EXISTS
        "###);

        insta::assert_display_snapshot!(p("AND = 8"), @r###"
        Was expecting a value but instead got `AND`, which is a reserved keyword. To use `AND` as a field name or a value, surround it by quotes.
        1:4 AND = 8
        "###);

        insta::assert_display_snapshot!(p("((((((((((((((((((((((((((((((((((((((((((((((((((x = 1))))))))))))))))))))))))))))))))))))))))))))))))))"), @r###"
        The filter exceeded the maximum depth limit. Try rewriting the filter so that it contains fewer nested conditions.
        51:106 ((((((((((((((((((((((((((((((((((((((((((((((((((x = 1))))))))))))))))))))))))))))))))))))))))))))))))))
        "###);

        insta::assert_display_snapshot!(
            p("NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT x = 1"),
            @r###"
        The filter exceeded the maximum depth limit. Try rewriting the filter so that it contains fewer nested conditions.
        797:802 NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT x = 1
        "###
        );

        insta::assert_display_snapshot!(p(r#"NOT OR EXISTS AND EXISTS NOT EXISTS"#), @r###"
        Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
        5:7 NOT OR EXISTS AND EXISTS NOT EXISTS
        "###);
    }

    #[test]
    fn depth() {
        let filter = FilterCondition::parse("account_ids=1 OR account_ids=2 OR account_ids=3 OR account_ids=4 OR account_ids=5 OR account_ids=6").unwrap().unwrap();
        assert!(filter.token_at_depth(1).is_some());
        assert!(filter.token_at_depth(2).is_none());

        let filter = FilterCondition::parse("(account_ids=1 OR (account_ids=2 AND account_ids=3) OR (account_ids=4 AND account_ids=5) OR account_ids=6)").unwrap().unwrap();
        assert!(filter.token_at_depth(2).is_some());
        assert!(filter.token_at_depth(3).is_none());

        let filter = FilterCondition::parse("account_ids=1 OR account_ids=2 AND account_ids=3 OR account_ids=4 AND account_ids=5 OR account_ids=6").unwrap().unwrap();
        assert!(filter.token_at_depth(2).is_some());
        assert!(filter.token_at_depth(3).is_none());
    }

    #[test]
    fn token_from_str() {
        let s = "test string that should not be parsed";
        let token: Token = s.into();
        assert_eq!(token.value(), s);
    }
}

impl<'a> std::fmt::Display for FilterCondition<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FilterCondition::Not(filter) => {
                write!(f, "NOT ({filter})")
            }
            FilterCondition::Condition { fid, op } => {
                write!(f, "{fid} {op}")
            }
            FilterCondition::In { fid, els } => {
                write!(f, "{fid} IN[")?;
                for el in els {
                    write!(f, "{el}, ")?;
                }
                write!(f, "]")
            }
            FilterCondition::Or(els) => {
                write!(f, "OR[")?;
                for el in els {
                    write!(f, "{el}, ")?;
                }
                write!(f, "]")
            }
            FilterCondition::And(els) => {
                write!(f, "AND[")?;
                for el in els {
                    write!(f, "{el}, ")?;
                }
                write!(f, "]")
            }
            FilterCondition::GeoLowerThan { point, radius } => {
                write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
            }
            FilterCondition::GeoBoundingBox { top_left_point, bottom_right_point } => {
                write!(
                    f,
                    "_geoBoundingBox([{}, {}], [{}, {}])",
                    top_left_point[0],
                    top_left_point[1],
                    bottom_right_point[0],
                    bottom_right_point[1]
                )
            }
        }
    }
}

impl<'a> std::fmt::Display for Condition<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Condition::GreaterThan(token) => write!(f, "> {token}"),
            Condition::GreaterThanOrEqual(token) => write!(f, ">= {token}"),
            Condition::Equal(token) => write!(f, "= {token}"),
            Condition::NotEqual(token) => write!(f, "!= {token}"),
            Condition::Exists => write!(f, "EXISTS"),
            Condition::LowerThan(token) => write!(f, "< {token}"),
            Condition::LowerThanOrEqual(token) => write!(f, "<= {token}"),
            Condition::Between { from, to } => write!(f, "{from} TO {to}"),
        }
    }
}

impl<'a> std::fmt::Display for Token<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{{{}}}", self.value())
    }
}
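Another hypothetical round-trip sketch, grounded in the snapshot tests above: geo filters parse into the dedicated `GeoLowerThan` and `GeoBoundingBox` variants, and the `Display` impls render every token between braces.

use filter_parser::FilterCondition;

fn main() {
    // Matches the "Test geo radius" snapshot notation above.
    let radius = FilterCondition::parse("_geoRadius(12, 13, 14)").unwrap().unwrap();
    assert_eq!(radius.to_string(), "_geoRadius({12}, {13}, {14})");

    // Matches the "Test geo bounding box" snapshot notation above.
    let bbox = FilterCondition::parse("_geoBoundingBox([12, 13], [14, 15])").unwrap().unwrap();
    assert_eq!(bbox.to_string(), "_geoBoundingBox([{12}, {13}], [{14}, {15}])");
}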
filter-parser/src/main.rs (new normal file, 16 lines)
@@ -0,0 +1,16 @@
fn main() {
    let input = std::env::args().nth(1).expect("You must provide a filter to test");

    println!("Trying to execute the following filter:\n{}\n", input);

    match filter_parser::FilterCondition::parse(&input) {
        Ok(filter) => {
            println!("✅ Valid filter");
            println!("{:#?}", filter);
        }
        Err(e) => {
            println!("❎ Invalid filter");
            println!("{}", e);
        }
    }
}
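Assuming the binary is built as part of this crate, it can be exercised with something like `cargo run -p filter-parser -- 'channel = ponce AND subscribers > 1000'` (a hypothetical invocation): a valid filter prints the debug tree, an invalid one prints the two-line diagnostic from error.rs.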
364
filter-parser/src/value.rs
Normal file
364
filter-parser/src/value.rs
Normal file
@ -0,0 +1,364 @@
|
||||
use nom::branch::alt;
|
||||
use nom::bytes::complete::{take_till, take_while, take_while1};
|
||||
use nom::character::complete::{char, multispace0};
|
||||
use nom::combinator::cut;
|
||||
use nom::sequence::{delimited, terminated};
|
||||
use nom::{InputIter, InputLength, InputTake, Slice};
|
||||
|
||||
use crate::error::{ExpectedValueKind, NomErrorExt};
|
||||
use crate::{
|
||||
parse_geo_bounding_box, parse_geo_point, parse_geo_radius, Error, ErrorKind, IResult, Span,
|
||||
Token,
|
||||
};
|
||||
|
||||
/// This function goes through all characters in the [Span] if it finds any escaped character (`\`).
|
||||
/// It generates a new string with all `\` removed from the [Span].
|
||||
fn unescape(buf: Span, char_to_escape: char) -> String {
|
||||
let to_escape = format!("\\{}", char_to_escape);
|
||||
buf.replace(&to_escape, &char_to_escape.to_string())
|
||||
}
|
||||
|
||||
/// Parse a value in quote. If it encounter an escaped quote it'll unescape it.
|
||||
fn quoted_by(quote: char, input: Span) -> IResult<Token> {
|
||||
// empty fields / values are valid in json
|
||||
if input.is_empty() {
|
||||
return Ok((input.slice(input.input_len()..), input.into()));
|
||||
}
|
||||
|
||||
let mut escaped = false;
|
||||
let mut i = input.iter_indices();
|
||||
|
||||
while let Some((idx, c)) = i.next() {
|
||||
if c == quote {
|
||||
let (rem, output) = input.take_split(idx);
|
||||
return Ok((rem, Token::new(output, escaped.then(|| unescape(output, quote)))));
|
||||
} else if c == '\\' {
|
||||
if let Some((_, c)) = i.next() {
|
||||
escaped |= c == quote;
|
||||
} else {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(
|
||||
input,
|
||||
ErrorKind::MalformedValue,
|
||||
)));
|
||||
}
|
||||
}
|
||||
// if it was preceeded by a `\` or if it was anything else we can continue to advance
|
||||
}
|
||||
|
||||
Ok((
|
||||
input.slice(input.input_len()..),
|
||||
Token::new(input, escaped.then(|| unescape(input, quote))),
|
||||
))
|
||||
}
|
||||
|
||||
// word = (alphanumeric | _ | - | .)+ except for reserved keywords
|
||||
pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<Token<'a>> {
|
||||
let (input, word): (_, Token<'a>) =
|
||||
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
|
||||
if is_keyword(word.value()) {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(
|
||||
input,
|
||||
ErrorKind::ReservedKeyword(word.value().to_owned()),
|
||||
)));
|
||||
}
|
||||
Ok((input, word))
|
||||
}
|
||||
|
||||
// word = {tag}
|
||||
pub fn word_exact<'a, 'b: 'a>(tag: &'b str) -> impl Fn(Span<'a>) -> IResult<'a, Token<'a>> {
|
||||
move |input| {
|
||||
let (input, word): (_, Token<'a>) =
|
||||
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
|
||||
if word.value() == tag {
|
||||
Ok((input, word))
|
||||
} else {
|
||||
Err(nom::Err::Error(Error::new_from_kind(
|
||||
input,
|
||||
ErrorKind::InternalError(nom::error::ErrorKind::Tag),
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// value = WS* ( word | singleQuoted | doubleQuoted) WS*
pub fn parse_value(input: Span) -> IResult<Token> {
    // to get a better diagnostic message we strip the leading whitespace from the input right now
    let (input, _) = take_while(char::is_whitespace)(input)?;

    // then, we want to check if the user is misusing a geo expression.
    // A geo point can never be a valid value, so this parse cannot succeed:
    // if it reports a failure, we forward that error.
    if let Err(err) = parse_geo_point(input) {
        if err.is_failure() {
            return Err(err);
        }
    }
    match parse_geo_radius(input) {
        Ok(_) => {
            return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::MisusedGeoRadius)))
        }
        // a failure means the user wrote a malformed _geoRadius filter, but instead of showing
        // them how to fix their syntax, we tell them they should not use this filter as a value.
        Err(e) if e.is_failure() => {
            return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::MisusedGeoRadius)))
        }
        _ => (),
    }

    match parse_geo_bounding_box(input) {
        Ok(_) => {
            return Err(nom::Err::Failure(Error::new_from_kind(
                input,
                ErrorKind::MisusedGeoBoundingBox,
            )))
        }
        // likewise, a failure means the user wrote a malformed _geoBoundingBox filter, but we
        // tell them they should not use this filter as a value rather than how to fix its syntax.
        Err(e) if e.is_failure() => {
            return Err(nom::Err::Failure(Error::new_from_kind(
                input,
                ErrorKind::MisusedGeoBoundingBox,
            )))
        }
        _ => (),
    }

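    // For example (hypothetical filter): in `position = _geoRadius(12, 13, 14)` the
    // right-hand side hits the `parse_geo_radius` check above and is rejected with
    // `MisusedGeoRadius`, since geo expressions are only valid as standalone
    // conditions, never as plain values.
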
    // this parser is only used when an error is encountered: it parses the
    // largest string possible that does not contain any "language" syntax.
    // If we try to parse `name = 🦀 AND language = rust` we want to return an
    // error saying we could not parse `🦀`, not that no value was found or that
    // we could not parse `🦀 AND language = rust`.
    // We remove the space before entering the alt because if we don't,
    // the errors we create from the output of the alt have spaces everywhere
    let error_word = take_till::<_, _, Error>(is_syntax_component);

    let (input, value) = terminated(
        alt((
            delimited(char('\''), cut(|input| quoted_by('\'', input)), cut(char('\''))),
            delimited(char('"'), cut(|input| quoted_by('"', input)), cut(char('"'))),
            word_not_keyword,
        )),
        multispace0,
    )(input)
    // if we found nothing in the alt it means the user specified something that was not recognized as a value
    .map_err(|e: nom::Err<Error>| {
        e.map_err(|error| {
            let expected_value_kind = if matches!(error.kind(), ErrorKind::ReservedKeyword(_)) {
                ExpectedValueKind::ReservedKeyword
            } else {
                ExpectedValueKind::Other
            };
            Error::new_from_kind(
                error_word(input).unwrap().1,
                ErrorKind::ExpectedValue(expected_value_kind),
            )
        })
    })
    .map_err(|e| {
        e.map_fail(|failure| {
            // if we encountered a char failure it means the user had an unmatched quote
            if matches!(failure.kind(), ErrorKind::Char(_)) {
                Error::new_from_kind(input, ErrorKind::MissingClosingDelimiter(failure.char()))
            } else {
                // otherwise we leave the failure untouched
                failure
            }
        })
    })?;

    Ok((input, value))
}

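// A minimal usage sketch of `parse_value` (mirroring the tests below; the
// `extra` payload of `Span::new_extra` is assumed to carry the original input):
//
//   let input = Span::new_extra(r#""hello \"world\"""#, r#""hello \"world\"""#);
//   let (_rest, token) = parse_value(input)?;
//   assert_eq!(token.value(), r#"hello "world""#);
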
fn is_value_component(c: char) -> bool {
    c.is_alphanumeric() || ['_', '-', '.'].contains(&c)
}

fn is_syntax_component(c: char) -> bool {
    c.is_whitespace() || ['(', ')', '=', '<', '>', '!'].contains(&c)
}

fn is_keyword(s: &str) -> bool {
    matches!(s, "AND" | "OR" | "IN" | "NOT" | "TO" | "EXISTS" | "_geoRadius" | "_geoBoundingBox")
}

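// Note (illustrative): `is_keyword` matches case-sensitively, so a bare `and`
// parses as an ordinary word while `AND` is rejected as a reserved keyword.
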
#[cfg(test)]
pub mod test {
    use nom::Finish;

    use super::*;
    use crate::tests::rtok;

    #[test]
    fn test_span() {
        let test_case = [
            ("channel", rtok("", "channel")),
            (".private", rtok("", ".private")),
            ("I-love-kebab", rtok("", "I-love-kebab")),
            ("but_snakes_is_also_good", rtok("", "but_snakes_is_also_good")),
            ("parens(", rtok("", "parens")),
            ("parens)", rtok("", "parens")),
            ("not!", rtok("", "not")),
            (" channel", rtok(" ", "channel")),
            ("channel ", rtok("", "channel")),
            (" channel ", rtok(" ", "channel")),
            ("'channel'", rtok("'", "channel")),
            ("\"channel\"", rtok("\"", "channel")),
            ("'cha)nnel'", rtok("'", "cha)nnel")),
            ("'cha\"nnel'", rtok("'", "cha\"nnel")),
            ("\"cha'nnel\"", rtok("\"", "cha'nnel")),
            ("\" some spaces \"", rtok("\"", " some spaces ")),
            ("\"cha'nnel\"", rtok("'", "cha'nnel")),
            ("\"cha'nnel\"", rtok("'", "cha'nnel")),
            ("I'm tamo", rtok("'m tamo", "I")),
            ("\"I'm \\\"super\\\" tamo\"", rtok("\"", "I'm \\\"super\\\" tamo")),
        ];

        for (input, expected) in test_case {
            let input = Span::new_extra(input, input);
            let result = parse_value(input);

            assert!(
                result.is_ok(),
                "Filter `{:?}` was supposed to be parsed but failed with the following error: `{}`",
                expected,
                result.unwrap_err()
            );
            let token = result.unwrap().1;
            assert_eq!(token, expected, "Filter `{}` failed.", input);
        }
    }

    #[test]
    fn test_escape_inside_double_quote() {
        // (input, remaining, expected output token, output value)
        let test_case = [
            ("aaaa", "", rtok("", "aaaa"), "aaaa"),
            (r#"aa"aa"#, r#""aa"#, rtok("", "aa"), "aa"),
            (r#"aa\"aa"#, r#""#, rtok("", r#"aa\"aa"#), r#"aa"aa"#),
            (r#"aa\\\aa"#, r#""#, rtok("", r#"aa\\\aa"#), r#"aa\\\aa"#),
            (r#"aa\\"\aa"#, r#""\aa"#, rtok("", r#"aa\\"#), r#"aa\\"#),
            (r#"aa\\\"\aa"#, r#""#, rtok("", r#"aa\\\"\aa"#), r#"aa\\"\aa"#),
            (r#"\"\""#, r#""#, rtok("", r#"\"\""#), r#""""#),
        ];

        for (input, remaining, expected_tok, expected_val) in test_case {
            let span = Span::new_extra(input, "");
            let result = quoted_by('"', span);
            assert!(result.is_ok());

            let (rem, output) = result.unwrap();
            assert_eq!(rem.to_string(), remaining);
            assert_eq!(output, expected_tok);
            assert_eq!(output.value(), expected_val.to_string());
        }
    }

    #[test]
    fn test_unescape() {
        // double quote
        assert_eq!(
            unescape(Span::new_extra(r#"Hello \"World\""#, ""), '"'),
            r#"Hello "World""#.to_string()
        );
        assert_eq!(
            unescape(Span::new_extra(r#"Hello \\\"World\\\""#, ""), '"'),
            r#"Hello \\"World\\""#.to_string()
        );
        // single quote
        assert_eq!(
            unescape(Span::new_extra(r#"Hello \'World\'"#, ""), '\''),
            r#"Hello 'World'"#.to_string()
        );
        assert_eq!(
            unescape(Span::new_extra(r#"Hello \\\'World\\\'"#, ""), '\''),
            r#"Hello \\'World\\'"#.to_string()
        );
    }

    #[test]
    fn test_value() {
        let test_case = [
            // (input, expected value, whether a new string was generated to hold the unescaped value)
            ("channel", "channel", false),
            // all of the base tests: no escaped string should be generated
            (".private", ".private", false),
            ("I-love-kebab", "I-love-kebab", false),
            ("but_snakes_is_also_good", "but_snakes_is_also_good", false),
            ("parens(", "parens", false),
            ("parens)", "parens", false),
            ("not!", "not", false),
            (" channel", "channel", false),
            ("channel ", "channel", false),
            (" channel ", "channel", false),
            ("'channel'", "channel", false),
            ("\"channel\"", "channel", false),
            ("'cha)nnel'", "cha)nnel", false),
            ("'cha\"nnel'", "cha\"nnel", false),
            ("\"cha'nnel\"", "cha'nnel", false),
            ("\" some spaces \"", " some spaces ", false),
            ("\"cha'nnel\"", "cha'nnel", false),
            ("\"cha'nnel\"", "cha'nnel", false),
            ("I'm tamo", "I", false),
            // escaped characters, but not the quote
            (r#""\\""#, r#"\\"#, false),
            (r#""\\\\\\""#, r#"\\\\\\"#, false),
            (r#""aa\\aa""#, r#"aa\\aa"#, false),
            // with double quote
            (r#""Hello \"world\"""#, r#"Hello "world""#, true),
            (r#""Hello \\\"world\\\"""#, r#"Hello \\"world\\""#, true),
            (r#""I'm \"super\" tamo""#, r#"I'm "super" tamo"#, true),
            (r#""\"\"""#, r#""""#, true),
            // with single quote
            (r#"'Hello \'world\''"#, r#"Hello 'world'"#, true),
            (r#"'Hello \\\'world\\\''"#, r#"Hello \\'world\\'"#, true),
            (r#"'I\'m "super" tamo'"#, r#"I'm "super" tamo"#, true),
            (r#"'\'\''"#, r#"''"#, true),
        ];

        for (input, expected, escaped) in test_case {
            let input = Span::new_extra(input, input);
            let result = parse_value(input);

            assert!(
                result.is_ok(),
                "Filter `{:?}` was supposed to be parsed but failed with the following error: `{}`",
                expected,
                result.unwrap_err()
            );
            let token = result.unwrap().1;
            assert_eq!(
                token.value.is_some(),
                escaped,
                "Filter `{}` did not match the expected escape state",
                input
            );
            assert_eq!(token.value(), expected, "Filter `{}` failed.", input);
        }
    }

    #[test]
    fn diagnostic() {
        let test_case = [
            ("🦀", "🦀"),
            (" 🦀", "🦀"),
            ("🦀 AND crab = truc", "🦀"),
            ("🦀_in_name", "🦀_in_name"),
            (" (name = ...", ""),
        ];

        for (input, expected) in test_case {
            let input = Span::new_extra(input, input);
            let result = parse_value(input);

            assert!(
                result.is_err(),
                "Filter `{}` was not supposed to be parsed, but it was, with the following result: `{:?}`",
                expected,
                result.unwrap()
            );
            // get the inner string referenced in the error
            let value = *result.finish().unwrap_err().context().fragment();
            assert_eq!(value, expected, "Filter `{}` was supposed to fail with the following value: `{}`, but it failed with: `{}`.", input, expected, value);
        }
    }
}