Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-07-18 12:20:48 +00:00)

Compare commits: v1.0.0-rc. ... dockerfile (2710 commits)
[Commit table: 2710 commit SHAs, from 94d8484581 through 4573f00a0d; the author, date, and message columns are empty in this mirror capture, so the table is omitted here.]
.github/dependabot.yml (vendored): 1 change

@@ -2,7 +2,6 @@
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
.github/scripts/check-release.sh (vendored): 10 changes

@@ -3,7 +3,7 @@
 # check_tag $current_tag $file_tag $file_name
 function check_tag {
   if [[ "$1" != "$2" ]]; then
-    echo "Error: the current tag does not match the version in $3: found $2 - expected $1"
+    echo "Error: the current tag does not match the version in Cargo.toml: found $2 - expected $1"
     ret=1
   fi
 }

@@ -11,12 +11,8 @@ function check_tag {
 ret=0
 current_tag=${GITHUB_REF#'refs/tags/v'}

-toml_files='*/Cargo.toml'
-for toml_file in $toml_files;
-do
-    file_tag="$(grep '^version = ' $toml_file | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
-    check_tag $current_tag $file_tag $toml_file
-done
+file_tag="$(grep '^version = ' Cargo.toml | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
+check_tag $current_tag $file_tag

 lock_file='Cargo.lock'
 lock_tag=$(grep -A 1 'name = "meilisearch-auth"' $lock_file | grep version | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')
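The change reads the version from the single workspace Cargo.toml instead of looping over per-crate manifests. For reference, a minimal local dry run of the script might look like the sketch below; it assumes the script is invoked from the repository root and exits non-zero on mismatch, and the tag value is purely illustrative.

    # Hypothetical local dry run; the tag value is illustrative.
    # The script derives the tag from GITHUB_REF, so exporting it is enough.
    export GITHUB_REF='refs/tags/v1.0.0'
    bash .github/scripts/check-release.sh
    # On a mismatch it prints, e.g.:
    #   Error: the current tag does not match the version in Cargo.toml: found 0.30.5 - expected 1.0.0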
.github/uffizzi/Dockerfile (vendored, new file): 48 lines

@@ -0,0 +1,48 @@
# Compile
FROM rust:alpine3.16 AS compiler

RUN apk add -q --update-cache --no-cache build-base openssl-dev

WORKDIR /meilisearch

ARG COMMIT_SHA
ARG COMMIT_DATE
ARG GIT_TAG
ENV COMMIT_SHA=${COMMIT_SHA} COMMIT_DATE=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
ENV RUSTFLAGS="-C target-feature=-crt-static"

COPY . .
RUN set -eux; \
    apkArch="$(apk --print-arch)"; \
    if [ "$apkArch" = "aarch64" ]; then \
        export JEMALLOC_SYS_WITH_LG_PAGE=16; \
    fi && \
    cargo build --release

# Run
FROM uffizzi/ttyd:alpine

ENV MEILI_HTTP_ADDR 0.0.0.0:7700
ENV MEILI_SERVER_PROVIDER docker
ENV MEILI_NO_ANALYTICS true

RUN apk update --quiet \
    && apk add -q --no-cache libgcc tini curl

# Add meilisearch to `/bin` so you can run it from anywhere and it's easy
# to find.
COPY --from=compiler /meilisearch/target/release/meilisearch /bin/meilisearch
# To stay compatible with the older version of the container (pre v0.27.0) we're
# going to symlink the meilisearch binary in the path to `/meilisearch`.
RUN ln -s /bin/meilisearch /meilisearch

# This directory should hold all the data related to meilisearch, so we're going
# to move our PWD in there. We don't want to put the meilisearch binary in there.
WORKDIR /meili_data

EXPOSE 7700/tcp

ENTRYPOINT ["tini", "--"]
CMD ["ttyd", "/bin/zsh"]
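The JEMALLOC_SYS_WITH_LG_PAGE=16 export accounts for aarch64 systems that use 64 KiB (2^16) memory pages. The workflow that builds this image is not shown in this compare, so the following is only a hedged sketch of a local build from the repository root; the image tag and the way the build args are derived here are assumptions, not the CI's exact invocation.

    # Hypothetical local build; tag name and arg derivation are illustrative.
    docker build \
      --build-arg COMMIT_SHA="$(git rev-parse HEAD)" \
      --build-arg COMMIT_DATE="$(git show -s --format=%cI HEAD)" \
      --build-arg GIT_TAG="$(git describe --tags --abbrev=0)" \
      -f .github/uffizzi/Dockerfile \
      -t meilisearch-uffizzi .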
.github/uffizzi/docker-compose.uffizzi.yml (vendored, new file): 26 lines

@@ -0,0 +1,26 @@
version: "3"

x-uffizzi:
  ingress:
    service: nginx
    port: 8081

services:
  meilisearch:
    image: "${MEILISEARCH_IMAGE}"
    restart: unless-stopped
    ports:
      - "7681:7681"
      - "7700:7700"
    deploy:
      resources:
        limits:
          memory: 500M

  nginx:
    image: nginx:alpine
    restart: unless-stopped
    ports:
      - "8081:8081"
    volumes:
      - ./.github/uffizzi/nginx:/etc/nginx
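In CI, Uffizzi supplies MEILISEARCH_IMAGE itself; as a rough local approximation, the sketch below assumes the image tag built in the previous step and that you run from the repository root (required for the relative nginx volume mount).

    # Hypothetical local run; MEILISEARCH_IMAGE is normally set by Uffizzi.
    MEILISEARCH_IMAGE=meilisearch-uffizzi \
      docker compose -f .github/uffizzi/docker-compose.uffizzi.yml up -d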
.github/uffizzi/nginx/nginx.conf (vendored, new file): 28 lines

@@ -0,0 +1,28 @@
events {
    worker_connections 4096; ## Default: 1024
}

http {
    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }

    server {
        listen 8081;

        location / {
            proxy_pass http://localhost:7681;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
        }

        location /meilisearch/ {
            # rewrite /meilisearch/(.*) /$1 break;
            proxy_pass http://localhost:7700/;
        }
    }
}
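The map block and Upgrade/Connection headers let nginx proxy ttyd's WebSocket connection on /, while the trailing slash on proxy_pass strips the /meilisearch/ prefix before forwarding to the engine. A hedged smoke test, assuming the compose stack above is running and that Meilisearch's /health endpoint is available on this build:

    # ttyd web terminal behind the WebSocket-aware proxy:
    curl -i http://localhost:8081/
    # Meilisearch behind the /meilisearch/ prefix; /health is assumed available:
    curl -i http://localhost:8081/meilisearch/health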
.github/workflows/create-issue-dependencies.yml

@@ -1,6 +1,7 @@
 name: Create issue to upgrade dependencies
 on:
   schedule:
     # Run on the first of the month, every 3 months
     - cron: '0 0 1 */3 *'
+  workflow_dispatch:

@@ -15,9 +16,13 @@ jobs:
       github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
       title: Upgrade dependencies
       body: |
-        We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the engine-team repositories that Meilisearch depends on (milli, charabia, heed...).
+        This issue is about updating Meilisearch dependencies:
+        - [ ] Cargo toml dependencies of Meilisearch; but also the main engine-team repositories that Meilisearch depends on (charabia, heed...)
+        - [ ] If new Rust versions have been released, update the Rust version in the Clippy job of this [GitHub Action file](./.github/workflows/rust.yml)

-        ⚠️ This issue should only be done at the beginning of the sprint!
+        ⚠️ To avoid last minute bugs, this issue should only be done at the beginning of the sprint!
+
+        The GitHub action dependencies are managed by [Dependabot](./.github/dependabot.yml)
       labels: |
         dependencies
         maintenance
.github/workflows/flaky.yml (vendored): 10 changes

@@ -22,5 +22,11 @@ jobs:
           override: true
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
-      - name: Run cargo flaky 100 times
-        run: cargo flaky -i 100 --release
+      - name: Run cargo flaky in the dumps
+        run: cd dump; cargo flaky -i 100 --release
+      - name: Run cargo flaky in the index-scheduler
+        run: cd index-scheduler; cargo flaky -i 100 --release
+      - name: Run cargo flaky in the auth
+        run: cd meilisearch-auth; cargo flaky -i 100 --release
+      - name: Run cargo flaky in meilisearch
+        run: cd meilisearch; cargo flaky -i 100 --release
.github/workflows/latest-git-tag.yml (vendored): 3 changes

@@ -3,7 +3,7 @@ name: Update latest git tag
 on:
   workflow_dispatch:
   release:
-    types: [published]
+    types: [released]

 jobs:
   check-version:

@@ -17,6 +17,7 @@ jobs:

   update-latest-tag:
     runs-on: ubuntu-latest
+    needs: check-version
     steps:
       - uses: actions/checkout@v3
       - uses: rickstaa/action-create-tag@v1
.github/workflows/manual_benchmarks.yml (vendored, new file): 77 lines

@@ -0,0 +1,77 @@
name: Benchmarks

on:
  workflow_dispatch:
    inputs:
      dataset_name:
        description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
        required: false
        default: 'search_songs'

env:
  BENCH_NAME: ${{ github.event.inputs.dataset_name }}

jobs:
  benchmarks:
    name: Run and upload benchmarks
    runs-on: benchmarks
    timeout-minutes: 4320 # 72h
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      # Set variables
      - name: Set current branch name
        shell: bash
        run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
        id: current_branch
      - name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
        shell: bash
        run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
        id: normalized_current_branch
      - name: Set shorter commit SHA
        shell: bash
        run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
        id: commit_sha
      - name: Set file basename with format "dataset_branch_commitSHA"
        shell: bash
        run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
        id: file

      # Run benchmarks
      - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
        run: |
          cd benchmarks
          cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}

      # Generate critcmp files
      - name: Install critcmp
        uses: taiki-e/install-action@v2
        with:
          tool: critcmp
      - name: Export critcmp file
        run: |
          critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json

      # Upload benchmarks
      - name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
        uses: BetaHuhn/do-spaces-action@v2
        with:
          access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
          secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
          space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
          space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
          source: ${{ steps.file.outputs.basename }}.json
          out_dir: critcmp_results

      # Helper
      - name: 'README: compare with another benchmark'
        run: |
          echo "${{ steps.file.outputs.basename }}.json has just been pushed."
          echo 'How to compare this benchmark with another one?'
          echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
          echo " - Run the following command: ./benchmarks/scripts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
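Given the workflow_dispatch trigger and its dataset_name input, one hedged way to launch this workflow is via the GitHub CLI, assuming gh is authenticated against a repo where you have permission to run workflows:

    # Hypothetical manual trigger via the GitHub CLI.
    gh workflow run manual_benchmarks.yml -f dataset_name=search_wiki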
.github/workflows/publish-binaries.yml (vendored): 8 changes

@@ -54,7 +54,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/meilisearch

@@ -87,7 +87,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/${{ matrix.artifact_name }}

@@ -123,7 +123,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch

@@ -183,7 +183,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.4.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
4
.github/workflows/publish-deb-brew-pkg.yml
vendored
4
.github/workflows/publish-deb-brew-pkg.yml
vendored
@ -2,7 +2,7 @@ name: Publish to APT repository & Homebrew
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
types: [released]
|
||||
|
||||
jobs:
|
||||
check-version:
|
||||
@ -35,7 +35,7 @@ jobs:
|
||||
- name: Build deb package
|
||||
run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
|
||||
- name: Upload debian pkg to release
|
||||
uses: svenstaro/upload-release-action@2.3.0
|
||||
uses: svenstaro/upload-release-action@2.4.0
|
||||
with:
|
||||
repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||
file: target/debian/meilisearch.deb
|
||||
|
3
.github/workflows/publish-docker-images.yml
vendored
3
.github/workflows/publish-docker-images.yml
vendored
@ -84,7 +84,7 @@ jobs:
|
||||
type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
@ -92,6 +92,7 @@ jobs:
|
||||
build-args: |
|
||||
COMMIT_SHA=${{ github.sha }}
|
||||
COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
|
||||
GIT_TAG=${{ github.ref_name }}
|
||||
|
||||
# /!\ Don't touch this without checking with Cloud team
|
||||
- name: Send CI information to Cloud team
|
||||
|
79
.github/workflows/push_benchmarks_indexing.yml
vendored
Normal file
79
.github/workflows/push_benchmarks_indexing.yml
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
name: Benchmarks indexing (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
BENCH_NAME: "indexing"
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 4320 # 72h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export cripcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
78
.github/workflows/push_benchmarks_search_geo.yml
vendored
Normal file
78
.github/workflows/push_benchmarks_search_geo.yml
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
name: Benchmarks search geo (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
BENCH_NAME: "search_geo"
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export cripcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
78
.github/workflows/push_benchmarks_search_songs.yml
vendored
Normal file
78
.github/workflows/push_benchmarks_search_songs.yml
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
name: Benchmarks search songs (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
BENCH_NAME: "search_songs"
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export cripcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
78
.github/workflows/push_benchmarks_search_wiki.yml
vendored
Normal file
78
.github/workflows/push_benchmarks_search_wiki.yml
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
name: Benchmarks search wikipedia articles (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
BENCH_NAME: "search_wiki"
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export cripcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
56
.github/workflows/rust.yml
vendored
56
.github/workflows/rust.yml
vendored
@ -2,6 +2,9 @@ name: Rust
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# Everyday at 5:00am
|
||||
- cron: '0 5 * * *'
|
||||
pull_request:
|
||||
push:
|
||||
# trying and staging branches are for Bors config
|
||||
@ -27,22 +30,31 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install -y curl
|
||||
apt-get install build-essential -y
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- name: Run test with Rust stable
|
||||
if: github.event_name != 'schedule'
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.2.0
|
||||
- name: Run test with Rust nightly
|
||||
if: github.event_name == 'schedule'
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
# Disable cache due to disk space issues with Windows workers in CI
|
||||
# - name: Cache dependencies
|
||||
# uses: Swatinem/rust-cache@v2.2.0
|
||||
- name: Run cargo check without any default features
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: build
|
||||
args: --locked --release --no-default-features
|
||||
args: --locked --release --no-default-features --all
|
||||
- name: Run cargo test
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --locked --release
|
||||
args: --locked --release --all
|
||||
|
||||
test-others:
|
||||
name: Tests on ${{ matrix.os }}
|
||||
@ -53,18 +65,18 @@ jobs:
|
||||
os: [macos-12, windows-2022]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.2.0
|
||||
# - name: Cache dependencies
|
||||
# uses: Swatinem/rust-cache@v2.2.0
|
||||
- name: Run cargo check without any default features
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: build
|
||||
args: --locked --release --no-default-features
|
||||
args: --locked --release --no-default-features --all
|
||||
- name: Run cargo test
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --locked --release
|
||||
args: --locked --release --all
|
||||
|
||||
# We run tests in debug also, to make sure that the debug_assertions are hit
|
||||
test-debug:
|
||||
@ -83,13 +95,13 @@ jobs:
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.2.0
|
||||
# - name: Cache dependencies
|
||||
# uses: Swatinem/rust-cache@v2.2.0
|
||||
- name: Run tests in debug
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --locked
|
||||
args: --locked --all
|
||||
|
||||
clippy:
|
||||
name: Run Clippy
|
||||
@ -99,16 +111,17 @@ jobs:
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
toolchain: 1.67.0
|
||||
override: true
|
||||
components: clippy
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.2.0
|
||||
# - name: Cache dependencies
|
||||
# uses: Swatinem/rust-cache@v2.2.0
|
||||
- name: Run cargo clippy
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: clippy
|
||||
args: --all-targets -- --deny warnings
|
||||
# allow unlined_format_args https://github.com/rust-lang/rust-clippy/issues/10087
|
||||
args: --all-targets -- --deny warnings --allow clippy::uninlined_format_args
|
||||
|
||||
fmt:
|
||||
name: Run Rustfmt
|
||||
@ -121,7 +134,12 @@ jobs:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
components: rustfmt
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.2.0
|
||||
# - name: Cache dependencies
|
||||
# uses: Swatinem/rust-cache@v2.2.0
|
||||
- name: Run cargo fmt
|
||||
run: cargo fmt --all -- --check
|
||||
# Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
|
||||
# Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
|
||||
# we are going to create an empty file where rustfmt expects it.
|
||||
run: |
|
||||
echo -ne "\n" > benchmarks/benches/datasets_paths.rs
|
||||
cargo fmt --all -- --check
|
||||
|
100
.github/workflows/uffizzi-build.yml
vendored
Normal file
100
.github/workflows/uffizzi-build.yml
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
name: Uffizzi - Build PR Image
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened,synchronize,reopened,closed]
|
||||
|
||||
jobs:
|
||||
build-meilisearch:
|
||||
name: Build and push `meilisearch`
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
if: ${{ github.event.action != 'closed' }}
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Generate UUID image name
|
||||
id: uuid
|
||||
run: echo "UUID_TAG=$(uuidgen)" >> $GITHUB_ENV
|
||||
|
||||
- name: Docker metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@v3
|
||||
with:
|
||||
images: registry.uffizzi.com/${{ env.UUID_TAG }}
|
||||
tags: |
|
||||
type=raw,value=60d
|
||||
|
||||
- name: Build Image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
context: ./
|
||||
file: .github/uffizzi/Dockerfile
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
push: true
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
render-compose-file:
|
||||
name: Render Docker Compose File
|
||||
# Pass output of this workflow to another triggered by `workflow_run` event.
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- build-meilisearch
|
||||
outputs:
|
||||
compose-file-cache-key: ${{ env.COMPOSE_FILE_HASH }}
|
||||
steps:
|
||||
- name: Checkout git repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Render Compose File
|
||||
run: |
|
||||
MEILISEARCH_IMAGE=$(echo ${{ needs.build-meilisearch.outputs.tags }})
|
||||
export MEILISEARCH_IMAGE
|
||||
# Render simple template from environment variables.
|
||||
envsubst < .github/uffizzi/docker-compose.uffizzi.yml > docker-compose.rendered.yml
|
||||
cat docker-compose.rendered.yml
|
||||
- name: Upload Rendered Compose File as Artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: preview-spec
|
||||
path: docker-compose.rendered.yml
|
||||
retention-days: 2
|
||||
- name: Serialize PR Event to File
|
||||
run: |
|
||||
cat << EOF > event.json
|
||||
${{ toJSON(github.event) }}
|
||||
|
||||
EOF
|
||||
- name: Upload PR Event as Artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: preview-spec
|
||||
path: event.json
|
||||
retention-days: 2
|
||||
|
||||
delete-preview:
|
||||
name: Call for Preview Deletion
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.action == 'closed' }}
|
||||
steps:
|
||||
# If this PR is closing, we will not render a compose file nor pass it to the next workflow.
|
||||
- name: Serialize PR Event to File
|
||||
run: |
|
||||
cat << EOF > event.json
|
||||
${{ toJSON(github.event) }}
|
||||
|
||||
EOF
|
||||
- name: Upload PR Event as Artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: preview-spec
|
||||
path: event.json
|
||||
retention-days: 2
|
103
.github/workflows/uffizzi-preview-deploy.yml
vendored
Normal file
103
.github/workflows/uffizzi-preview-deploy.yml
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
name: Uffizzi - Deploy Preview
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- "Uffizzi - Build PR Image"
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
cache-compose-file:
|
||||
name: Cache Compose File
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' }}
|
||||
outputs:
|
||||
compose-file-cache-key: ${{ env.COMPOSE_FILE_HASH }}
|
||||
pr-number: ${{ env.PR_NUMBER }}
|
||||
expected-url: ${{ env.EXPECTED_URL }}
|
||||
steps:
|
||||
- name: 'Download artifacts'
|
||||
# Fetch output (zip archive) from the workflow run that triggered this workflow.
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
script: |
|
||||
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
run_id: context.payload.workflow_run.id,
|
||||
});
|
||||
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
|
||||
return artifact.name == "preview-spec"
|
||||
})[0];
|
||||
let download = await github.rest.actions.downloadArtifact({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
artifact_id: matchArtifact.id,
|
||||
archive_format: 'zip',
|
||||
});
|
||||
let fs = require('fs');
|
||||
fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/preview-spec.zip`, Buffer.from(download.data));
|
||||
|
||||
- name: 'Unzip artifact'
|
||||
run: unzip preview-spec.zip
|
||||
|
||||
- name: Read Event into ENV
|
||||
run: |
|
||||
echo 'EVENT_JSON<<EOF' >> $GITHUB_ENV
|
||||
cat event.json >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
|
||||
- name: Hash Rendered Compose File
|
||||
id: hash
|
||||
# If the previous workflow was triggered by a PR close event, we will not have a compose file artifact.
|
||||
if: ${{ fromJSON(env.EVENT_JSON).action != 'closed' }}
|
||||
run: echo "COMPOSE_FILE_HASH=$(md5sum docker-compose.rendered.yml | awk '{ print $1 }')" >> $GITHUB_ENV
|
||||
|
||||
- name: Cache Rendered Compose File
|
||||
if: ${{ fromJSON(env.EVENT_JSON).action != 'closed' }}
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: docker-compose.rendered.yml
|
||||
key: ${{ env.COMPOSE_FILE_HASH }}
|
||||
|
||||
- name: Read PR Number From Event Object
|
||||
id: pr
|
||||
run: echo "PR_NUMBER=${{ fromJSON(env.EVENT_JSON).number }}" >> $GITHUB_ENV
|
||||
|
||||
- name: DEBUG - Print Job Outputs
|
||||
if: ${{ runner.debug }}
|
||||
run: |
|
||||
echo "PR number: ${{ env.PR_NUMBER }}"
|
||||
echo "Compose file hash: ${{ env.COMPOSE_FILE_HASH }}"
|
||||
cat event.json
|
||||
|
||||
- name: Add expected URL env var
|
||||
if: ${{ runner.debug }}
|
||||
run: |
|
||||
REPO=$(echo ${{ github.repository }} | sed 's/\./+/g')
|
||||
echo "EXPECTED_URL=${{ inputs.server }}/github.com/$REPO/pull/${{ env.PR_NUMBER }}" >> $GITHUB_ENV
|
||||
|
||||
deploy-uffizzi-preview:
|
||||
name: Use Remote Workflow to Preview on Uffizzi
|
||||
needs:
|
||||
- cache-compose-file
|
||||
uses: UffizziCloud/preview-action/.github/workflows/reusable.yaml@desc
|
||||
with:
|
||||
# If this workflow was triggered by a PR close event, cache-key will be an empty string
|
||||
# and this reusable workflow will delete the preview deployment.
|
||||
compose-file-cache-key: ${{ needs.cache-compose-file.outputs.compose-file-cache-key }}
|
||||
compose-file-cache-path: docker-compose.rendered.yml
|
||||
server: https://app.uffizzi.com
|
||||
pr-number: ${{ needs.cache-compose-file.outputs.pr-number }}
|
||||
description: |
|
||||
The meilisearch preview environment contains a web terminal from where you can run the
|
||||
`meilisearch` command. You should be able to access this instance of meilisearch running in
|
||||
the preview from the link Meilisearch Endpoint link given below.
|
||||
|
||||
Web Terminal Endpoint : ${{ needs.cache-compose-file.outputs.expected-url }}
|
||||
Meilisearch Endpoint : ${{ needs.cache-compose-file.outputs.expected-url }}/meilisearch
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
id-token: write
|
@ -13,7 +13,6 @@ env:
|
||||
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||
|
||||
jobs:
|
||||
|
||||
update-version-cargo-toml:
|
||||
name: Update version in Cargo.toml files
|
||||
runs-on: ubuntu-latest
|
||||
@ -30,7 +29,7 @@ jobs:
|
||||
run: |
|
||||
raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2)
|
||||
new_string="version = \"$raw_new_version\""
|
||||
sd '^version = "\d+.\d+.\w+"$' "$new_string" */Cargo.toml
|
||||
sd '^version = "\d+.\d+.\w+"$' "$new_string" Cargo.toml
|
||||
- name: Build Meilisearch to update Cargo.lock
|
||||
run: cargo build
|
||||
- name: Commit and push the changes to the ${{ env.NEW_BRANCH }} branch
|
||||
|
6
.gitignore
vendored
6
.gitignore
vendored
@ -1,3 +1,5 @@
|
||||
.idea/
|
||||
.vscode/
|
||||
/target
|
||||
**/*.csv
|
||||
**/*.json_lines
|
||||
@ -8,9 +10,11 @@
|
||||
/snapshots
|
||||
/dumps
|
||||
|
||||
|
||||
# Snapshots
|
||||
## ... large
|
||||
*.full.snap
|
||||
## ... unreviewed
|
||||
*.snap.new
|
||||
|
||||
# Fuzzcheck data for the facet indexing fuzz test
|
||||
milli/fuzz/update::facet::incremental::fuzz::fuzz/
|
||||
|
@ -52,6 +52,23 @@ cargo test
|
||||
|
||||
This command will be triggered to each PR as a requirement for merging it.
|
||||
|
||||
#### Snapshot-based tests
|
||||
|
||||
We are using [insta](https://insta.rs) to perform snapshot-based testing.
|
||||
We recommend using the insta tooling (such as `cargo-insta`) to update the snapshots if they change following a PR.
|
||||
|
||||
New tests should use insta where possible rather than manual `assert` statements.
|
||||
|
||||
Furthermore, we provide some macros on top of insta, notably a way to use snapshot hashes instead of inline snapshots, saving a lot of space in the repository.
|
||||
|
||||
To effectively debug snapshot-based hashes, we recommend you export the `MEILI_TEST_FULL_SNAPS` environment variable so that snapshot are fully created locally:
|
||||
|
||||
```
|
||||
export MEILI_TEST_FULL_SNAPS=true # add this to your .bashrc, .zshrc, ...
|
||||
```
|
||||
|
||||
#### Test troubleshooting
|
||||
|
||||
If you get a "Too many open files" error you might want to increase the open file limit using this command:
|
||||
|
||||
```bash
|
||||
@ -99,6 +116,34 @@ _[Read more about this](https://github.com/meilisearch/integration-guides/blob/m
|
||||
|
||||
The full Meilisearch release process is described in [this guide](https://github.com/meilisearch/engine-team/blob/main/resources/meilisearch-release.md). Please follow it carefully before doing any release.
|
||||
|
||||
### How to publish a prototype
|
||||
|
||||
Depending on the developed feature, you might need to provide a prototyped version of Meilisearch to make it easier to test by the users.
|
||||
|
||||
The prototype name must follow this convention: `prototype-X-Y` where
|
||||
- `X` is the feature name formatted in `kebab-case`. It should not end with a single number.
|
||||
- `Y` is the version of the prototype, starting from `0`.
|
||||
|
||||
✅ Example: `prototype-auto-resize-0`. </br>
|
||||
❌ Bad example: `auto-resize-0`: lacks the `prototype` prefix. </br>
|
||||
❌ Bad example: `prototype-auto-resize`: lacks the version suffix. </br>
|
||||
❌ Bad example: `prototype-auto-resize-0-0`: feature name ends with a single number.
|
||||
|
||||
Steps to create a prototype:
|
||||
|
||||
1. In your terminal, go to the last commit of your branch (the one you want to provide as a prototype).
|
||||
2. Create a tag following the convention: `git tag prototype-X-Y`
|
||||
3. Run Meilisearch and check that its launch summary features a line: `Prototype: prototype-X-Y` (you may need to switch branches and back after tagging for this to work).
|
||||
3. Push the tag: `git push origin prototype-X-Y`
|
||||
4. Check the [Docker CI](https://github.com/meilisearch/meilisearch/actions/workflows/publish-docker-images.yml) is now running.
|
||||
|
||||
🐳 Once the CI has finished to run (~1h30), a Docker image named `prototype-X-Y` will be available on [DockerHub](https://hub.docker.com/repository/docker/getmeili/meilisearch/general). People can use it with the following command: `docker run -p 7700:7700 -v $(pwd)/meili_data:/meili_data getmeili/meilisearch:prototype-X-Y`. <br>
|
||||
More information about [how to run Meilisearch with Docker](https://docs.meilisearch.com/learn/cookbooks/docker.html#download-meilisearch-with-docker).
|
||||
|
||||
⚙️ However, no binaries will be created. If the users do not use Docker, they can go to the `prototype-X-Y` tag in the Meilisearch repository and compile from the source code.
|
||||
|
||||
⚠️ When sharing a prototype with users, remind them to not use it in production. Prototypes are solely for test purposes.
|
||||
|
||||
### Release assets
|
||||
|
||||
For each release, the following assets are created:
|
||||
|
720
Cargo.lock
generated
720
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
39
Cargo.toml
39
Cargo.toml
@ -9,13 +9,50 @@ members = [
|
||||
"dump",
|
||||
"file-store",
|
||||
"permissive-json-pointer",
|
||||
"milli",
|
||||
"filter-parser",
|
||||
"flatten-serde-json",
|
||||
"json-depth-checker",
|
||||
"benchmarks"
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.0.0"
|
||||
authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
|
||||
description = "Meilisearch HTTP server"
|
||||
homepage = "https://meilisearch.com"
|
||||
readme = "README.md"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
|
||||
[profile.dev.package.flate2]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.milli]
|
||||
[profile.dev.package.grenad]
|
||||
opt-level = 3
|
||||
[profile.dev.package.roaring]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.lindera-ipadic-builder]
|
||||
opt-level = 3
|
||||
[profile.dev.package.encoding]
|
||||
opt-level = 3
|
||||
[profile.dev.package.yada]
|
||||
opt-level = 3
|
||||
|
||||
[profile.release.package.lindera-ipadic-builder]
|
||||
opt-level = 3
|
||||
[profile.release.package.encoding]
|
||||
opt-level = 3
|
||||
[profile.release.package.yada]
|
||||
opt-level = 3
|
||||
|
||||
[profile.bench.package.lindera-ipadic-builder]
|
||||
opt-level = 3
|
||||
[profile.bench.package.encoding]
|
||||
opt-level = 3
|
||||
[profile.bench.package.yada]
|
||||
opt-level = 3
|
||||
|
19
Dockerfile
19
Dockerfile
@ -1,31 +1,32 @@
|
||||
# Compile
|
||||
FROM rust:alpine3.16 AS compiler
|
||||
|
||||
RUN apk add -q --update-cache --no-cache build-base openssl-dev
|
||||
FROM rust:bullseye AS compiler
|
||||
|
||||
WORKDIR /meilisearch
|
||||
|
||||
ARG COMMIT_SHA
|
||||
ARG COMMIT_DATE
|
||||
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE}
|
||||
ARG GIT_TAG
|
||||
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
|
||||
ENV RUSTFLAGS="-C target-feature=-crt-static"
|
||||
|
||||
COPY . .
|
||||
RUN set -eux; \
|
||||
apkArch="$(apk --print-arch)"; \
|
||||
if [ "$apkArch" = "aarch64" ]; then \
|
||||
arch="$(dpkg --print-architecture)"; \
|
||||
if [ "$arch" = "arm64" ]; then \
|
||||
export JEMALLOC_SYS_WITH_LG_PAGE=16; \
|
||||
fi && \
|
||||
cargo build --release
|
||||
|
||||
# Run
|
||||
FROM alpine:3.16
|
||||
FROM debian:11.6
|
||||
|
||||
ENV MEILI_HTTP_ADDR 0.0.0.0:7700
|
||||
ENV MEILI_SERVER_PROVIDER docker
|
||||
|
||||
RUN apk update --quiet \
|
||||
&& apk add -q --no-cache libgcc tini curl
|
||||
RUN set -ex; \
|
||||
apt-get update -q; \
|
||||
apt-get install -q -y --no-install-recommends tini; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# add meilisearch to the `/bin` so you can run it from anywhere and it's easy
|
||||
# to find.
|
||||
|
15
README.md
15
README.md
@ -13,7 +13,6 @@
|
||||
</h4>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/meilisearch/meilisearch/actions"><img src="https://github.com/meilisearch/meilisearch/workflows/Cargo%20test/badge.svg" alt="Build Status"></a>
|
||||
<a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
|
||||
<a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
|
||||
<a href="https://app.bors.tech/repositories/26457"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
|
||||
@ -69,7 +68,7 @@ Install one of our SDKs in your project for seamless integration between Meilise
|
||||
|
||||
Take a look at the complete [Meilisearch integration list](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html).
|
||||
|
||||

|
||||
[](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html)
|
||||
|
||||
## ⚙️ Advanced usage
|
||||
|
||||
@ -101,3 +100,15 @@ Meilisearch is a search engine created by [Meili](https://www.welcometothejungle
|
||||
- For everything else, please check [this page listing some of the other places where you can find us](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html)
|
||||
|
||||
Thank you for your support!
|
||||
|
||||
## 👩💻 Contributing
|
||||
|
||||
Meilisearch is, and will always be, open-source! If you want to contribute to the project, please take a look at [our contribution guidelines](CONTRIBUTING.md).
|
||||
|
||||
## 📦 Versioning
|
||||
|
||||
Meilisearch releases and their associated binaries are available [in this GitHub page](https://github.com/meilisearch/meilisearch/releases).
|
||||
|
||||
The binaries are versioned following [SemVer conventions](https://semver.org/). To know more, read our [versioning policy](https://github.com/meilisearch/engine-team/blob/main/resources/versioning-policy.md).
|
||||
|
||||
Differently from the binaries, crates in this repository are not currently available on [crates.io](https://crates.io/) and do not follow [SemVer conventions](https://semver.org).
|
||||
|
6
assets/milli-logo.svg
Normal file
6
assets/milli-logo.svg
Normal file
@ -0,0 +1,6 @@
|
||||
<svg width="277" height="236" viewBox="0 0 277 236" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M213.085 190L242.907 86H276.196L246.375 190H213.085Z" fill="#494949"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M0 190L29.8215 86H63.1111L33.2896 190H0Z" fill="#494949"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M124.986 0L57.5772 235.083L60.7752 236H90.6038L158.276 0H124.986Z" fill="#494949"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M195.273 0L127.601 236H160.891L228.563 0H195.273Z" fill="#494949"/>
|
||||
</svg>
|
After Width: | Height: | Size: 585 B |
1
benchmarks/.gitignore
vendored
Normal file
1
benchmarks/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
benches/datasets_paths.rs
|
54
benchmarks/Cargo.toml
Normal file
54
benchmarks/Cargo.toml
Normal file
@ -0,0 +1,54 @@
|
||||
[package]
|
||||
name = "benchmarks"
|
||||
publish = false
|
||||
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
description.workspace = true
|
||||
homepage.workspace = true
|
||||
readme.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.65"
|
||||
csv = "1.1.6"
|
||||
milli = { path = "../milli", default-features = false }
|
||||
mimalloc = { version = "0.1.29", default-features = false }
|
||||
serde_json = { version = "1.0.85", features = ["preserve_order"] }
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { version = "0.4.0", features = ["html_reports"] }
|
||||
rand = "0.8.5"
|
||||
rand_chacha = "0.3.1"
|
||||
roaring = "0.10.1"
|
||||
|
||||
[build-dependencies]
|
||||
anyhow = "1.0.65"
|
||||
bytes = "1.2.1"
|
||||
convert_case = "0.6.0"
|
||||
flate2 = "1.0.24"
|
||||
reqwest = { version = "0.11.12", features = ["blocking", "rustls-tls"], default-features = false }
|
||||
|
||||
[features]
|
||||
default = ["milli/default"]
|
||||
|
||||
[[bench]]
|
||||
name = "search_songs"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "search_wiki"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "search_geo"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "indexing"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "formatting"
|
||||
harness = false
|
138
benchmarks/README.md
Normal file
138
benchmarks/README.md
Normal file
@ -0,0 +1,138 @@
|
||||
Benchmarks
|
||||
==========
|
||||
|
||||
## TOC
|
||||
|
||||
- [Run the benchmarks](#run-the-benchmarks)
|
||||
- [Comparison between benchmarks](#comparison-between-benchmarks)
|
||||
- [Datasets](#datasets)
|
||||
|
||||
## Run the benchmarks
|
||||
|
||||
### On our private server
|
||||
|
||||
The Meili team has self-hosted his own GitHub runner to run benchmarks on our dedicated bare metal server.
|
||||
|
||||
To trigger the benchmark workflow:
|
||||
- Go to the `Actions` tab of this repository.
|
||||
- Select the `Benchmarks` workflow on the left.
|
||||
- Click on `Run workflow` in the blue banner.
|
||||
- Select the branch on which you want to run the benchmarks and select the dataset you want (default: `songs`).
|
||||
- Finally, click on `Run workflow`.
|
||||
|
||||
This GitHub workflow will run the benchmarks and push the `critcmp` report to a DigitalOcean Space (= S3).
|
||||
|
||||
The name of the uploaded file is displayed in the workflow.
|
||||
|
||||
_[More about critcmp](https://github.com/BurntSushi/critcmp)._
|
||||
|
||||
💡 To compare the just-uploaded benchmark with another one, check out the [next section](#comparison-between-benchmarks).
|
||||
|
||||
### On your machine
|
||||
|
||||
To run all the benchmarks (~5h):
|
||||
|
||||
```bash
|
||||
cargo bench
|
||||
```
|
||||
|
||||
To run only the `search_songs` (~1h), `search_wiki` (~3h), `search_geo` (~20m) or `indexing` (~2h) benchmark:
|
||||
|
||||
```bash
|
||||
cargo bench --bench <dataset name>
|
||||
```
|
||||
|
||||
By default, the benchmarks will be downloaded and uncompressed automatically in the target directory.<br>
|
||||
If you don't want to download the datasets every time you update something on the code, you can specify a custom directory with the environment variable `MILLI_BENCH_DATASETS_PATH`:
|
||||
|
||||
```bash
|
||||
mkdir ~/datasets
|
||||
MILLI_BENCH_DATASETS_PATH=~/datasets cargo bench --bench search_songs # the four datasets are downloaded
|
||||
touch build.rs
|
||||
MILLI_BENCH_DATASETS_PATH=~/datasets cargo bench --bench songs # the code is compiled again but the datasets are not downloaded
|
||||
```
|
||||
|
||||
## Comparison between benchmarks
|
||||
|
||||
The benchmark reports we push are generated with `critcmp`. Thus, we use `critcmp` to show the result of a benchmark, or compare results between multiple benchmarks.
|
||||
|
||||
We provide a script to download and display the comparison report.
|
||||
|
||||
Requirements:
|
||||
- `grep`
|
||||
- `curl`
|
||||
- [`critcmp`](https://github.com/BurntSushi/critcmp)
|
||||
|
||||
List the available file in the DO Space:
|
||||
|
||||
```bash
|
||||
./benchmarks/script/list.sh
|
||||
```
|
||||
```bash
|
||||
songs_main_09a4321.json
|
||||
songs_geosearch_24ec456.json
|
||||
search_songs_main_cb45a10b.json
|
||||
```
|
||||
|
||||
Run the comparison script:
|
||||
|
||||
```bash
|
||||
# we get the result of ONE benchmark, this give you an idea of how much time an operation took
|
||||
./benchmarks/scripts/compare.sh son songs_geosearch_24ec456.json
|
||||
# we compare two benchmarks
|
||||
./benchmarks/scripts/compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json
|
||||
# we compare three benchmarks
|
||||
./benchmarks/scripts/compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json search_songs_main_cb45a10b.json
|
||||
```
|
||||
|
||||
## Datasets
|
||||
|
||||
The benchmarks uses the following datasets:
|
||||
- `smol-songs`
|
||||
- `smol-wiki`
|
||||
- `movies`
|
||||
- `smol-all-countries`
|
||||
|
||||
### Songs
|
||||
|
||||
`smol-songs` is a subset of the [`songs.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/songs.csv.gz).
|
||||
|
||||
It was generated with this command:
|
||||
|
||||
```bash
|
||||
xsv sample --seed 42 1000000 songs.csv -o smol-songs.csv
|
||||
```
|
||||
|
||||
_[Download the generated `smol-songs` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-songs.csv.gz)._
|
||||
|
||||
### Wiki
|
||||
|
||||
`smol-wiki` is a subset of the [`wikipedia-articles.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/wiki-articles.csv.gz).
|
||||
|
||||
It was generated with the following command:
|
||||
|
||||
```bash
|
||||
xsv sample --seed 42 500000 wiki-articles.csv -o smol-wiki-articles.csv
|
||||
```
|
||||
|
||||
_[Download the `smol-wiki` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-wiki-articles.csv.gz)._
|
||||
|
||||
### Movies
|
||||
|
||||
`movies` is a really small dataset we uses as our example in the [getting started](https://docs.meilisearch.com/learn/getting_started/)
|
||||
|
||||
_[Download the `movies` dataset](https://docs.meilisearch.com/movies.json)._
|
||||
|
||||
|
||||
### All Countries
|
||||
|
||||
`smol-all-countries` is a subset of the [`all-countries.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/all-countries.csv.gz)
|
||||
It has been converted to jsonlines and then edited so it matches our format for the `_geo` field.
|
||||
|
||||
It was generated with the following command:
|
||||
```bash
|
||||
bat all-countries.csv.gz | gunzip | xsv sample --seed 42 1000000 | csv2json-lite | sd '"latitude":"(.*?)","longitude":"(.*?)"' '"_geo": { "lat": $1, "lng": $2 }' | sd '\[|\]|,$' '' | gzip > smol-all-countries.jsonl.gz
|
||||
```
|
||||
|
||||
_[Download the `smol-all-countries` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-all-countries.jsonl.gz)._
|
||||
|
67
benchmarks/benches/formatting.rs
Normal file
67
benchmarks/benches/formatting.rs
Normal file
@ -0,0 +1,67 @@
|
||||
use std::rc::Rc;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::tokenizer::TokenizerBuilder;
|
||||
use milli::{FormatOptions, MatcherBuilder, MatchingWord, MatchingWords};
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
struct Conf<'a> {
|
||||
name: &'a str,
|
||||
text: &'a str,
|
||||
matching_words: MatcherBuilder<'a, Vec<u8>>,
|
||||
}
|
||||
|
||||
fn bench_formatting(c: &mut criterion::Criterion) {
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
Conf {
|
||||
name: "'the door d'",
|
||||
text: r#"He used to do the door sounds in "Star Trek" with his mouth, phssst, phssst. The MD-11 passenger and cargo doors also tend to behave like electromagnetic apertures, because the doors do not have continuous electrical contact with the door frames around the door perimeter. But Theodor said that the doors don't work."#,
|
||||
matching_words: MatcherBuilder::new(MatchingWords::new(vec![
|
||||
(vec![Rc::new(MatchingWord::new("t".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("he".to_string(), 0, false).unwrap())], vec![0]),
|
||||
(vec![Rc::new(MatchingWord::new("the".to_string(), 0, false).unwrap())], vec![0]),
|
||||
(vec![Rc::new(MatchingWord::new("door".to_string(), 1, false).unwrap())], vec![1]),
|
||||
(vec![Rc::new(MatchingWord::new("do".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("or".to_string(), 0, false).unwrap())], vec![0]),
|
||||
(vec![Rc::new(MatchingWord::new("thedoor".to_string(), 1, false).unwrap())], vec![0, 1]),
|
||||
(vec![Rc::new(MatchingWord::new("d".to_string(), 0, true).unwrap())], vec![2]),
|
||||
(vec![Rc::new(MatchingWord::new("thedoord".to_string(), 1, true).unwrap())], vec![0, 1, 2]),
|
||||
(vec![Rc::new(MatchingWord::new("doord".to_string(), 1, true).unwrap())], vec![1, 2]),
|
||||
]
|
||||
), TokenizerBuilder::default().build()),
|
||||
},
|
||||
];
|
||||
|
||||
let format_options = &[
|
||||
FormatOptions { highlight: false, crop: None },
|
||||
FormatOptions { highlight: true, crop: None },
|
||||
FormatOptions { highlight: false, crop: Some(10) },
|
||||
FormatOptions { highlight: true, crop: Some(10) },
|
||||
FormatOptions { highlight: false, crop: Some(20) },
|
||||
FormatOptions { highlight: true, crop: Some(20) },
|
||||
];
|
||||
|
||||
for option in format_options {
|
||||
let highlight = if option.highlight { "highlight" } else { "no-highlight" };
|
||||
|
||||
let name = match option.crop {
|
||||
Some(size) => format!("{}-crop({})", highlight, size),
|
||||
None => format!("{}-no-crop", highlight),
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group(&name);
|
||||
for conf in confs {
|
||||
group.bench_function(conf.name, |b| {
|
||||
b.iter(|| {
|
||||
let mut matcher = conf.matching_words.build(conf.text);
|
||||
matcher.format(*option);
|
||||
})
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_formatting);
|
||||
criterion_main!(benches);
|
1380
benchmarks/benches/indexing.rs
Normal file
1380
benchmarks/benches/indexing.rs
Normal file
File diff suppressed because it is too large
Load Diff
122
benchmarks/benches/search_geo.rs
Normal file
122
benchmarks/benches/search_geo.rs
Normal file
@ -0,0 +1,122 @@
|
||||
mod datasets_paths;
|
||||
mod utils;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::update::Settings;
|
||||
use utils::Conf;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn base_conf(builder: &mut Settings) {
|
||||
let displayed_fields =
|
||||
["geonameid", "name", "asciiname", "alternatenames", "_geo", "population"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_displayed_fields(displayed_fields);
|
||||
|
||||
let searchable_fields =
|
||||
["name", "alternatenames", "elevation"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_searchable_fields(searchable_fields);
|
||||
|
||||
let filterable_fields =
|
||||
["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_filterable_fields(filterable_fields);
|
||||
|
||||
let sortable_fields =
|
||||
["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_sortable_fields(sortable_fields);
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const BASE_CONF: Conf = Conf {
|
||||
dataset: datasets_paths::SMOL_ALL_COUNTRIES,
|
||||
dataset_format: "jsonl",
|
||||
queries: &[
|
||||
"",
|
||||
],
|
||||
configure: base_conf,
|
||||
primary_key: Some("geonameid"),
|
||||
..Conf::BASE
|
||||
};
|
||||
|
||||
fn bench_geo(c: &mut criterion::Criterion) {
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
// A basic placeholder with no geo
|
||||
utils::Conf {
|
||||
group_name: "placeholder with no geo",
|
||||
..BASE_CONF
|
||||
},
|
||||
// Medium aglomeration: probably the most common usecase
|
||||
utils::Conf {
|
||||
group_name: "asc sort from Lille",
|
||||
sort: Some(vec!["_geoPoint(50.62999333378238, 3.086269263384099):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc sort from Lille",
|
||||
sort: Some(vec!["_geoPoint(50.62999333378238, 3.086269263384099):desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
// Big agglomeration: a lot of documents close to our point
|
||||
utils::Conf {
|
||||
group_name: "asc sort from Tokyo",
|
||||
sort: Some(vec!["_geoPoint(35.749512532692144, 139.61664952543356):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc sort from Tokyo",
|
||||
sort: Some(vec!["_geoPoint(35.749512532692144, 139.61664952543356):desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
// The furthest point from any civilization
|
||||
utils::Conf {
|
||||
group_name: "asc sort from Point Nemo",
|
||||
sort: Some(vec!["_geoPoint(-48.87561645055408, -123.39275749319793):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc sort from Point Nemo",
|
||||
sort: Some(vec!["_geoPoint(-48.87561645055408, -123.39275749319793):desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
// Filters
|
||||
utils::Conf {
|
||||
group_name: "filter of 100km from Lille",
|
||||
filter: Some("_geoRadius(50.62999333378238, 3.086269263384099, 100000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 1km from Lille",
|
||||
filter: Some("_geoRadius(50.62999333378238, 3.086269263384099, 1000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 100km from Tokyo",
|
||||
filter: Some("_geoRadius(35.749512532692144, 139.61664952543356, 100000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 1km from Tokyo",
|
||||
filter: Some("_geoRadius(35.749512532692144, 139.61664952543356, 1000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 100km from Point Nemo",
|
||||
filter: Some("_geoRadius(-48.87561645055408, -123.39275749319793, 100000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 1km from Point Nemo",
|
||||
filter: Some("_geoRadius(-48.87561645055408, -123.39275749319793, 1000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
];
|
||||
|
||||
utils::run_benches(c, confs);
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_geo);
|
||||
criterion_main!(benches);
|
196
benchmarks/benches/search_songs.rs
Normal file
196
benchmarks/benches/search_songs.rs
Normal file
@ -0,0 +1,196 @@
|
||||
mod datasets_paths;
|
||||
mod utils;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::update::Settings;
|
||||
use utils::Conf;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn base_conf(builder: &mut Settings) {
|
||||
let displayed_fields =
|
||||
["id", "title", "album", "artist", "genre", "country", "released", "duration"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_displayed_fields(displayed_fields);
|
||||
|
||||
let searchable_fields = ["title", "album", "artist"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_searchable_fields(searchable_fields);
|
||||
|
||||
let faceted_fields = ["released-timestamp", "duration-float", "genre", "country", "artist"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_filterable_fields(faceted_fields);
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const BASE_CONF: Conf = Conf {
|
||||
dataset: datasets_paths::SMOL_SONGS,
|
||||
queries: &[
|
||||
"john ", // 9097
|
||||
"david ", // 4794
|
||||
"charles ", // 1957
|
||||
"david bowie ", // 1200
|
||||
"michael jackson ", // 600
|
||||
"thelonious monk ", // 303
|
||||
"charles mingus ", // 142
|
||||
"marcus miller ", // 60
|
||||
"tamo ", // 13
|
||||
"Notstandskomitee ", // 4
|
||||
],
|
||||
configure: base_conf,
|
||||
primary_key: Some("id"),
|
||||
..Conf::BASE
|
||||
};
|
||||
|
||||
fn bench_songs(c: &mut criterion::Criterion) {
|
||||
let default_criterion: Vec<String> =
|
||||
milli::default_criteria().iter().map(|criteria| criteria.to_string()).collect();
|
||||
let default_criterion = default_criterion.iter().map(|s| s.as_str());
|
||||
let asc_default: Vec<&str> =
|
||||
std::iter::once("released-timestamp:asc").chain(default_criterion.clone()).collect();
|
||||
let desc_default: Vec<&str> =
|
||||
std::iter::once("released-timestamp:desc").chain(default_criterion.clone()).collect();
|
||||
|
||||
let basic_with_quote: Vec<String> = BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| {
|
||||
s.trim().split(' ').map(|s| format!(r#""{}""#, s)).collect::<Vec<String>>().join(" ")
|
||||
})
|
||||
.collect();
|
||||
let basic_with_quote: &[&str] =
|
||||
&basic_with_quote.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
|
||||
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
/* first we bench each criterion alone */
|
||||
utils::Conf {
|
||||
group_name: "proximity",
|
||||
queries: &[
|
||||
"black saint sinner lady ",
|
||||
"les dangeureuses 1960 ",
|
||||
"The Disneyland Sing-Along Chorus ",
|
||||
"Under Great Northern Lights ",
|
||||
"7000 Danses Un Jour Dans Notre Vie ",
|
||||
],
|
||||
criterion: Some(&["proximity"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "typo",
|
||||
queries: &[
|
||||
"mongus ",
|
||||
"thelonius monk ",
|
||||
"Disnaylande ",
|
||||
"the white striper ",
|
||||
"indochie ",
|
||||
"indochien ",
|
||||
"klub des loopers ",
|
||||
"fear of the duck ",
|
||||
"michel depech ",
|
||||
"stromal ",
|
||||
"dire straights ",
|
||||
"Arethla Franklin ",
|
||||
],
|
||||
criterion: Some(&["typo"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "words",
|
||||
queries: &[
|
||||
"the black saint and the sinner lady and the good doggo ", // four words to pop
|
||||
"les liaisons dangeureuses 1793 ", // one word to pop
|
||||
"The Disneyland Children's Sing-Alone song ", // two words to pop
|
||||
"seven nation mummy ", // one word to pop
|
||||
"7000 Danses / Le Baiser / je me trompe de mots ", // four words to pop
|
||||
"Bring Your Daughter To The Slaughter but now this is not part of the title ", // nine words to pop
|
||||
"whathavenotnsuchforth and a good amount of words to pop to match the first one ", // 13
|
||||
],
|
||||
criterion: Some(&["words"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "asc",
|
||||
criterion: Some(&["released-timestamp:desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc",
|
||||
criterion: Some(&["released-timestamp:desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
/* then we bench the asc and desc criterion on top of the default criterion */
|
||||
utils::Conf {
|
||||
group_name: "asc + default",
|
||||
criterion: Some(&asc_default[..]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc + default",
|
||||
criterion: Some(&desc_default[..]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
/* we bench the filters with the default request */
|
||||
utils::Conf {
|
||||
group_name: "basic filter: <=",
|
||||
filter: Some("released-timestamp <= 946728000"), // year 2000
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic filter: TO",
|
||||
filter: Some("released-timestamp 946728000 TO 1262347200"), // year 2000 to 2010
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "big filter",
|
||||
filter: Some("released-timestamp != 1262347200 AND (NOT (released-timestamp = 946728000)) AND (duration-float = 1 OR (duration-float 1.1 TO 1.5 AND released-timestamp > 315576000))"),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
/* the we bench some global / normal search with all the default criterion in the default
|
||||
* order */
|
||||
utils::Conf {
|
||||
group_name: "basic placeholder",
|
||||
queries: &[""],
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic without quote",
|
||||
queries: &BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| s.trim()) // we remove the space at the end of each request
|
||||
.collect::<Vec<&str>>(),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic with quote",
|
||||
queries: basic_with_quote,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "prefix search",
|
||||
queries: &[
|
||||
"s", // 500k+ results
|
||||
"a", //
|
||||
"b", //
|
||||
"i", //
|
||||
"x", // only 7k results
|
||||
],
|
||||
..BASE_CONF
|
||||
},
|
||||
];
|
||||
|
||||
utils::run_benches(c, confs);
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_songs);
|
||||
criterion_main!(benches);
|
129
benchmarks/benches/search_wiki.rs
Normal file
129
benchmarks/benches/search_wiki.rs
Normal file
@ -0,0 +1,129 @@
|
||||
mod datasets_paths;
|
||||
mod utils;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::update::Settings;
|
||||
use utils::Conf;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn base_conf(builder: &mut Settings) {
|
||||
let displayed_fields = ["title", "body", "url"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_displayed_fields(displayed_fields);
|
||||
|
||||
let searchable_fields = ["title", "body"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_searchable_fields(searchable_fields);
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const BASE_CONF: Conf = Conf {
|
||||
dataset: datasets_paths::SMOL_WIKI_ARTICLES,
|
||||
queries: &[
|
||||
"mingus ", // 46 candidates
|
||||
"miles davis ", // 159
|
||||
"rock and roll ", // 1007
|
||||
"machine ", // 3448
|
||||
"spain ", // 7002
|
||||
"japan ", // 10.593
|
||||
"france ", // 17.616
|
||||
"film ", // 24.959
|
||||
],
|
||||
configure: base_conf,
|
||||
..Conf::BASE
|
||||
};
|
||||
|
||||
fn bench_songs(c: &mut criterion::Criterion) {
|
||||
let basic_with_quote: Vec<String> = BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| {
|
||||
s.trim().split(' ').map(|s| format!(r#""{}""#, s)).collect::<Vec<String>>().join(" ")
|
||||
})
|
||||
.collect();
|
||||
let basic_with_quote: &[&str] =
|
||||
&basic_with_quote.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
|
||||
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
/* first we bench each criterion alone */
|
||||
utils::Conf {
|
||||
group_name: "proximity",
|
||||
queries: &[
|
||||
"herald sings ",
|
||||
"april paris ",
|
||||
"tea two ",
|
||||
"diesel engine ",
|
||||
],
|
||||
criterion: Some(&["proximity"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "typo",
|
||||
queries: &[
|
||||
"migrosoft ",
|
||||
"linax ",
|
||||
"Disnaylande ",
|
||||
"phytogropher ",
|
||||
"nympalidea ",
|
||||
"aritmetric ",
|
||||
"the fronce ",
|
||||
"sisan ",
|
||||
],
|
||||
criterion: Some(&["typo"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "words",
|
||||
queries: &[
|
||||
"the black saint and the sinner lady and the good doggo ", // four words to pop, 27 results
|
||||
"Kameya Tokujirō mingus monk ", // two words to pop, 55
|
||||
"Ulrich Hensel meilisearch milli ", // two words to pop, 306
|
||||
"Idaho Bellevue pizza ", // one word to pop, 800
|
||||
"Abraham machin ", // one word to pop, 1141
|
||||
],
|
||||
criterion: Some(&["words"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
/* the we bench some global / normal search with all the default criterion in the default
|
||||
* order */
|
||||
utils::Conf {
|
||||
group_name: "basic placeholder",
|
||||
queries: &[""],
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic without quote",
|
||||
queries: &BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| s.trim()) // we remove the space at the end of each request
|
||||
.collect::<Vec<&str>>(),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic with quote",
|
||||
queries: basic_with_quote,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "prefix search",
|
||||
queries: &[
|
||||
"t", // 453k results
|
||||
"c", // 405k
|
||||
"g", // 318k
|
||||
"j", // 227k
|
||||
"q", // 71k
|
||||
"x", // 17k
|
||||
],
|
||||
..BASE_CONF
|
||||
},
|
||||
];
|
||||
|
||||
utils::run_benches(c, confs);
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_songs);
|
||||
criterion_main!(benches);
|
256
benchmarks/benches/utils.rs
Normal file
256
benchmarks/benches/utils.rs
Normal file
@ -0,0 +1,256 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::fs::{create_dir_all, remove_dir_all, File};
|
||||
use std::io::{self, BufRead, BufReader, Cursor, Read, Seek};
|
||||
use std::num::ParseFloatError;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
|
||||
use criterion::BenchmarkId;
|
||||
use milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
|
||||
use milli::heed::EnvOpenOptions;
|
||||
use milli::update::{
|
||||
IndexDocuments, IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig, Settings,
|
||||
};
|
||||
use milli::{Criterion, Filter, Index, Object, TermsMatchingStrategy};
|
||||
use serde_json::Value;
|
||||
|
||||
pub struct Conf<'a> {
|
||||
/// where we are going to create our database.mmdb directory
|
||||
/// each benchmark will first try to delete it and then recreate it
|
||||
pub database_name: &'a str,
|
||||
/// the dataset to be used, it must be an uncompressed csv
|
||||
pub dataset: &'a str,
|
||||
/// The format of the dataset
|
||||
pub dataset_format: &'a str,
|
||||
pub group_name: &'a str,
|
||||
pub queries: &'a [&'a str],
|
||||
/// here you can change which criterion are used and in which order.
|
||||
/// - if you specify something all the base configuration will be thrown out
|
||||
/// - if you don't specify anything (None) the default configuration will be kept
|
||||
pub criterion: Option<&'a [&'a str]>,
|
||||
/// the last chance to configure your database as you want
|
||||
pub configure: fn(&mut Settings),
|
||||
pub filter: Option<&'a str>,
|
||||
pub sort: Option<Vec<&'a str>>,
|
||||
/// enable or disable the optional words on the query
|
||||
pub optional_words: bool,
|
||||
/// primary key, if there is None we'll auto-generate docids for every documents
|
||||
pub primary_key: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl Conf<'_> {
|
||||
pub const BASE: Self = Conf {
|
||||
database_name: "benches.mmdb",
|
||||
dataset_format: "csv",
|
||||
dataset: "",
|
||||
group_name: "",
|
||||
queries: &[],
|
||||
criterion: None,
|
||||
configure: |_| (),
|
||||
filter: None,
|
||||
sort: None,
|
||||
optional_words: true,
|
||||
primary_key: None,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn base_setup(conf: &Conf) -> Index {
|
||||
match remove_dir_all(conf.database_name) {
|
||||
Ok(_) => (),
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => (),
|
||||
Err(e) => panic!("{}", e),
|
||||
}
|
||||
create_dir_all(conf.database_name).unwrap();
|
||||
|
||||
let mut options = EnvOpenOptions::new();
|
||||
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
|
||||
options.max_readers(10);
|
||||
let index = Index::new(options, conf.database_name).unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let mut builder = Settings::new(&mut wtxn, &index, &config);
|
||||
|
||||
if let Some(primary_key) = conf.primary_key {
|
||||
builder.set_primary_key(primary_key.to_string());
|
||||
}
|
||||
|
||||
if let Some(criterion) = conf.criterion {
|
||||
builder.reset_filterable_fields();
|
||||
builder.reset_criteria();
|
||||
builder.reset_stop_words();
|
||||
|
||||
let criterion = criterion.iter().map(|s| Criterion::from_str(s).unwrap()).collect();
|
||||
builder.set_criteria(criterion);
|
||||
}
|
||||
|
||||
(conf.configure)(&mut builder);
|
||||
|
||||
builder.execute(|_| (), || false).unwrap();
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let indexing_config = IndexDocumentsConfig {
|
||||
autogenerate_docids: conf.primary_key.is_none(),
|
||||
update_method: IndexDocumentsMethod::ReplaceDocuments,
|
||||
..Default::default()
|
||||
};
|
||||
let builder =
|
||||
IndexDocuments::new(&mut wtxn, &index, &config, indexing_config, |_| (), || false).unwrap();
|
||||
let documents = documents_from(conf.dataset, conf.dataset_format);
|
||||
let (builder, user_error) = builder.add_documents(documents).unwrap();
|
||||
user_error.unwrap();
|
||||
builder.execute().unwrap();
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
index
|
||||
}
|
||||
|
||||
pub fn run_benches(c: &mut criterion::Criterion, confs: &[Conf]) {
|
||||
for conf in confs {
|
||||
let index = base_setup(conf);
|
||||
|
||||
let file_name = Path::new(conf.dataset).file_name().and_then(|f| f.to_str()).unwrap();
|
||||
let name = format!("{}: {}", file_name, conf.group_name);
|
||||
let mut group = c.benchmark_group(&name);
|
||||
|
||||
for &query in conf.queries {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(query), &query, |b, &query| {
|
||||
b.iter(|| {
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let mut search = index.search(&rtxn);
|
||||
search.query(query).terms_matching_strategy(TermsMatchingStrategy::default());
|
||||
if let Some(filter) = conf.filter {
|
||||
let filter = Filter::from_str(filter).unwrap().unwrap();
|
||||
search.filter(filter);
|
||||
}
|
||||
if let Some(sort) = &conf.sort {
|
||||
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
|
||||
search.sort_criteria(sort);
|
||||
}
|
||||
let _ids = search.execute().unwrap();
|
||||
});
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
|
||||
index.prepare_for_closing().wait();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn documents_from(filename: &str, filetype: &str) -> DocumentsBatchReader<impl BufRead + Seek> {
|
||||
let reader = File::open(filename)
|
||||
.unwrap_or_else(|_| panic!("could not find the dataset in: {}", filename));
|
||||
let reader = BufReader::new(reader);
|
||||
let documents = match filetype {
|
||||
"csv" => documents_from_csv(reader).unwrap(),
|
||||
"json" => documents_from_json(reader).unwrap(),
|
||||
"jsonl" => documents_from_jsonl(reader).unwrap(),
|
||||
otherwise => panic!("invalid update format {:?}", otherwise),
|
||||
};
|
||||
DocumentsBatchReader::from_reader(Cursor::new(documents)).unwrap()
|
||||
}
|
||||
|
||||
fn documents_from_jsonl(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
|
||||
for result in serde_json::Deserializer::from_reader(reader).into_iter::<Object>() {
|
||||
let object = result?;
|
||||
documents.append_json_object(&object)?;
|
||||
}
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_json(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
|
||||
documents.append_json_array(reader)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_csv(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let csv = csv::Reader::from_reader(reader);
|
||||
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
documents.append_csv(csv)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
enum AllowedType {
|
||||
String,
|
||||
Number,
|
||||
}
|
||||
|
||||
fn parse_csv_header(header: &str) -> (String, AllowedType) {
|
||||
// if there are several separators we only split on the last one.
|
||||
match header.rsplit_once(':') {
|
||||
Some((field_name, field_type)) => match field_type {
|
||||
"string" => (field_name.to_string(), AllowedType::String),
|
||||
"number" => (field_name.to_string(), AllowedType::Number),
|
||||
// we may return an error in this case.
|
||||
_otherwise => (header.to_string(), AllowedType::String),
|
||||
},
|
||||
None => (header.to_string(), AllowedType::String),
|
||||
}
|
||||
}
|
||||
|
||||
struct CSVDocumentDeserializer<R>
|
||||
where
|
||||
R: Read,
|
||||
{
|
||||
documents: csv::StringRecordsIntoIter<R>,
|
||||
headers: Vec<(String, AllowedType)>,
|
||||
}
|
||||
|
||||
impl<R: Read> CSVDocumentDeserializer<R> {
|
||||
fn from_reader(reader: R) -> io::Result<Self> {
|
||||
let mut records = csv::Reader::from_reader(reader);
|
||||
|
||||
let headers = records.headers()?.into_iter().map(parse_csv_header).collect();
|
||||
|
||||
Ok(Self { documents: records.into_records(), headers })
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: Read> Iterator for CSVDocumentDeserializer<R> {
|
||||
type Item = anyhow::Result<Object>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let csv_document = self.documents.next()?;
|
||||
|
||||
match csv_document {
|
||||
Ok(csv_document) => {
|
||||
let mut document = Object::new();
|
||||
|
||||
for ((field_name, field_type), value) in
|
||||
self.headers.iter().zip(csv_document.into_iter())
|
||||
{
|
||||
let parsed_value: Result<Value, ParseFloatError> = match field_type {
|
||||
AllowedType::Number => {
|
||||
value.parse::<f64>().map(Value::from).map_err(Into::into)
|
||||
}
|
||||
AllowedType::String => Ok(Value::String(value.to_string())),
|
||||
};
|
||||
|
||||
match parsed_value {
|
||||
Ok(value) => drop(document.insert(field_name.to_string(), value)),
|
||||
Err(_e) => {
|
||||
return Some(Err(anyhow::anyhow!(
|
||||
"Value '{}' is not a valid number",
|
||||
value
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(Ok(document))
|
||||
}
|
||||
Err(e) => Some(Err(anyhow::anyhow!("Error parsing csv document: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
115
benchmarks/build.rs
Normal file
115
benchmarks/build.rs
Normal file
@ -0,0 +1,115 @@
|
||||
use std::fs::File;
|
||||
use std::io::{Cursor, Read, Seek, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::{env, fs};
|
||||
|
||||
use bytes::Bytes;
|
||||
use convert_case::{Case, Casing};
|
||||
use flate2::read::GzDecoder;
|
||||
use reqwest::IntoUrl;
|
||||
|
||||
const BASE_URL: &str = "https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets";
|
||||
|
||||
const DATASET_SONGS: (&str, &str) = ("smol-songs", "csv");
|
||||
const DATASET_SONGS_1_2: (&str, &str) = ("smol-songs-1_2", "csv");
|
||||
const DATASET_SONGS_3_4: (&str, &str) = ("smol-songs-3_4", "csv");
|
||||
const DATASET_SONGS_4_4: (&str, &str) = ("smol-songs-4_4", "csv");
|
||||
const DATASET_WIKI: (&str, &str) = ("smol-wiki-articles", "csv");
|
||||
const DATASET_WIKI_1_2: (&str, &str) = ("smol-wiki-articles-1_2", "csv");
|
||||
const DATASET_WIKI_3_4: (&str, &str) = ("smol-wiki-articles-3_4", "csv");
|
||||
const DATASET_WIKI_4_4: (&str, &str) = ("smol-wiki-articles-4_4", "csv");
|
||||
const DATASET_MOVIES: (&str, &str) = ("movies", "json");
|
||||
const DATASET_MOVIES_1_2: (&str, &str) = ("movies-1_2", "json");
|
||||
const DATASET_MOVIES_3_4: (&str, &str) = ("movies-3_4", "json");
|
||||
const DATASET_MOVIES_4_4: (&str, &str) = ("movies-4_4", "json");
|
||||
const DATASET_NESTED_MOVIES: (&str, &str) = ("nested_movies", "json");
|
||||
const DATASET_GEO: (&str, &str) = ("smol-all-countries", "jsonl");
|
||||
|
||||
const ALL_DATASETS: &[(&str, &str)] = &[
|
||||
DATASET_SONGS,
|
||||
DATASET_SONGS_1_2,
|
||||
DATASET_SONGS_3_4,
|
||||
DATASET_SONGS_4_4,
|
||||
DATASET_WIKI,
|
||||
DATASET_WIKI_1_2,
|
||||
DATASET_WIKI_3_4,
|
||||
DATASET_WIKI_4_4,
|
||||
DATASET_MOVIES,
|
||||
DATASET_MOVIES_1_2,
|
||||
DATASET_MOVIES_3_4,
|
||||
DATASET_MOVIES_4_4,
|
||||
DATASET_NESTED_MOVIES,
|
||||
DATASET_GEO,
|
||||
];
|
||||
|
||||
/// The name of the environment variable used to select the path
|
||||
/// of the directory containing the datasets
|
||||
const BASE_DATASETS_PATH_KEY: &str = "MILLI_BENCH_DATASETS_PATH";
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let out_dir = PathBuf::from(env::var(BASE_DATASETS_PATH_KEY).unwrap_or(env::var("OUT_DIR")?));
|
||||
|
||||
let benches_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?).join("benches");
|
||||
let mut manifest_paths_file = File::create(benches_dir.join("datasets_paths.rs"))?;
|
||||
write!(
|
||||
manifest_paths_file,
|
||||
r#"//! This file is generated by the build script.
|
||||
//! Do not modify by hand, use the build.rs file.
|
||||
#![allow(dead_code)]
|
||||
"#
|
||||
)?;
|
||||
writeln!(manifest_paths_file)?;
|
||||
|
||||
for (dataset, extension) in ALL_DATASETS {
|
||||
let out_path = out_dir.join(dataset);
|
||||
let out_file = out_path.with_extension(extension);
|
||||
|
||||
writeln!(
|
||||
&mut manifest_paths_file,
|
||||
r#"pub const {}: &str = {:?};"#,
|
||||
dataset.to_case(Case::ScreamingSnake),
|
||||
out_file.display(),
|
||||
)?;
|
||||
|
||||
if out_file.exists() {
|
||||
eprintln!(
|
||||
"The dataset {} already exists on the file system and will not be downloaded again",
|
||||
out_path.display(),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let url = format!("{}/{}.{}.gz", BASE_URL, dataset, extension);
|
||||
eprintln!("downloading: {}", url);
|
||||
let bytes = retry(|| download_dataset(url.clone()), 10)?;
|
||||
eprintln!("{} downloaded successfully", url);
|
||||
eprintln!("uncompressing in {}", out_file.display());
|
||||
uncompress_in_file(bytes, &out_file)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn retry<Ok, Err>(fun: impl Fn() -> Result<Ok, Err>, times: usize) -> Result<Ok, Err> {
|
||||
for _ in 0..times {
|
||||
if let ok @ Ok(_) = fun() {
|
||||
return ok;
|
||||
}
|
||||
}
|
||||
fun()
|
||||
}
|
||||
|
||||
fn download_dataset<U: IntoUrl>(url: U) -> anyhow::Result<Cursor<Bytes>> {
|
||||
let bytes =
|
||||
reqwest::blocking::Client::builder().timeout(None).build()?.get(url).send()?.bytes()?;
|
||||
Ok(Cursor::new(bytes))
|
||||
}
|
||||
|
||||
fn uncompress_in_file<R: Read + Seek, P: AsRef<Path>>(bytes: R, path: P) -> anyhow::Result<()> {
|
||||
let path = path.as_ref();
|
||||
let mut gz = GzDecoder::new(bytes);
|
||||
let mut dataset = Vec::new();
|
||||
gz.read_to_end(&mut dataset)?;
|
||||
|
||||
fs::write(path, dataset)?;
|
||||
Ok(())
|
||||
}
|
38
benchmarks/scripts/compare.sh
Executable file
38
benchmarks/scripts/compare.sh
Executable file
@ -0,0 +1,38 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Requirements:
|
||||
# - critcmp. See: https://github.com/BurntSushi/critcmp
|
||||
# - curl
|
||||
|
||||
# Usage
|
||||
# $ bash compare.sh json_file1 json_file1
|
||||
# ex: bash compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json
|
||||
|
||||
# Checking that critcmp is installed
|
||||
command -v critcmp > /dev/null 2>&1
|
||||
if [[ "$?" -ne 0 ]]; then
|
||||
echo 'You must install critcmp to make this script work.'
|
||||
echo 'See: https://github.com/BurntSushi/critcmp'
|
||||
echo ' $ cargo install critcmp'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
s3_url='https://milli-benchmarks.fra1.digitaloceanspaces.com/critcmp_results'
|
||||
|
||||
for file in $@
|
||||
do
|
||||
file_s3_url="$s3_url/$file"
|
||||
file_local_path="/tmp/$file"
|
||||
|
||||
if [[ ! -f $file_local_path ]]; then
|
||||
curl $file_s3_url --output $file_local_path --silent
|
||||
if [[ "$?" -ne 0 ]]; then
|
||||
echo 'curl command failed.'
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
path_list=$(echo " $@" | sed 's/ / \/tmp\//g')
|
||||
|
||||
critcmp $path_list
|
14
benchmarks/scripts/list.sh
Executable file
14
benchmarks/scripts/list.sh
Executable file
@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Requirements:
|
||||
# - curl
|
||||
# - grep
|
||||
|
||||
res=$(curl -s https://milli-benchmarks.fra1.digitaloceanspaces.com | grep -o '<Key>[^<]\+' | cut -c 5- | grep critcmp_results/ | cut -c 18-)
|
||||
|
||||
for pattern in "$@"
|
||||
do
|
||||
res=$(echo "$res" | grep $pattern)
|
||||
done
|
||||
|
||||
echo "$res"
|
5
benchmarks/src/lib.rs
Normal file
5
benchmarks/src/lib.rs
Normal file
@ -0,0 +1,5 @@
|
||||
//! This library is only used to isolate the benchmarks
|
||||
//! from the original milli library.
|
||||
//!
|
||||
//! It does not include interesting functions for milli library
|
||||
//! users only for milli contributors.
|
@ -45,7 +45,7 @@ log_level = "INFO"
|
||||
|
||||
dump_dir = "dumps/"
|
||||
# Sets the directory where Meilisearch will create dump files.
|
||||
# https://docs.meilisearch.com/learn/configuration/instance_options.html#dumps-destination
|
||||
# https://docs.meilisearch.com/learn/configuration/instance_options.html#dump-directory
|
||||
|
||||
# import_dump = "./path/to/my/file.dump"
|
||||
# Imports the dump file located at the specified path. Path must point to a .dump file.
|
||||
|
@ -1,7 +1,14 @@
|
||||
[package]
|
||||
name = "dump"
|
||||
version = "1.0.0"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
description.workspace = true
|
||||
edition.workspace = true
|
||||
homepage.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.65"
|
||||
|
@ -198,17 +198,16 @@ impl From<KindWithContent> for KindDump {
|
||||
#[cfg(test)]
|
||||
pub(crate) mod test {
|
||||
use std::fs::File;
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::io::Seek;
|
||||
use std::str::FromStr;
|
||||
|
||||
use big_s::S;
|
||||
use maplit::btreeset;
|
||||
use meilisearch_types::index_uid::IndexUid;
|
||||
use meilisearch_types::index_uid_pattern::IndexUidPattern;
|
||||
use meilisearch_types::keys::{Action, Key};
|
||||
use meilisearch_types::milli::update::Setting;
|
||||
use meilisearch_types::milli::{self};
|
||||
use meilisearch_types::settings::{Checked, Settings};
|
||||
use meilisearch_types::star_or::StarOr;
|
||||
use meilisearch_types::tasks::{Details, Status};
|
||||
use serde_json::{json, Map, Value};
|
||||
use time::macros::datetime;
|
||||
@ -249,17 +248,17 @@ pub(crate) mod test {
|
||||
|
||||
pub fn create_test_settings() -> Settings<Checked> {
|
||||
let settings = Settings {
|
||||
displayed_attributes: Setting::Set(vec![S("race"), S("name")]).into(),
|
||||
searchable_attributes: Setting::Set(vec![S("name"), S("race")]).into(),
|
||||
filterable_attributes: Setting::Set(btreeset! { S("race"), S("age") }).into(),
|
||||
sortable_attributes: Setting::Set(btreeset! { S("age") }).into(),
|
||||
ranking_rules: Setting::NotSet.into(),
|
||||
stop_words: Setting::NotSet.into(),
|
||||
synonyms: Setting::NotSet.into(),
|
||||
distinct_attribute: Setting::NotSet.into(),
|
||||
typo_tolerance: Setting::NotSet.into(),
|
||||
faceting: Setting::NotSet.into(),
|
||||
pagination: Setting::NotSet.into(),
|
||||
displayed_attributes: Setting::Set(vec![S("race"), S("name")]),
|
||||
searchable_attributes: Setting::Set(vec![S("name"), S("race")]),
|
||||
filterable_attributes: Setting::Set(btreeset! { S("race"), S("age") }),
|
||||
sortable_attributes: Setting::Set(btreeset! { S("age") }),
|
||||
ranking_rules: Setting::NotSet,
|
||||
stop_words: Setting::NotSet,
|
||||
synonyms: Setting::NotSet,
|
||||
distinct_attribute: Setting::NotSet,
|
||||
typo_tolerance: Setting::NotSet,
|
||||
faceting: Setting::NotSet,
|
||||
pagination: Setting::NotSet,
|
||||
_kind: std::marker::PhantomData,
|
||||
};
|
||||
settings.check()
|
||||
@ -341,7 +340,7 @@ pub(crate) mod test {
|
||||
name: Some(S("doggos_key")),
|
||||
uid: Uuid::from_str("9f8a34da-b6b2-42f0-939b-dbd4c3448655").unwrap(),
|
||||
actions: vec![Action::DocumentsAll],
|
||||
indexes: vec![StarOr::Other(IndexUid::from_str("doggos").unwrap())],
|
||||
indexes: vec![IndexUidPattern::from_str("doggos").unwrap()],
|
||||
expires_at: Some(datetime!(4130-03-14 12:21 UTC)),
|
||||
created_at: datetime!(1960-11-15 0:00 UTC),
|
||||
updated_at: datetime!(2022-11-10 0:00 UTC),
|
||||
@ -351,7 +350,7 @@ pub(crate) mod test {
|
||||
name: Some(S("master_key")),
|
||||
uid: Uuid::from_str("4622f717-1c00-47bb-a494-39d76a49b591").unwrap(),
|
||||
actions: vec![Action::All],
|
||||
indexes: vec![StarOr::Star],
|
||||
indexes: vec![IndexUidPattern::all()],
|
||||
expires_at: None,
|
||||
created_at: datetime!(0000-01-01 00:01 UTC),
|
||||
updated_at: datetime!(1964-05-04 17:25 UTC),
|
||||
@ -410,7 +409,7 @@ pub(crate) mod test {
|
||||
// create the dump
|
||||
let mut file = tempfile::tempfile().unwrap();
|
||||
dump.persist_to(&mut file).unwrap();
|
||||
file.seek(SeekFrom::Start(0)).unwrap();
|
||||
file.rewind().unwrap();
|
||||
|
||||
file
|
||||
}
|
||||
|
@ -10,6 +10,7 @@ expression: products.settings().unwrap()
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [],
|
||||
"sortableAttributes": [],
|
||||
"rankingRules": [
|
||||
"typo",
|
||||
"words",
|
||||
|
@ -13,13 +13,17 @@ expression: movies.settings().unwrap()
|
||||
"genres",
|
||||
"id"
|
||||
],
|
||||
"sortableAttributes": [
|
||||
"genres",
|
||||
"id"
|
||||
],
|
||||
"rankingRules": [
|
||||
"typo",
|
||||
"words",
|
||||
"proximity",
|
||||
"attribute",
|
||||
"exactness",
|
||||
"asc(release_date)"
|
||||
"release_date:asc"
|
||||
],
|
||||
"stopWords": [],
|
||||
"synonyms": {},
|
||||
|
@ -10,6 +10,7 @@ expression: spells.settings().unwrap()
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [],
|
||||
"sortableAttributes": [],
|
||||
"rankingRules": [
|
||||
"typo",
|
||||
"words",
|
||||
|
@ -1,4 +1,3 @@
|
||||
use std::collections::BTreeSet;
|
||||
use std::str::FromStr;
|
||||
|
||||
use super::v2_to_v3::CompatV2ToV3;
|
||||
@ -102,14 +101,15 @@ impl CompatIndexV1ToV2 {
|
||||
|
||||
impl From<v1::settings::Settings> for v2::Settings<v2::Unchecked> {
|
||||
fn from(source: v1::settings::Settings) -> Self {
|
||||
let displayed_attributes = source
|
||||
.displayed_attributes
|
||||
.map(|opt| opt.map(|displayed_attributes| displayed_attributes.into_iter().collect()));
|
||||
let attributes_for_faceting = source.attributes_for_faceting.map(|opt| {
|
||||
opt.map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect())
|
||||
});
|
||||
let ranking_rules = source.ranking_rules.map(|opt| {
|
||||
opt.map(|ranking_rules| {
|
||||
Self {
|
||||
displayed_attributes: option_to_setting(source.displayed_attributes)
|
||||
.map(|displayed| displayed.into_iter().collect()),
|
||||
searchable_attributes: option_to_setting(source.searchable_attributes),
|
||||
filterable_attributes: option_to_setting(source.attributes_for_faceting.clone())
|
||||
.map(|filterable| filterable.into_iter().collect()),
|
||||
sortable_attributes: option_to_setting(source.attributes_for_faceting)
|
||||
.map(|sortable| sortable.into_iter().collect()),
|
||||
ranking_rules: option_to_setting(source.ranking_rules).map(|ranking_rules| {
|
||||
ranking_rules
|
||||
.into_iter()
|
||||
.filter_map(|ranking_rule| {
|
||||
@ -119,26 +119,33 @@ impl From<v1::settings::Settings> for v2::Settings<v2::Unchecked> {
|
||||
ranking_rule.into();
|
||||
criterion.as_ref().map(ToString::to_string)
|
||||
}
|
||||
Err(()) => Some(ranking_rule),
|
||||
Err(()) => {
|
||||
log::warn!(
|
||||
"Could not import the following ranking rule: `{}`.",
|
||||
ranking_rule
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
});
|
||||
|
||||
Self {
|
||||
displayed_attributes,
|
||||
searchable_attributes: source.searchable_attributes,
|
||||
filterable_attributes: attributes_for_faceting,
|
||||
ranking_rules,
|
||||
stop_words: source.stop_words,
|
||||
synonyms: source.synonyms,
|
||||
distinct_attribute: source.distinct_attribute,
|
||||
}),
|
||||
stop_words: option_to_setting(source.stop_words),
|
||||
synonyms: option_to_setting(source.synonyms),
|
||||
distinct_attribute: option_to_setting(source.distinct_attribute),
|
||||
_kind: std::marker::PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn option_to_setting<T>(opt: Option<Option<T>>) -> v2::Setting<T> {
|
||||
match opt {
|
||||
Some(Some(t)) => v2::Setting::Set(t),
|
||||
None => v2::Setting::NotSet,
|
||||
Some(None) => v2::Setting::Reset,
|
||||
}
|
||||
}
|
||||
|
||||
impl From<v1::update::UpdateStatus> for Option<v2::updates::UpdateStatus> {
|
||||
fn from(source: v1::update::UpdateStatus) -> Self {
|
||||
use v1::update::UpdateStatus as UpdateStatusV1;
|
||||
@ -251,38 +258,27 @@ impl From<v1::update::UpdateType> for Option<v2::updates::UpdateMeta> {
|
||||
|
||||
impl From<v1::settings::SettingsUpdate> for v2::Settings<v2::Unchecked> {
|
||||
fn from(source: v1::settings::SettingsUpdate) -> Self {
|
||||
let displayed_attributes: Option<Option<BTreeSet<String>>> =
|
||||
source.displayed_attributes.into();
|
||||
|
||||
let attributes_for_faceting: Option<Option<Vec<String>>> =
|
||||
source.attributes_for_faceting.into();
|
||||
|
||||
let ranking_rules: Option<Option<Vec<v1::settings::RankingRule>>> =
|
||||
source.ranking_rules.into();
|
||||
let ranking_rules = v2::Setting::from(source.ranking_rules);
|
||||
|
||||
// go from the concrete types of v1 (RankingRule) to the concrete type of v2 (Criterion),
|
||||
// and then back to string as this is what the settings manipulate
|
||||
let ranking_rules = ranking_rules.map(|opt| {
|
||||
opt.map(|ranking_rules| {
|
||||
ranking_rules
|
||||
.into_iter()
|
||||
// filter out the WordsPosition ranking rule that exists in v1 but not v2
|
||||
.filter_map(|ranking_rule| {
|
||||
Option::<v2::settings::Criterion>::from(ranking_rule)
|
||||
})
|
||||
.map(|criterion| criterion.to_string())
|
||||
.collect()
|
||||
})
|
||||
let ranking_rules = ranking_rules.map(|ranking_rules| {
|
||||
ranking_rules
|
||||
.into_iter()
|
||||
// filter out the WordsPosition ranking rule that exists in v1 but not v2
|
||||
.filter_map(Option::<v2::settings::Criterion>::from)
|
||||
.map(|criterion| criterion.to_string())
|
||||
.collect()
|
||||
});
|
||||
|
||||
Self {
|
||||
displayed_attributes: displayed_attributes.map(|opt| {
|
||||
opt.map(|displayed_attributes| displayed_attributes.into_iter().collect())
|
||||
}),
|
||||
displayed_attributes: v2::Setting::from(source.displayed_attributes)
|
||||
.map(|displayed_attributes| displayed_attributes.into_iter().collect()),
|
||||
searchable_attributes: source.searchable_attributes.into(),
|
||||
filterable_attributes: attributes_for_faceting.map(|opt| {
|
||||
opt.map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect())
|
||||
}),
|
||||
filterable_attributes: v2::Setting::from(source.attributes_for_faceting.clone())
|
||||
.map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect()),
|
||||
sortable_attributes: v2::Setting::from(source.attributes_for_faceting)
|
||||
.map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect()),
|
||||
ranking_rules,
|
||||
stop_words: source.stop_words.into(),
|
||||
synonyms: source.synonyms.into(),
|
||||
@ -314,12 +310,12 @@ impl From<v1::settings::RankingRule> for Option<v2::settings::Criterion> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<v1::settings::UpdateState<T>> for Option<Option<T>> {
|
||||
impl<T> From<v1::settings::UpdateState<T>> for v2::Setting<T> {
|
||||
fn from(source: v1::settings::UpdateState<T>) -> Self {
|
||||
match source {
|
||||
v1::settings::UpdateState::Update(new_value) => Some(Some(new_value)),
|
||||
v1::settings::UpdateState::Clear => Some(None),
|
||||
v1::settings::UpdateState::Nothing => None,
|
||||
v1::settings::UpdateState::Update(new_value) => v2::Setting::Set(new_value),
|
||||
v1::settings::UpdateState::Clear => v2::Setting::Reset,
|
||||
v1::settings::UpdateState::Nothing => v2::Setting::NotSet,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -352,7 +348,7 @@ pub(crate) mod test {
|
||||
// tasks
|
||||
let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"ad6245d98d1a8e30535f3339a9a8d223");
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2298010973ee98cf4670787314176a3a");
|
||||
assert_eq!(update_files.len(), 9);
|
||||
assert!(update_files[..].iter().all(|u| u.is_none())); // no update file in dumps v1
|
||||
|
||||
|
@ -361,28 +361,29 @@ impl From<String> for v3::Code {
|
||||
}
|
||||
}
|
||||
|
||||
fn option_to_setting<T>(opt: Option<Option<T>>) -> v3::Setting<T> {
|
||||
match opt {
|
||||
Some(Some(t)) => v3::Setting::Set(t),
|
||||
None => v3::Setting::NotSet,
|
||||
Some(None) => v3::Setting::Reset,
|
||||
impl<A> From<v2::Setting<A>> for v3::Setting<A> {
|
||||
fn from(setting: v2::Setting<A>) -> Self {
|
||||
match setting {
|
||||
v2::settings::Setting::Set(a) => v3::settings::Setting::Set(a),
|
||||
v2::settings::Setting::Reset => v3::settings::Setting::Reset,
|
||||
v2::settings::Setting::NotSet => v3::settings::Setting::NotSet,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<v2::Settings<T>> for v3::Settings<v3::Unchecked> {
|
||||
fn from(settings: v2::Settings<T>) -> Self {
|
||||
v3::Settings {
|
||||
displayed_attributes: option_to_setting(settings.displayed_attributes),
|
||||
searchable_attributes: option_to_setting(settings.searchable_attributes),
|
||||
filterable_attributes: option_to_setting(settings.filterable_attributes)
|
||||
.map(|f| f.into_iter().collect()),
|
||||
sortable_attributes: v3::Setting::NotSet,
|
||||
ranking_rules: option_to_setting(settings.ranking_rules).map(|criteria| {
|
||||
displayed_attributes: settings.displayed_attributes.into(),
|
||||
searchable_attributes: settings.searchable_attributes.into(),
|
||||
filterable_attributes: settings.filterable_attributes.into(),
|
||||
sortable_attributes: settings.sortable_attributes.into(),
|
||||
ranking_rules: v3::Setting::from(settings.ranking_rules).map(|criteria| {
|
||||
criteria.into_iter().map(|criterion| patch_ranking_rules(&criterion)).collect()
|
||||
}),
|
||||
stop_words: option_to_setting(settings.stop_words),
|
||||
synonyms: option_to_setting(settings.synonyms),
|
||||
distinct_attribute: option_to_setting(settings.distinct_attribute),
|
||||
stop_words: settings.stop_words.into(),
|
||||
synonyms: settings.synonyms.into(),
|
||||
distinct_attribute: settings.distinct_attribute.into(),
|
||||
_kind: std::marker::PhantomData,
|
||||
}
|
||||
}
|
||||
@ -394,6 +395,7 @@ fn patch_ranking_rules(ranking_rule: &str) -> String {
|
||||
Ok(v2::settings::Criterion::Typo) => String::from("typo"),
|
||||
Ok(v2::settings::Criterion::Proximity) => String::from("proximity"),
|
||||
Ok(v2::settings::Criterion::Attribute) => String::from("attribute"),
|
||||
Ok(v2::settings::Criterion::Sort) => String::from("sort"),
|
||||
Ok(v2::settings::Criterion::Exactness) => String::from("exactness"),
|
||||
Ok(v2::settings::Criterion::Asc(name)) => format!("{name}:asc"),
|
||||
Ok(v2::settings::Criterion::Desc(name)) => format!("{name}:desc"),
|
||||
|
@ -1,3 +1,5 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use super::v4_to_v5::{CompatIndexV4ToV5, CompatV4ToV5};
|
||||
use crate::reader::{v5, v6, Document, UpdateFile};
|
||||
use crate::Result;
|
||||
@ -179,10 +181,8 @@ impl CompatV5ToV6 {
|
||||
.indexes
|
||||
.into_iter()
|
||||
.map(|index| match index {
|
||||
v5::StarOr::Star => v6::StarOr::Star,
|
||||
v5::StarOr::Other(uid) => {
|
||||
v6::StarOr::Other(v6::IndexUid::new_unchecked(uid.as_str()))
|
||||
}
|
||||
v5::StarOr::Star => v6::IndexUidPattern::all(),
|
||||
v5::StarOr::Other(uid) => v6::IndexUidPattern::new_unchecked(uid.as_str()),
|
||||
})
|
||||
.collect(),
|
||||
expires_at: key.expires_at,
|
||||
@ -254,51 +254,50 @@ impl<T> From<v5::Setting<T>> for v6::Setting<T> {
|
||||
impl From<v5::ResponseError> for v6::ResponseError {
|
||||
fn from(error: v5::ResponseError) -> Self {
|
||||
let code = match error.error_code.as_ref() {
|
||||
"index_creation_failed" => v6::Code::CreateIndex,
|
||||
"index_creation_failed" => v6::Code::IndexCreationFailed,
|
||||
"index_already_exists" => v6::Code::IndexAlreadyExists,
|
||||
"index_not_found" => v6::Code::IndexNotFound,
|
||||
"invalid_index_uid" => v6::Code::InvalidIndexUid,
|
||||
"invalid_min_word_length_for_typo" => v6::Code::InvalidMinWordLengthForTypo,
|
||||
"invalid_min_word_length_for_typo" => v6::Code::InvalidSettingsTypoTolerance,
|
||||
"invalid_state" => v6::Code::InvalidState,
|
||||
"primary_key_inference_failed" => v6::Code::NoPrimaryKeyCandidateFound,
|
||||
"index_primary_key_already_exists" => v6::Code::PrimaryKeyAlreadyPresent,
|
||||
"primary_key_inference_failed" => v6::Code::IndexPrimaryKeyNoCandidateFound,
|
||||
"index_primary_key_already_exists" => v6::Code::IndexPrimaryKeyAlreadyExists,
|
||||
"max_fields_limit_exceeded" => v6::Code::MaxFieldsLimitExceeded,
|
||||
"missing_document_id" => v6::Code::MissingDocumentId,
|
||||
"invalid_document_id" => v6::Code::InvalidDocumentId,
|
||||
"invalid_filter" => v6::Code::Filter,
|
||||
"invalid_sort" => v6::Code::Sort,
|
||||
"invalid_filter" => v6::Code::InvalidSettingsFilterableAttributes,
|
||||
"invalid_sort" => v6::Code::InvalidSettingsSortableAttributes,
|
||||
"bad_parameter" => v6::Code::BadParameter,
|
||||
"bad_request" => v6::Code::BadRequest,
|
||||
"database_size_limit_reached" => v6::Code::DatabaseSizeLimitReached,
|
||||
"document_not_found" => v6::Code::DocumentNotFound,
|
||||
"internal" => v6::Code::Internal,
|
||||
"invalid_geo_field" => v6::Code::InvalidDocumentGeoField,
|
||||
"invalid_ranking_rule" => v6::Code::InvalidRankingRule,
|
||||
"invalid_store_file" => v6::Code::InvalidStore,
|
||||
"invalid_api_key" => v6::Code::InvalidToken,
|
||||
"invalid_ranking_rule" => v6::Code::InvalidSettingsRankingRules,
|
||||
"invalid_store_file" => v6::Code::InvalidStoreFile,
|
||||
"invalid_api_key" => v6::Code::InvalidApiKey,
|
||||
"missing_authorization_header" => v6::Code::MissingAuthorizationHeader,
|
||||
"no_space_left_on_device" => v6::Code::NoSpaceLeftOnDevice,
|
||||
"dump_not_found" => v6::Code::DumpNotFound,
|
||||
"task_not_found" => v6::Code::TaskNotFound,
|
||||
"payload_too_large" => v6::Code::PayloadTooLarge,
|
||||
"unretrievable_document" => v6::Code::RetrieveDocument,
|
||||
"search_error" => v6::Code::SearchDocuments,
|
||||
"unretrievable_document" => v6::Code::UnretrievableDocument,
|
||||
"unsupported_media_type" => v6::Code::UnsupportedMediaType,
|
||||
"dump_already_processing" => v6::Code::DumpAlreadyInProgress,
|
||||
"dump_already_processing" => v6::Code::DumpAlreadyProcessing,
|
||||
"dump_process_failed" => v6::Code::DumpProcessFailed,
|
||||
"invalid_content_type" => v6::Code::InvalidContentType,
|
||||
"missing_content_type" => v6::Code::MissingContentType,
|
||||
"malformed_payload" => v6::Code::MalformedPayload,
|
||||
"missing_payload" => v6::Code::MissingPayload,
|
||||
"api_key_not_found" => v6::Code::ApiKeyNotFound,
|
||||
"missing_parameter" => v6::Code::UnretrievableErrorCode,
|
||||
"missing_parameter" => v6::Code::BadRequest,
|
||||
"invalid_api_key_actions" => v6::Code::InvalidApiKeyActions,
|
||||
"invalid_api_key_indexes" => v6::Code::InvalidApiKeyIndexes,
|
||||
"invalid_api_key_expires_at" => v6::Code::InvalidApiKeyExpiresAt,
|
||||
"invalid_api_key_description" => v6::Code::InvalidApiKeyDescription,
|
||||
"invalid_api_key_name" => v6::Code::InvalidApiKeyName,
|
||||
"invalid_api_key_uid" => v6::Code::InvalidApiKeyUid,
|
||||
"immutable_field" => v6::Code::ImmutableField,
|
||||
"immutable_field" => v6::Code::BadRequest,
|
||||
"api_key_already_exists" => v6::Code::ApiKeyAlreadyExists,
|
||||
other => {
|
||||
log::warn!("Unknown error code {}", other);
|
||||
@ -316,7 +315,26 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
|
||||
searchable_attributes: settings.searchable_attributes.into(),
|
||||
filterable_attributes: settings.filterable_attributes.into(),
|
||||
sortable_attributes: settings.sortable_attributes.into(),
|
||||
ranking_rules: settings.ranking_rules.into(),
|
||||
ranking_rules: {
|
||||
match settings.ranking_rules {
|
||||
v5::settings::Setting::Set(ranking_rules) => {
|
||||
let mut new_ranking_rules = vec![];
|
||||
for rule in ranking_rules {
|
||||
match v6::RankingRuleView::from_str(&rule) {
|
||||
Ok(new_rule) => {
|
||||
new_ranking_rules.push(new_rule);
|
||||
}
|
||||
Err(_) => {
|
||||
log::warn!("Error while importing settings. The ranking rule `{rule}` does not exist anymore.")
|
||||
}
|
||||
}
|
||||
}
|
||||
v6::Setting::Set(new_ranking_rules)
|
||||
}
|
||||
v5::settings::Setting::Reset => v6::Setting::Reset,
|
||||
v5::settings::Setting::NotSet => v6::Setting::NotSet,
|
||||
}
|
||||
},
|
||||
stop_words: settings.stop_words.into(),
|
||||
synonyms: settings.synonyms.into(),
|
||||
distinct_attribute: settings.distinct_attribute.into(),
|
||||
@ -419,7 +437,7 @@ pub(crate) mod test {
|
||||
// tasks
|
||||
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"10c673c97f053830aa659876d7aa0b53");
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
|
||||
assert_eq!(update_files.len(), 22);
|
||||
assert!(update_files[0].is_none()); // the dump creation
|
||||
assert!(update_files[1].is_some()); // the enqueued document addition
|
||||
|
@ -201,7 +201,7 @@ pub(crate) mod test {
|
||||
// tasks
|
||||
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"10c673c97f053830aa659876d7aa0b53");
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
|
||||
assert_eq!(update_files.len(), 22);
|
||||
assert!(update_files[0].is_none()); // the dump creation
|
||||
assert!(update_files[1].is_some()); // the enqueued document addition
|
||||
@ -279,7 +279,7 @@ pub(crate) mod test {
|
||||
// tasks
|
||||
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"12eca43d5d1e1f334200eb4df653b0c9");
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"c2445ddd1785528b80f2ba534d3bd00c");
|
||||
assert_eq!(update_files.len(), 10);
|
||||
assert!(update_files[0].is_some()); // the enqueued document addition
|
||||
assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
|
||||
@ -356,7 +356,7 @@ pub(crate) mod test {
|
||||
// tasks
|
||||
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2f51c6345fabccf47b18c82bad618ffe");
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"cd12efd308fe3ed226356a727ab42ed3");
|
||||
assert_eq!(update_files.len(), 10);
|
||||
assert!(update_files[0].is_some()); // the enqueued document addition
|
||||
assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
|
||||
@ -449,7 +449,7 @@ pub(crate) mod test {
|
||||
// tasks
|
||||
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"b27292d0bb86d4b4dd1b375a46b33890");
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"bc616290adfe7d09a624cf6065ca9069");
|
||||
assert_eq!(update_files.len(), 9);
|
||||
assert!(update_files[0].is_some()); // the enqueued document addition
|
||||
assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
|
||||
@ -530,6 +530,82 @@ pub(crate) mod test {
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn import_dump_v2_from_meilisearch_v0_22_0_issue_3435() {
|
||||
let dump = File::open("tests/assets/v2-v0.22.0.dump").unwrap();
|
||||
let mut dump = DumpReader::open(dump).unwrap();
|
||||
|
||||
// top level infos
|
||||
insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");
|
||||
assert_eq!(dump.instance_uid().unwrap(), None);
|
||||
|
||||
// tasks
|
||||
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2db37756d8af1fb7623436b76e8956a6");
|
||||
assert_eq!(update_files.len(), 8);
|
||||
assert!(update_files[0..].iter().all(|u| u.is_none())); // everything already processed
|
||||
|
||||
// keys
|
||||
let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"d751713988987e9331980363e24189ce");
|
||||
|
||||
// indexes
|
||||
let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
// the index are not ordered in any way by default
|
||||
indexes.sort_by_key(|index| index.metadata().uid.to_string());
|
||||
|
||||
let mut products = indexes.pop().unwrap();
|
||||
let mut movies = indexes.pop().unwrap();
|
||||
let mut spells = indexes.pop().unwrap();
|
||||
assert!(indexes.is_empty());
|
||||
|
||||
// products
|
||||
insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
|
||||
{
|
||||
"uid": "products",
|
||||
"primaryKey": "sku",
|
||||
"createdAt": "[now]",
|
||||
"updatedAt": "[now]"
|
||||
}
|
||||
"###);
|
||||
|
||||
insta::assert_json_snapshot!(products.settings().unwrap());
|
||||
let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
|
||||
|
||||
// movies
|
||||
insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
|
||||
{
|
||||
"uid": "movies",
|
||||
"primaryKey": "id",
|
||||
"createdAt": "[now]",
|
||||
"updatedAt": "[now]"
|
||||
}
|
||||
"###);
|
||||
|
||||
insta::assert_json_snapshot!(movies.settings().unwrap());
|
||||
let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");
|
||||
|
||||
// spells
|
||||
insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
|
||||
{
|
||||
"uid": "dnd_spells",
|
||||
"primaryKey": "index",
|
||||
"createdAt": "[now]",
|
||||
"updatedAt": "[now]"
|
||||
}
|
||||
"###);
|
||||
|
||||
insta::assert_json_snapshot!(spells.settings().unwrap());
|
||||
let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn import_dump_v1() {
|
||||
let dump = File::open("tests/assets/v1.dump").unwrap();
|
||||
@ -542,7 +618,7 @@ pub(crate) mod test {
|
||||
// tasks
|
||||
let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"9725ccfceea3f8d5846c44006c9e1e7b");
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"8df6eab075a44b3c1af6b726f9fd9a43");
|
||||
assert_eq!(update_files.len(), 9);
|
||||
assert!(update_files[..].iter().all(|u| u.is_none())); // no update file in dump v1
|
||||
|
||||
|
@ -10,6 +10,7 @@ expression: spells.settings().unwrap()
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [],
|
||||
"sortableAttributes": [],
|
||||
"rankingRules": [
|
||||
"typo",
|
||||
"words",
|
||||
|
@ -10,6 +10,7 @@ expression: products.settings().unwrap()
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [],
|
||||
"sortableAttributes": [],
|
||||
"rankingRules": [
|
||||
"typo",
|
||||
"words",
|
||||
|
@ -13,6 +13,10 @@ expression: movies.settings().unwrap()
|
||||
"genres",
|
||||
"id"
|
||||
],
|
||||
"sortableAttributes": [
|
||||
"genres",
|
||||
"id"
|
||||
],
|
||||
"rankingRules": [
|
||||
"typo",
|
||||
"words",
|
||||
|
@ -0,0 +1,25 @@
|
||||
---
|
||||
source: dump/src/reader/mod.rs
|
||||
expression: spells.settings().unwrap()
|
||||
---
|
||||
{
|
||||
"displayedAttributes": [
|
||||
"*"
|
||||
],
|
||||
"searchableAttributes": [
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [],
|
||||
"sortableAttributes": [],
|
||||
"rankingRules": [
|
||||
"words",
|
||||
"typo",
|
||||
"proximity",
|
||||
"attribute",
|
||||
"sort",
|
||||
"exactness"
|
||||
],
|
||||
"stopWords": [],
|
||||
"synonyms": {},
|
||||
"distinctAttribute": null
|
||||
}
|
@ -0,0 +1,39 @@
|
||||
---
|
||||
source: dump/src/reader/mod.rs
|
||||
expression: products.settings().unwrap()
|
||||
---
|
||||
{
|
||||
"displayedAttributes": [
|
||||
"*"
|
||||
],
|
||||
"searchableAttributes": [
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [],
|
||||
"sortableAttributes": [],
|
||||
"rankingRules": [
|
||||
"words",
|
||||
"typo",
|
||||
"proximity",
|
||||
"attribute",
|
||||
"sort",
|
||||
"exactness"
|
||||
],
|
||||
"stopWords": [],
|
||||
"synonyms": {
|
||||
"android": [
|
||||
"phone",
|
||||
"smartphone"
|
||||
],
|
||||
"iphone": [
|
||||
"phone",
|
||||
"smartphone"
|
||||
],
|
||||
"phone": [
|
||||
"android",
|
||||
"iphone",
|
||||
"smartphone"
|
||||
]
|
||||
},
|
||||
"distinctAttribute": null
|
||||
}
|
@ -0,0 +1,30 @@
|
||||
---
|
||||
source: dump/src/reader/mod.rs
|
||||
expression: movies.settings().unwrap()
|
||||
---
|
||||
{
|
||||
"displayedAttributes": [
|
||||
"*"
|
||||
],
|
||||
"searchableAttributes": [
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [
|
||||
"genres",
|
||||
"id"
|
||||
],
|
||||
"sortableAttributes": [
|
||||
"release_date"
|
||||
],
|
||||
"rankingRules": [
|
||||
"words",
|
||||
"typo",
|
||||
"proximity",
|
||||
"attribute",
|
||||
"exactness",
|
||||
"release_date:asc"
|
||||
],
|
||||
"stopWords": [],
|
||||
"synonyms": {},
|
||||
"distinctAttribute": null
|
||||
}
|
@ -41,6 +41,7 @@ use super::Document;
|
||||
use crate::{IndexMetadata, Result, Version};
|
||||
|
||||
pub type Settings<T> = settings::Settings<T>;
|
||||
pub type Setting<T> = settings::Setting<T>;
|
||||
pub type Checked = settings::Checked;
|
||||
pub type Unchecked = settings::Unchecked;
|
||||
|
||||
@ -306,4 +307,81 @@ pub(crate) mod test {
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_dump_v2_from_meilisearch_v0_22_0_issue_3435() {
|
||||
let dump = File::open("tests/assets/v2-v0.22.0.dump").unwrap();
|
||||
let dir = TempDir::new().unwrap();
|
||||
let mut dump = BufReader::new(dump);
|
||||
let gz = GzDecoder::new(&mut dump);
|
||||
let mut archive = tar::Archive::new(gz);
|
||||
archive.unpack(dir.path()).unwrap();
|
||||
|
||||
let mut dump = V2Reader::open(dir).unwrap();
|
||||
|
||||
// top level infos
|
||||
insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");
|
||||
|
||||
// tasks
|
||||
let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
|
||||
let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
|
||||
meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"aca8ba13046272664eb3ea2da3031633");
|
||||
assert_eq!(update_files.len(), 8);
|
||||
assert!(update_files[0..].iter().all(|u| u.is_none())); // everything has already been processed
|
||||
|
||||
// indexes
|
||||
let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
// the index are not ordered in any way by default
|
||||
indexes.sort_by_key(|index| index.metadata().uid.to_string());
|
||||
|
||||
let mut products = indexes.pop().unwrap();
|
||||
let mut movies = indexes.pop().unwrap();
|
||||
let mut spells = indexes.pop().unwrap();
|
||||
assert!(indexes.is_empty());
|
||||
|
||||
// products
|
||||
insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
|
||||
{
|
||||
"uid": "products",
|
||||
"primaryKey": "sku",
|
||||
"createdAt": "[now]",
|
||||
"updatedAt": "[now]"
|
||||
}
|
||||
"###);
|
||||
|
||||
insta::assert_json_snapshot!(products.settings().unwrap());
|
||||
let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
|
||||
|
||||
// movies
|
||||
insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
|
||||
{
|
||||
"uid": "movies",
|
||||
"primaryKey": "id",
|
||||
"createdAt": "[now]",
|
||||
"updatedAt": "[now]"
|
||||
}
|
||||
"###);
|
||||
|
||||
insta::assert_json_snapshot!(movies.settings().unwrap());
|
||||
let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");
|
||||
|
||||
// spells
|
||||
insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
|
||||
{
|
||||
"uid": "dnd_spells",
|
||||
"primaryKey": "index",
|
||||
"createdAt": "[now]",
|
||||
"updatedAt": "[now]"
|
||||
}
|
||||
"###);
|
||||
|
||||
insta::assert_json_snapshot!(spells.settings().unwrap());
|
||||
let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
|
||||
}
|
||||
}
|
||||
|
@@ -1,35 +1,33 @@
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::Display;
use std::fmt;
use std::marker::PhantomData;
use std::str::FromStr;

use once_cell::sync::Lazy;
use regex::Regex;
use serde::{Deserialize, Deserializer};

#[cfg(test)]
fn serialize_with_wildcard<S>(
    field: &Option<Option<Vec<String>>>,
    field: &Setting<Vec<String>>,
    s: S,
) -> std::result::Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    let wildcard = vec!["*".to_string()];
    s.serialize_some(&field.as_ref().map(|o| o.as_ref().unwrap_or(&wildcard)))
}
use serde::Serialize;

fn deserialize_some<'de, T, D>(deserializer: D) -> std::result::Result<Option<T>, D::Error>
where
    T: Deserialize<'de>,
    D: Deserializer<'de>,
{
    Deserialize::deserialize(deserializer).map(Some)
    let wildcard = vec!["*".to_string()];
    match field {
        Setting::Set(value) => Some(value),
        Setting::Reset => Some(&wildcard),
        Setting::NotSet => None,
    }
    .serialize(s)
}

#[derive(Clone, Default, Debug)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct Checked;

#[derive(Clone, Default, Debug, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct Unchecked;
@@ -42,75 +40,54 @@ pub struct Unchecked;
pub struct Settings<T> {
    #[serde(
        default,
        deserialize_with = "deserialize_some",
        serialize_with = "serialize_with_wildcard",
        skip_serializing_if = "Option::is_none"
        skip_serializing_if = "Setting::is_not_set"
    )]
    pub displayed_attributes: Option<Option<Vec<String>>>,
    pub displayed_attributes: Setting<Vec<String>>,

    #[serde(
        default,
        deserialize_with = "deserialize_some",
        serialize_with = "serialize_with_wildcard",
        skip_serializing_if = "Option::is_none"
        skip_serializing_if = "Setting::is_not_set"
    )]
    pub searchable_attributes: Option<Option<Vec<String>>>,
    pub searchable_attributes: Setting<Vec<String>>,

    #[serde(
        default,
        deserialize_with = "deserialize_some",
        skip_serializing_if = "Option::is_none"
    )]
    pub filterable_attributes: Option<Option<BTreeSet<String>>>,

    #[serde(
        default,
        deserialize_with = "deserialize_some",
        skip_serializing_if = "Option::is_none"
    )]
    pub ranking_rules: Option<Option<Vec<String>>>,
    #[serde(
        default,
        deserialize_with = "deserialize_some",
        skip_serializing_if = "Option::is_none"
    )]
    pub stop_words: Option<Option<BTreeSet<String>>>,
    #[serde(
        default,
        deserialize_with = "deserialize_some",
        skip_serializing_if = "Option::is_none"
    )]
    pub synonyms: Option<Option<BTreeMap<String, Vec<String>>>>,
    #[serde(
        default,
        deserialize_with = "deserialize_some",
        skip_serializing_if = "Option::is_none"
    )]
    pub distinct_attribute: Option<Option<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub filterable_attributes: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub sortable_attributes: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub ranking_rules: Setting<Vec<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub stop_words: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub synonyms: Setting<BTreeMap<String, Vec<String>>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub distinct_attribute: Setting<String>,

    #[serde(skip)]
    pub _kind: PhantomData<T>,
}

impl Settings<Unchecked> {
    pub fn check(mut self) -> Settings<Checked> {
        let displayed_attributes = match self.displayed_attributes.take() {
            Some(Some(fields)) => {
    pub fn check(self) -> Settings<Checked> {
        let displayed_attributes = match self.displayed_attributes {
            Setting::Set(fields) => {
                if fields.iter().any(|f| f == "*") {
                    Some(None)
                    Setting::Reset
                } else {
                    Some(Some(fields))
                    Setting::Set(fields)
                }
            }
            otherwise => otherwise,
        };

        let searchable_attributes = match self.searchable_attributes.take() {
            Some(Some(fields)) => {
        let searchable_attributes = match self.searchable_attributes {
            Setting::Set(fields) => {
                if fields.iter().any(|f| f == "*") {
                    Some(None)
                    Setting::Reset
                } else {
                    Some(Some(fields))
                    Setting::Set(fields)
                }
            }
            otherwise => otherwise,
@@ -120,6 +97,7 @@ impl Settings<Unchecked> {
            displayed_attributes,
            searchable_attributes,
            filterable_attributes: self.filterable_attributes,
            sortable_attributes: self.sortable_attributes,
            ranking_rules: self.ranking_rules,
            stop_words: self.stop_words,
            synonyms: self.synonyms,
@@ -129,10 +107,61 @@ impl Settings<Unchecked> {
    }
}

static ASC_DESC_REGEX: Lazy<Regex> =
    Lazy::new(|| Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap());
#[derive(Debug, Clone, PartialEq)]
pub enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

#[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
impl<T> Default for Setting<T> {
    fn default() -> Self {
        Self::NotSet
    }
}

impl<T> Setting<T> {
    pub const fn is_not_set(&self) -> bool {
        matches!(self, Self::NotSet)
    }

    pub fn map<A>(self, f: fn(T) -> A) -> Setting<A> {
        match self {
            Setting::Set(a) => Setting::Set(f(a)),
            Setting::Reset => Setting::Reset,
            Setting::NotSet => Setting::NotSet,
        }
    }
}

#[cfg(test)]
impl<T: serde::Serialize> serde::Serialize for Setting<T> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Self::Set(value) => Some(value),
            // Usually not_set isn't serialized by setting skip_serializing_if field attribute
            Self::NotSet | Self::Reset => None,
        }
        .serialize(serializer)
    }
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for Setting<T> {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(|x| match x {
            Some(x) => Self::Set(x),
            None => Self::Reset, // Reset is forced by sending null value
        })
    }
}
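
A minimal sketch (not from this diff) of how such a tri-state setting behaves through serde: an explicit `null` forces `Reset`, an absent field stays `NotSet` thanks to `#[serde(default)]`, and a value becomes `Set`. The `Tri` and `Doc` types here are illustrative only, assuming `serde` and `serde_json` are available.

```rust
use serde::{Deserialize, Deserializer};

#[derive(Debug, PartialEq)]
enum Tri<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> Default for Tri<T> {
    fn default() -> Self {
        Tri::NotSet
    }
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for Tri<T> {
    fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
        // a present value becomes Set, an explicit `null` becomes Reset;
        // an absent field never reaches this impl, so `#[serde(default)]`
        // on the field is what produces NotSet
        Ok(match Option::<T>::deserialize(d)? {
            Some(value) => Tri::Set(value),
            None => Tri::Reset,
        })
    }
}

#[derive(Debug, Deserialize)]
struct Doc {
    #[serde(default)]
    displayed: Tri<Vec<String>>,
}

fn main() {
    let set: Doc = serde_json::from_str(r#"{ "displayed": ["title"] }"#).unwrap();
    let reset: Doc = serde_json::from_str(r#"{ "displayed": null }"#).unwrap();
    let not_set: Doc = serde_json::from_str(r#"{}"#).unwrap();
    assert_eq!(set.displayed, Tri::Set(vec!["title".to_string()]));
    assert_eq!(reset.displayed, Tri::Reset);
    assert_eq!(not_set.displayed, Tri::NotSet);
}
```
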
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Criterion {
    /// Sorted by decreasing number of matched query terms.
    /// Query words at the front of an attribute is considered better than if it was at the back.
@@ -142,8 +171,11 @@ pub enum Criterion {
    /// Sorted by increasing distance between matched query terms.
    Proximity,
    /// Documents with quey words contained in more important
    /// attributes are considred better.
    /// attributes are considered better.
    Attribute,
    /// Dynamically sort at query time the documents. None, one or multiple Asc/Desc sortable
    /// attributes can be used in place of this criterion at query time.
    Sort,
    /// Sorted by the similarity of the matched words with the query words.
    Exactness,
    /// Sorted by the increasing value of the field specified.
@@ -152,40 +184,86 @@ pub enum Criterion {
    Desc(String),
}

impl Criterion {
    /// Returns the field name parameter of this criterion.
    pub fn field_name(&self) -> Option<&str> {
        match self {
            Criterion::Asc(name) | Criterion::Desc(name) => Some(name),
            _otherwise => None,
        }
    }
}

impl FromStr for Criterion {
    // since we're not going to show the custom error message we can override the
    // error type.
    type Err = ();

    fn from_str(txt: &str) -> Result<Criterion, Self::Err> {
        match txt {
    fn from_str(text: &str) -> Result<Criterion, Self::Err> {
        match text {
            "words" => Ok(Criterion::Words),
            "typo" => Ok(Criterion::Typo),
            "proximity" => Ok(Criterion::Proximity),
            "attribute" => Ok(Criterion::Attribute),
            "sort" => Ok(Criterion::Sort),
            "exactness" => Ok(Criterion::Exactness),
            text => {
                let caps = ASC_DESC_REGEX.captures(text).ok_or(())?;
                let order = caps.get(1).unwrap().as_str();
                let field_name = caps.get(2).unwrap().as_str();
                match order {
                    "asc" => Ok(Criterion::Asc(field_name.to_string())),
                    "desc" => Ok(Criterion::Desc(field_name.to_string())),
                    _text => Err(()),
                }
            }
            text => match AscDesc::from_str(text) {
                Ok(AscDesc::Asc(field)) => Ok(Criterion::Asc(field)),
                Ok(AscDesc::Desc(field)) => Ok(Criterion::Desc(field)),
                Err(_) => Err(()),
            },
        }
    }
}
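
To see the new `FromStr` in action, here is a short sketch (illustrative only, assuming the `Criterion` and `AscDesc` types above are in scope) that parses a full ranking-rules array, including a v0.22-style `field:asc` rule:

```rust
use std::str::FromStr;

fn main() {
    let rules = ["words", "typo", "proximity", "attribute", "sort", "exactness", "release_date:asc"];
    // every entry must parse into a Criterion, or the whole collect fails
    let parsed: Result<Vec<Criterion>, ()> = rules.iter().map(|r| Criterion::from_str(r)).collect();
    assert!(parsed.is_ok());
}
```
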
impl Display for Criterion {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Criterion::Words => write!(f, "words"),
            Criterion::Typo => write!(f, "typo"),
            Criterion::Proximity => write!(f, "proximity"),
            Criterion::Attribute => write!(f, "attribute"),
            Criterion::Exactness => write!(f, "exactness"),
            Criterion::Asc(field_name) => write!(f, "asc({})", field_name),
            Criterion::Desc(field_name) => write!(f, "desc({})", field_name),
#[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
pub enum AscDesc {
    Asc(String),
    Desc(String),
}

impl FromStr for AscDesc {
    type Err = ();

    // since we don't know if this comes from the old or new syntax we need to check
    // for both syntaxes.
    // WARN: this code doesn't come from the original meilisearch v0.22.0 but was
    // written specifically to be able to import the dump of meilisearch v0.21.0 AND
    // meilisearch v0.22.0.
    fn from_str(text: &str) -> Result<AscDesc, Self::Err> {
        if let Some((field_name, asc_desc)) = text.rsplit_once(':') {
            match asc_desc {
                "asc" => Ok(AscDesc::Asc(field_name.to_string())),
                "desc" => Ok(AscDesc::Desc(field_name.to_string())),
                _ => Err(()),
            }
        } else if text.starts_with("asc(") && text.ends_with(')') {
            Ok(AscDesc::Asc(
                text.strip_prefix("asc(").unwrap().strip_suffix(')').unwrap().to_string(),
            ))
        } else if text.starts_with("desc(") && text.ends_with(')') {
            Ok(AscDesc::Desc(
                text.strip_prefix("desc(").unwrap().strip_suffix(')').unwrap().to_string(),
            ))
        } else {
            Err(())
        }
    }
}
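
The dual-syntax behavior that the `WARN` comment describes can be pinned down with a few assertions (a sketch, assuming the `AscDesc` type above):

```rust
use std::str::FromStr;

fn main() {
    // v0.22.0 syntax: field:asc / field:desc
    assert_eq!(AscDesc::from_str("release_date:asc"), Ok(AscDesc::Asc("release_date".to_string())));
    // v0.21.0 syntax: asc(field) / desc(field)
    assert_eq!(AscDesc::from_str("asc(release_date)"), Ok(AscDesc::Asc("release_date".to_string())));
    assert_eq!(AscDesc::from_str("desc(rank)"), Ok(AscDesc::Desc("rank".to_string())));
    // anything else is rejected
    assert!(AscDesc::from_str("release_date").is_err());
}
```
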
impl fmt::Display for Criterion {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use Criterion::*;

        match self {
            Words => f.write_str("words"),
            Typo => f.write_str("typo"),
            Proximity => f.write_str("proximity"),
            Attribute => f.write_str("attribute"),
            Sort => f.write_str("sort"),
            Exactness => f.write_str("exactness"),
            Asc(attr) => write!(f, "{}:asc", attr),
            Desc(attr) => write!(f, "{}:desc", attr),
        }
    }
}

@@ -0,0 +1,25 @@
---
source: dump/src/reader/v2/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/v2/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,30 @@
---
source: dump/src/reader/v2/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -112,8 +112,11 @@ impl V3Reader {
    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V3IndexReader>> + '_> {
        Ok(self.index_uuid.iter().map(|index| -> Result<_> {
            V3IndexReader::new(
                index.uid.clone(),
                &self.dump.path().join("indexes").join(index.uuid.to_string()),
                index,
                BufReader::new(
                    File::open(self.dump.path().join("updates").join("data.jsonl")).unwrap(),
                ),
            )
        }))
    }
@@ -155,16 +158,42 @@ pub struct V3IndexReader {
}

impl V3IndexReader {
    pub fn new(name: String, path: &Path) -> Result<Self> {
    pub fn new(path: &Path, index_uuid: &IndexUuid, tasks: BufReader<File>) -> Result<Self> {
        let meta = File::open(path.join("meta.json"))?;
        let meta: DumpMeta = serde_json::from_reader(meta)?;

        let mut created_at = None;
        let mut updated_at = None;

        for line in tasks.lines() {
            let task: Task = serde_json::from_str(&line?)?;

            if !(task.uuid == index_uuid.uuid && task.is_finished()) {
                continue;
            }

            let new_created_at = match task.update.meta() {
                Kind::DocumentAddition { .. } | Kind::Settings(_) => task.update.finished_at(),
                _ => None,
            };
            let new_updated_at = task.update.finished_at();

            if created_at.is_none() || created_at > new_created_at {
                created_at = new_created_at;
            }

            if updated_at.is_none() || updated_at < new_updated_at {
                updated_at = new_updated_at;
            }
        }

        let current_time = OffsetDateTime::now_utc();

        let metadata = IndexMetadata {
            uid: name,
            uid: index_uuid.uid.clone(),
            primary_key: meta.primary_key,
            // FIXME: Iterate over the whole task queue to find the creation and last update date.
            created_at: OffsetDateTime::now_utc(),
            updated_at: OffsetDateTime::now_utc(),
            created_at: created_at.unwrap_or(current_time),
            updated_at: updated_at.unwrap_or(current_time),
        };

        let ret = V3IndexReader {
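
The hunk above replaces the hard-coded `now_utc()` timestamps with a min/max scan over the index's finished tasks. Stripped of the dump-specific types, the scan reduces to this pattern (a standalone sketch with plain integers standing in for `Option<OffsetDateTime>` values):

```rust
// Standalone sketch: find the earliest and latest timestamps in one pass,
// the same way created_at/updated_at are derived above.
fn min_max(timestamps: &[i64]) -> (Option<i64>, Option<i64>) {
    let (mut created_at, mut updated_at) = (None, None);
    for &ts in timestamps {
        let ts = Some(ts);
        // keep the earliest timestamp seen so far
        if created_at.is_none() || created_at > ts {
            created_at = ts;
        }
        // keep the latest timestamp seen so far
        if updated_at.is_none() || updated_at < ts {
            updated_at = ts;
        }
    }
    (created_at, updated_at)
}

fn main() {
    assert_eq!(min_max(&[30, 10, 20]), (Some(10), Some(30)));
}
```
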
@@ -263,12 +292,12 @@ pub(crate) mod test {
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-07T11:38:54.74389899Z",
          "updatedAt": "2022-10-07T11:38:55.963185778Z"
        }
        "###);

@@ -278,12 +307,12 @@ pub(crate) mod test {
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-07T11:38:54.026649575Z",
          "updatedAt": "2022-10-07T11:39:04.188852537Z"
        }
        "###);

@@ -308,12 +337,12 @@ pub(crate) mod test {
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-07T11:38:56.265951133Z",
          "updatedAt": "2022-10-07T11:38:56.521004328Z"
        }
        "###);

@@ -74,6 +74,26 @@ impl UpdateStatus {
            _ => None,
        }
    }

    pub fn enqueued_at(&self) -> Option<OffsetDateTime> {
        match self {
            UpdateStatus::Processing(u) => Some(u.from.enqueued_at),
            UpdateStatus::Enqueued(u) => Some(u.enqueued_at),
            UpdateStatus::Processed(u) => Some(u.from.from.enqueued_at),
            UpdateStatus::Aborted(u) => Some(u.from.enqueued_at),
            UpdateStatus::Failed(u) => Some(u.from.from.enqueued_at),
        }
    }

    pub fn finished_at(&self) -> Option<OffsetDateTime> {
        match self {
            UpdateStatus::Processing(_) => None,
            UpdateStatus::Enqueued(_) => None,
            UpdateStatus::Processed(u) => Some(u.processed_at),
            UpdateStatus::Aborted(_) => None,
            UpdateStatus::Failed(u) => Some(u.failed_at),
        }
    }
}
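
A small illustrative use of the new accessors (a hypothetical helper, assuming the `UpdateStatus` and `OffsetDateTime` types from this file): the latest completion time across a batch of updates is just the max of `finished_at`.

```rust
// Hypothetical helper, not part of the diff.
fn latest_finish(statuses: &[UpdateStatus]) -> Option<OffsetDateTime> {
    // updates that never finished yield None and are filtered out
    statuses.iter().filter_map(|status| status.finished_at()).max()
}
```
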

#[derive(Debug, Deserialize, Clone)]
@@ -5,10 +5,8 @@ use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "test-traits", derive(proptest_derive::Arbitrary))]
pub struct ResponseError {
    #[serde(skip)]
    #[cfg_attr(feature = "test-traits", proptest(strategy = "strategy::status_code_strategy()"))]
    pub code: StatusCode,
    pub message: String,
    #[serde(rename = "code")]
@@ -5,7 +5,6 @@ use serde::Deserialize;

#[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "test-traits", derive(proptest_derive::Arbitrary))]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct ResponseError {
    #[serde(skip)]
@@ -33,7 +33,7 @@
//!

use std::fs::{self, File};
use std::io::{BufRead, BufReader, ErrorKind, Seek, SeekFrom};
use std::io::{BufRead, BufReader, ErrorKind, Seek};
use std::path::Path;

use serde::{Deserialize, Serialize};
@@ -178,7 +178,7 @@ impl V5Reader {
    }

    pub fn keys(&mut self) -> Result<Box<dyn Iterator<Item = Result<Key>> + '_>> {
        self.keys.seek(SeekFrom::Start(0))?;
        self.keys.rewind()?;
        Ok(Box::new(
            (&mut self.keys).lines().map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) }),
        ))
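
`rewind()` is the std shorthand for `seek(SeekFrom::Start(0))` that this hunk swaps in; both reset the cursor so `lines()` can re-read from the top. A self-contained sketch of the equivalence (illustrative, using an in-memory `Cursor` instead of the dump's key file):

```rust
use std::io::{BufRead, Cursor, Seek};

fn main() -> std::io::Result<()> {
    let mut keys = Cursor::new("key1\nkey2\n");
    let first: Vec<String> = (&mut keys).lines().collect::<Result<_, _>>()?;
    keys.rewind()?; // same effect as keys.seek(SeekFrom::Start(0))?
    let second: Vec<String> = (&mut keys).lines().collect::<Result<_, _>>()?;
    assert_eq!(first, second);
    Ok(())
}
```
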
@@ -26,7 +26,7 @@ pub type Kind = crate::KindDump;
pub type Details = meilisearch_types::tasks::Details;

// everything related to the settings
pub type Setting<T> = meilisearch_types::settings::Setting<T>;
pub type Setting<T> = meilisearch_types::milli::update::Setting<T>;
pub type TypoTolerance = meilisearch_types::settings::TypoSettings;
pub type MinWordSizeForTypos = meilisearch_types::settings::MinWordSizeTyposSetting;
pub type FacetingSettings = meilisearch_types::settings::FacetingSettings;
@@ -34,12 +34,12 @@ pub type PaginationSettings = meilisearch_types::settings::PaginationSettings;

// everything related to the api keys
pub type Action = meilisearch_types::keys::Action;
pub type StarOr<T> = meilisearch_types::star_or::StarOr<T>;
pub type IndexUid = meilisearch_types::index_uid::IndexUid;
pub type IndexUidPattern = meilisearch_types::index_uid_pattern::IndexUidPattern;

// everything related to the errors
pub type ResponseError = meilisearch_types::error::ResponseError;
pub type Code = meilisearch_types::error::Code;
pub type RankingRuleView = meilisearch_types::settings::RankingRuleView;

pub struct V6Reader {
    dump: TempDir,
BIN dump/tests/assets/v2-v0.22.0.dump (new binary file, not shown)
@@ -1,7 +1,14 @@
[package]
name = "file-store"
version = "1.0.0"
edition = "2021"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
tempfile = "3.3.0"
@@ -1,4 +1,3 @@
use std::collections::BTreeSet;
use std::fs::File as StdFile;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
@@ -11,10 +10,14 @@ const UPDATE_FILES_PATH: &str = "updates/updates_files";

#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("Could not parse file name as utf-8")]
    CouldNotParseFileNameAsUtf8,
    #[error(transparent)]
    IoError(#[from] std::io::Error),
    #[error(transparent)]
    PersistError(#[from] tempfile::PersistError),
    #[error(transparent)]
    UuidError(#[from] uuid::Error),
}

pub type Result<T> = std::result::Result<T, Error>;
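
The `#[error(transparent)]` variants with `#[from]` generate `From` impls, so `?` converts io, tempfile, and uuid errors into this `Error` automatically. A sketch of what that buys the new `all_uuids` code below (the helper itself is hypothetical, assuming the crate's `Result` alias and `uuid` dependency):

```rust
use std::str::FromStr;

// Hypothetical helper, not part of the diff: `uuid::Error` is lifted into
// `Error::UuidError` by the generated From impl, so a bare `?` suffices.
fn parse_file_name(name: &str) -> Result<uuid::Uuid> {
    Ok(uuid::Uuid::from_str(name)?)
}
```
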
@@ -33,13 +36,11 @@ impl DerefMut for File {
    }
}

#[cfg_attr(test, faux::create)]
#[derive(Clone, Debug)]
pub struct FileStore {
    path: PathBuf,
}

#[cfg(not(test))]
impl FileStore {
    pub fn new(path: impl AsRef<Path>) -> Result<FileStore> {
        let path = path.as_ref().to_path_buf();
@@ -48,7 +49,6 @@ impl FileStore {
    }
}

#[cfg_attr(test, faux::methods)]
impl FileStore {
    /// Creates a new temporary update file.
    /// A call to `persist` is needed to persist the file in the database.
@@ -94,7 +94,17 @@ impl FileStore {
        Ok(())
    }

    pub fn get_size(&self, uuid: Uuid) -> Result<u64> {
    /// Compute the size of all the updates contained in the file store.
    pub fn compute_total_size(&self) -> Result<u64> {
        let mut total = 0;
        for uuid in self.all_uuids()? {
            total += self.compute_size(uuid?).unwrap_or_default();
        }
        Ok(total)
    }

    /// Compute the size of one update
    pub fn compute_size(&self, uuid: Uuid) -> Result<u64> {
        Ok(self.get_update(uuid)?.metadata()?.len())
    }

@@ -105,17 +115,12 @@ impl FileStore {
    }

    /// List the Uuids of the files in the FileStore
    ///
    /// This function is meant to be used by tests only.
    #[doc(hidden)]
    pub fn __all_uuids(&self) -> BTreeSet<Uuid> {
        let mut uuids = BTreeSet::new();
        for entry in self.path.read_dir().unwrap() {
            let entry = entry.unwrap();
            let uuid = Uuid::from_str(entry.file_name().to_str().unwrap()).unwrap();
            uuids.insert(uuid);
        }
        uuids
    pub fn all_uuids(&self) -> Result<impl Iterator<Item = Result<Uuid>>> {
        Ok(self.path.read_dir()?.map(|entry| {
            Ok(Uuid::from_str(
                entry?.file_name().to_str().ok_or(Error::CouldNotParseFileNameAsUtf8)?,
            )?)
        }))
    }
}
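
`compute_total_size` folds `compute_size` over `all_uuids`, treating entries whose size cannot be read as zero instead of failing the whole scan. The same pattern with nothing but std (an illustrative sketch, not the FileStore code):

```rust
use std::io;
use std::path::Path;

// Total the sizes of the files in a directory; an entry whose metadata
// cannot be read contributes 0 rather than aborting the scan.
fn total_size(dir: &Path) -> io::Result<u64> {
    let mut total = 0;
    for entry in std::fs::read_dir(dir)? {
        total += entry?.metadata().map(|meta| meta.len()).unwrap_or_default();
    }
    Ok(total)
}
```
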
filter-parser/Cargo.toml (new file, 19 lines)
@@ -0,0 +1,19 @@
[package]
name = "filter-parser"
description = "The parser for the Meilisearch filter syntax"
publish = false

version.workspace = true
authors.workspace = true
# description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
nom = "7.1.1"
nom_locate = "4.0.0"

[dev-dependencies]
insta = "1.21.0"
filter-parser/README.md (new file, 36 lines)
@@ -0,0 +1,36 @@
# Filter parser

This workspace is dedicated to the parsing of the Meilisearch filters.

Most of the code and explanation is in [`lib.rs`](./src/lib.rs); in particular, the BNF of the filters sits at the top of that file.

The parser uses [nom](https://docs.rs/nom/) to do most of its work and [nom-locate](https://docs.rs/nom_locate/) to keep track of what we were doing when we encountered an error.

## CLI
A simple main is provided to quick-test whether a filter can be parsed, without pulling in milli.
It takes one argument and tries to parse it.
```
cargo run -- 'field = value' # success
cargo run -- 'field = "doggo' # error => missing closing delimiter "
```

## Fuzz
The workspace has been fuzzed with [cargo-fuzz](https://rust-fuzz.github.io/book/cargo-fuzz.html).

### Setup
You'll need rust-nightly to execute the fuzzer.

```
cargo install cargo-fuzz
```

### Run
When executed by the fuzzer, the filter parser triggers a stack overflow very quickly. We can avoid this problem by limiting the `max_len` of [libfuzzer](https://llvm.org/docs/LibFuzzer.html) to 500 characters.
```
cargo fuzz run parse -- -max_len=500
```

## What to do if you find a bug in the parser

- Write a test at the end of [`lib.rs`](./src/lib.rs) to ensure it never happens again.
- Add a file in [the corpus directory](./fuzz/corpus/parse/) with your filter to help the fuzzer find new bugs. Since this directory is going to be heavily polluted by fuzzer runs, it's in the gitignore and you'll need to force-add your new test.
filter-parser/fuzz/.gitignore (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
/corpus/
/artifacts/
/target/
filter-parser/fuzz/Cargo.toml (new file, 25 lines)
@@ -0,0 +1,25 @@
[package]
name = "filter-parser-fuzz"
version = "0.0.0"
authors = ["Automatically generated"]
publish = false
edition = "2018"

[package.metadata]
cargo-fuzz = true

[dependencies]
libfuzzer-sys = "0.4"

[dependencies.filter-parser]
path = ".."

# Prevent this from interfering with workspaces
[workspace]
members = ["."]

[[bin]]
name = "parse"
path = "fuzz_targets/parse.rs"
test = false
doc = false
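
The `[[bin]]` above points at `fuzz_targets/parse.rs`, which this diff does not show. A typical libfuzzer target for this parser would look roughly like the following (a sketch; `FilterCondition::parse` is assumed to be the crate's entry point):

```rust
#![no_main]
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // arbitrary fuzz input may not be valid UTF-8, so filter that out first
    if let Ok(text) = std::str::from_utf8(data) {
        // parsing must never panic or blow the stack, whatever the input
        let _ = filter_parser::FilterCondition::parse(text);
    }
});
```
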
New fuzz corpus seeds under filter-parser/fuzz/corpus/parse/, one filter per file (each added as a new 1-line file, `@@ -0,0 +1 @@`):

test_1:  channel = Ponce
test_10: channel != ponce
test_11: NOT channel = ponce
test_12: subscribers < 1000
test_13: subscribers > 1000
test_14: subscribers <= 1000
test_15: subscribers >= 1000
test_16: NOT subscribers < 1000
test_17: NOT subscribers > 1000
test_18: NOT subscribers <= 1000
test_19: NOT subscribers >= 1000
test_2:  subscribers = 12
test_20: subscribers 100 TO 1000
test_21: NOT subscribers 100 TO 1000
test_22: _geoRadius(12, 13, 14)
test_23: NOT _geoRadius(12, 13, 14)
test_24: channel = ponce AND 'dog race' != 'bernese mountain'
test_25: channel = ponce OR 'dog race' != 'bernese mountain'
test_26: channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000
test_27: channel = ponce AND ( 'dog race' != 'bernese mountain' OR subscribers > 1000 )
test_28: (channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000) AND _geoRadius(12, 13, 14)
test_29: channel = Ponce = 12
test_3:  channel = 'Mister Mv'
test_30: channel =
test_31: channel = 🐻
test_32: OR
Some files were not shown because too many files have changed in this diff.