Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-07-20 21:30:58 +00:00)

Compare commits: optimize-l...v1.0.2 (1016 commits)
.github/ISSUE_TEMPLATE/config.yml (vendored): 8 changes

@@ -1,13 +1,13 @@
 contact_links:
-  - name: Support questions & other
-    url: https://github.com/meilisearch/meilisearch/discussions/new
-    about: For any other question, open a discussion in this repository
   - name: Language support request & feedback
     url: https://github.com/meilisearch/product/discussions/categories/feedback-feature-proposal?discussions_q=label%3Aproduct%3Acore%3Atokenizer+category%3A%22Feedback+%26+Feature+Proposal%22
     about: The requests and feedback regarding Language support are not managed in this repository. Please upvote the related discussion in our dedicated product repository or open a new one if it doesn't exist.
-  - name: Feature request & feedback
+  - name: Any other feature request & feedback
     url: https://github.com/meilisearch/product/discussions/categories/feedback-feature-proposal
     about: The feature requests and feedback regarding the already existing features are not managed in this repository. Please open a discussion in our dedicated product repository
   - name: Documentation issue
     url: https://github.com/meilisearch/documentation/issues/new
     about: For documentation issues, open an issue or a PR in the documentation repository
+  - name: Support questions & other
+    url: https://github.com/meilisearch/meilisearch/discussions/new
+    about: For any other question, open a discussion in this repository
.github/scripts/is-latest-release.sh (vendored): 134 changes

@@ -1,127 +1,41 @@
 #!/bin/sh
 
-# Was used in our CIs to publish the latest docker image. Not used anymore, will be used again when v1 and v2 will be out and we will want to maintain multiple stable versions.
-# Returns "true" or "false" (as a string) to be used in the `if` in GHA
+# Used in our CIs to publish the latest Docker image.
 
-# Checks if the current tag should be the latest (in terms of semver and not of release date).
-# Ex: previous tag -> v2.1.1
-#     new tag -> v1.20.3
-# The new tag (v1.20.3) should NOT be the latest
-# So it returns "false", the `latest` tag should not be updated for the release v1.20.3 and still need to correspond to v2.1.1
+# Checks if the current tag ($GITHUB_REF) corresponds to the latest release tag on GitHub
+# Returns "true" or "false" (as a string).
 
 # GLOBAL
-GREP_SEMVER_REGEXP='v\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)$' # i.e. v[number].[number].[number]
+GITHUB_API='https://api.github.com/repos/meilisearch/meilisearch/releases'
+PNAME='meilisearch'
 
 # FUNCTIONS
 
-# semverParseInto and semverLT from https://github.com/cloudflare/semver_bash/blob/master/semver.sh
-
-# usage: semverParseInto version major minor patch special
-# version: the string version
-# major, minor, patch, special: will be assigned by the function
-semverParseInto() {
-    local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
-    #MAJOR
-    eval $2=`echo $1 | sed -e "s#$RE#\1#"`
-    #MINOR
-    eval $3=`echo $1 | sed -e "s#$RE#\2#"`
-    #MINOR
-    eval $4=`echo $1 | sed -e "s#$RE#\3#"`
-    #SPECIAL
-    eval $5=`echo $1 | sed -e "s#$RE#\4#"`
-}
-
-# usage: semverLT version1 version2
-semverLT() {
-    local MAJOR_A=0
-    local MINOR_A=0
-    local PATCH_A=0
-    local SPECIAL_A=0
-
-    local MAJOR_B=0
-    local MINOR_B=0
-    local PATCH_B=0
-    local SPECIAL_B=0
-
-    semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
-    semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B
-
-    if [ $MAJOR_A -lt $MAJOR_B ]; then
-        return 0
-    fi
-    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -lt $MINOR_B ]; then
-        return 0
-    fi
-    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -le $MINOR_B ] && [ $PATCH_A -lt $PATCH_B ]; then
-        return 0
-    fi
-    if [ "_$SPECIAL_A" == "_" ] && [ "_$SPECIAL_B" == "_" ] ; then
-        return 1
-    fi
-    if [ "_$SPECIAL_A" == "_" ] && [ "_$SPECIAL_B" != "_" ] ; then
-        return 1
-    fi
-    if [ "_$SPECIAL_A" != "_" ] && [ "_$SPECIAL_B" == "_" ] ; then
-        return 0
-    fi
-    if [ "_$SPECIAL_A" < "_$SPECIAL_B" ]; then
-        return 0
-    fi
-
-    return 1
-}
-
-# Returns the tag of the latest stable release (in terms of semver and not of release date)
+# Returns the version of the latest stable version of Meilisearch by setting the $latest variable.
 get_latest() {
-    temp_file='temp_file' # temp_file needed because the grep would start before the download is over
-    curl -s 'https://api.github.com/repos/meilisearch/meilisearch/releases' > "$temp_file"
-    releases=$(cat "$temp_file" | \
-        grep -E "tag_name|draft|prerelease" \
-        | tr -d ',"' | cut -d ':' -f2 | tr -d ' ')
-    # Returns a list of [tag_name draft_boolean prerelease_boolean ...]
-    # Ex: v0.10.1 false false v0.9.1-rc.1 false true v0.9.0 false false...
+    # temp_file is needed because the grep would start before the download is over
+    temp_file=$(mktemp -q /tmp/$PNAME.XXXXXXXXX)
+    latest_release="$GITHUB_API/latest"
 
-    i=0
-    latest=""
-    current_tag=""
-    for release_info in $releases; do
-        if [ $i -eq 0 ]; then # Cheking tag_name
-            if echo "$release_info" | grep -q "$GREP_SEMVER_REGEXP"; then # If it's not an alpha or beta release
-                current_tag=$release_info
-            else
-                current_tag=""
-            fi
-            i=1
-        elif [ $i -eq 1 ]; then # Checking draft boolean
-            if [ "$release_info" = "true" ]; then
-                current_tag=""
-            fi
-            i=2
-        elif [ $i -eq 2 ]; then # Checking prerelease boolean
-            if [ "$release_info" = "true" ]; then
-                current_tag=""
-            fi
-            i=0
-            if [ "$current_tag" != "" ]; then # If the current_tag is valid
-                if [ "$latest" = "" ]; then # If there is no latest yet
-                    latest="$current_tag"
-                else
-                    semverLT $current_tag $latest # Comparing latest and the current tag
-                    if [ $? -eq 1 ]; then
-                        latest="$current_tag"
-                    fi
-                fi
-            fi
-        fi
-    done
+    if [ $? -ne 0 ]; then
+        echo "$0: Can't create temp file."
+        exit 1
+    fi
+
+    if [ -z "$GITHUB_PAT" ]; then
+        curl -s "$latest_release" > "$temp_file" || return 1
+    else
+        curl -H "Authorization: token $GITHUB_PAT" -s "$latest_release" > "$temp_file" || return 1
+    fi
+
+    latest="$(cat "$temp_file" | grep '"tag_name":' | cut -d ':' -f2 | tr -d '"' | tr -d ',' | tr -d ' ')"
 
     rm -f "$temp_file"
-    echo $latest
+    return 0
 }
 
 # MAIN
 current_tag="$(echo $GITHUB_REF | tr -d 'refs/tags/')"
-latest="$(get_latest)"
+get_latest
 
 if [ "$current_tag" != "$latest" ]; then
     # The current release tag is not the latest
@@ -130,3 +44,5 @@ else
     # The current release tag is the latest
     echo "true"
 fi
+
+exit 0
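The rewrite drops the local semver comparison entirely and defers ordering to GitHub's `releases/latest` endpoint. The script can be exercised outside CI; a minimal sketch, assuming it is run from the repository root with GITHUB_REF as its only input:

```sh
# Hypothetical local invocation of the rewritten script.
export GITHUB_REF='refs/tags/v1.0.2'
export GITHUB_PAT=''    # set a token here to avoid GitHub API rate limits
sh .github/scripts/is-latest-release.sh
# Prints "true" when v1.0.2 is the tag GitHub serves at releases/latest, "false" otherwise.
```

Note that `tr -d 'refs/tags/'` deletes characters rather than stripping a prefix; it happens to be safe for plain `vX.Y.Z` tags because none of their characters appear in that set.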
.github/workflows/coverage.yml (vendored): 33 changes (file deleted)

@@ -1,33 +0,0 @@
----
-on:
-  workflow_dispatch:
-
-name: Execute code coverage
-
-jobs:
-  nightly-coverage:
-    runs-on: ubuntu-18.04
-    steps:
-    - uses: actions/checkout@v3
-    - uses: actions-rs/toolchain@v1
-      with:
-        toolchain: nightly
-        override: true
-    - uses: actions-rs/cargo@v1
-      with:
-        command: clean
-    - uses: actions-rs/cargo@v1
-      with:
-        command: test
-        args: --all-features --no-fail-fast
-      env:
-        CARGO_INCREMENTAL: "0"
-        RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=unwind -Zpanic_abort_tests"
-    - uses: actions-rs/grcov@v0.1
-    - name: Upload coverage to Codecov
-      uses: codecov/codecov-action@v3
-      with:
-        token: ${{ secrets.CODECOV_TOKEN }}
-        file: ${{ steps.coverage.outputs.report }}
-        yml: ./codecov.yml
-        fail_ci_if_error: true
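Should coverage numbers ever be needed again, the deleted workflow's test phase has a rough local equivalent; a sketch assuming a rustup-managed nightly toolchain, with the RUSTFLAGS copied verbatim from the removed file:

```sh
export CARGO_INCREMENTAL=0
export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=unwind -Zpanic_abort_tests"
cargo +nightly clean
cargo +nightly test --all-features --no-fail-fast
# In CI, actions-rs/grcov then aggregated the profiles and pushed them to Codecov.
```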
.github/workflows/create-issue-dependencies.yml (vendored)

@@ -3,7 +3,7 @@ on:
   schedule:
     - cron: '0 0 1 */3 *'
   workflow_dispatch:
-
+
 jobs:
   create-issue:
     runs-on: ubuntu-latest
@@ -12,12 +12,12 @@ jobs:
       - name: Create an issue
         uses: actions-ecosystem/action-create-issue@v1
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           title: Upgrade dependencies
           body: |
-            We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the core-team repositories that Meilisearch depends on (milli, charabia, heed...).
+            We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the engine-team repositories that Meilisearch depends on (milli, charabia, heed...).
 
-            ⚠️ This issue should only be done at the beginning of the sprint!
+            ⚠️ This issue should only be done at the beginning of the sprint!
           labels: |
             dependencies
             maintenance
.github/workflows/flaky.yml (vendored): 17 changes

@@ -1,14 +1,25 @@
 name: Look for flaky tests
 on:
+  workflow_dispatch:
   schedule:
-    - cron: "0 12 * * FRI" # every friday at 12:00PM
+    - cron: "0 12 * * FRI" # Every Friday at 12:00PM
 
 jobs:
   flaky:
-    runs-on: ubuntu-18.04
-
+    runs-on: ubuntu-latest
+    container:
+      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
+      image: ubuntu:18.04
     steps:
       - uses: actions/checkout@v3
+      - name: Install needed dependencies
+        run: |
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
       - name: Run cargo flaky 100 times
.github/workflows/latest-git-tag.yml (vendored, new file): 29 lines

@@ -0,0 +1,29 @@
+# Create or update a latest git tag when releasing a stable version of Meilisearch
+name: Update latest git tag
+on:
+  workflow_dispatch:
+  release:
+    types: [released]
+
+jobs:
+  check-version:
+    name: Check the version validity
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Check release validity
+        if: github.event_name == 'release'
+        run: bash .github/scripts/check-release.sh
+
+  update-latest-tag:
+    runs-on: ubuntu-latest
+    needs: check-version
+    steps:
+      - uses: actions/checkout@v3
+      - uses: rickstaa/action-create-tag@v1
+        with:
+          tag: "latest"
+          message: "Latest stable release of Meilisearch"
+          # Move the tag if `latest` already exists
+          force_push_tag: true
+          github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
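In plain git, the tagging action boils down to a forced, moving tag; a sketch of what `force_push_tag: true` amounts to (the workflow itself authenticates with MEILI_BOT_GH_PAT):

```sh
# Move (or create) the `latest` tag and overwrite it on the remote.
git tag --force -a latest -m "Latest stable release of Meilisearch"
git push --force origin latest
```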
.github/workflows/milestone-workflow.yml (vendored, new file): 154 lines

@@ -0,0 +1,154 @@
+name: Milestone's workflow
+
+# /!\ No git flow are handled here
+
+# For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
+# - the roadmap issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/roadmap-issue.md
+# - the changelog issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/changelog-issue.md
+
+# For each Milestone closed
+# - the `release_version` label is created
+# - this label is applied to all issues/PRs in the Milestone
+
+on:
+  milestone:
+    types: [created, closed]
+
+env:
+  MILESTONE_VERSION: ${{ github.event.milestone.title }}
+  MILESTONE_URL: ${{ github.event.milestone.html_url }}
+  MILESTONE_DUE_ON: ${{ github.event.milestone.due_on }}
+  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
+
+jobs:
+
+  # -----------------
+  # MILESTONE CREATED
+  # -----------------
+
+  get-release-version:
+    if: github.event.action == 'created'
+    runs-on: ubuntu-latest
+    outputs:
+      is-patch: ${{ steps.check-patch.outputs.is-patch }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Check if this release is a patch release only
+        id: check-patch
+        run: |
+          echo version: $MILESTONE_VERSION
+          if [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.0$ ]]; then
+            echo 'This is NOT a patch release'
+            echo "is-patch=false" >> $GITHUB_OUTPUT
+          elif [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+            echo 'This is a patch release'
+            echo "is-patch=true" >> $GITHUB_OUTPUT
+          else
+            echo "Not a valid format of release, check the Milestone's title."
+            echo 'Should be vX.Y.Z'
+            exit 1
+          fi
+
+  create-roadmap-issue:
+    needs: get-release-version
+    # Create the roadmap issue if the release is not only a patch release
+    if: github.event.action == 'created' && needs.get-release-version.outputs.is-patch == 'false'
+    runs-on: ubuntu-latest
+    env:
+      ISSUE_TEMPLATE: issue-template.md
+    steps:
+      - uses: actions/checkout@v3
+      - name: Download the issue template
+        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/roadmap-issue.md > $ISSUE_TEMPLATE
+      - name: Replace all empty occurrences in the templates
+        run: |
+          # Replace all <<version>> occurrences
+          sed -i "s/<<version>>/$MILESTONE_VERSION/g" $ISSUE_TEMPLATE
+
+          # Replace all <<milestone_id>> occurrences
+          milestone_id=$(echo $MILESTONE_URL | cut -d '/' -f 7)
+          sed -i "s/<<milestone_id>>/$milestone_id/g" $ISSUE_TEMPLATE
+
+          # Replace release date if exists
+          if [[ ! -z $MILESTONE_DUE_ON ]]; then
+            date=$(echo $MILESTONE_DUE_ON | cut -d 'T' -f 1)
+            sed -i "s/Release date\: 20XX-XX-XX/Release date\: $date/g" $ISSUE_TEMPLATE
+          fi
+      - name: Create the issue
+        run: |
+          gh issue create \
+            --title "$MILESTONE_VERSION ROADMAP" \
+            --label 'epic,impacts docs,impacts integrations,impacts cloud' \
+            --body-file $ISSUE_TEMPLATE \
+            --milestone $MILESTONE_VERSION
+
+  create-changelog-issue:
+    needs: get-release-version
+    # Create the changelog issue if the release is not only a patch release
+    if: github.event.action == 'created' && needs.get-release-version.outputs.is-patch == 'false'
+    runs-on: ubuntu-latest
+    env:
+      ISSUE_TEMPLATE: issue-template.md
+    steps:
+      - uses: actions/checkout@v3
+      - name: Download the issue template
+        run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/changelog-issue.md > $ISSUE_TEMPLATE
+      - name: Replace all empty occurrences in the templates
+        run: |
+          # Replace all <<version>> occurrences
+          sed -i "s/<<version>>/$MILESTONE_VERSION/g" $ISSUE_TEMPLATE
+
+          # Replace all <<milestone_id>> occurrences
+          milestone_id=$(echo $MILESTONE_URL | cut -d '/' -f 7)
+          sed -i "s/<<milestone_id>>/$milestone_id/g" $ISSUE_TEMPLATE
+      - name: Create the issue
+        run: |
+          gh issue create \
+            --title "Create release changelogs for $MILESTONE_VERSION" \
+            --label 'impacts docs,documentation' \
+            --body-file $ISSUE_TEMPLATE \
+            --milestone $MILESTONE_VERSION \
+            --assignee curquiza
+
+  # ----------------
+  # MILESTONE CLOSED
+  # ----------------
+
+  create-release-label:
+    if: github.event.action == 'closed'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Create the ${{ env.MILESTONE_VERSION }} label
+        run: |
+          label_description="PRs/issues solved in $MILESTONE_VERSION"
+          if [[ ! -z $MILESTONE_DUE_ON ]]; then
+            date=$(echo $MILESTONE_DUE_ON | cut -d 'T' -f 1)
+            label_description="$label_description released on $date"
+          fi
+
+          gh api repos/meilisearch/meilisearch/labels \
+            --method POST \
+            -H "Accept: application/vnd.github+json" \
+            -f name="$MILESTONE_VERSION" \
+            -f description="$label_description" \
+            -f color='ff5ba3'
+
+  labelize-all-milestone-content:
+    if: github.event.action == 'closed'
+    needs: create-release-label
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Add label ${{ env.MILESTONE_VERSION }} to all PRs in the Milestone
+        run: |
+          prs=$(gh pr list --search milestone:"$MILESTONE_VERSION" --limit 1000 --state all --json number --template '{{range .}}{{tablerow (printf "%v" .number)}}{{end}}')
+          for pr in $prs; do
+            gh pr edit $pr --add-label $MILESTONE_VERSION
+          done
+      - name: Add label ${{ env.MILESTONE_VERSION }} to all issues in the Milestone
+        run: |
+          issues=$(gh issue list --search milestone:"$MILESTONE_VERSION" --limit 1000 --state all --json number --template '{{range .}}{{tablerow (printf "%v" .number)}}{{end}}')
+          for issue in $issues; do
+            gh issue edit $issue --add-label $MILESTONE_VERSION
+          done
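The patch-release detection above hinges on the order of the two regex checks: a `vX.Y.0` title matches both patterns, so the first branch must win. A sketch of the same logic extracted for a local bash session (the helper name is ours; the regexes are the workflow's own):

```sh
#!/bin/bash
is_patch() {
    if [[ $1 =~ ^v[0-9]+\.[0-9]+\.0$ ]]; then
        echo false   # vX.Y.0 also matches the second regex, so this branch must come first
    elif [[ $1 =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        echo true    # vX.Y.Z with a non-zero patch number
    else
        echo "invalid milestone title: $1" >&2 && return 1
    fi
}
is_patch v1.1.0   # -> false (roadmap and changelog issues are created)
is_patch v1.0.2   # -> true  (both issue-creation jobs are skipped)
```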
.github/workflows/publish-binaries.yml (vendored): 120 changes

@@ -1,4 +1,7 @@
 on:
+  workflow_dispatch:
+  schedule:
+    - cron: '0 2 * * *' # Every day at 2:00am
   release:
     types: [published]
 
@@ -8,65 +11,129 @@ jobs:
   check-version:
     name: Check the version validity
     runs-on: ubuntu-latest
+    # No need to check the version for dry run (cron)
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       # Check if the tag has the v<nmumber>.<number>.<number> format.
       # If yes, it means we are publishing an official release.
       # If no, we are releasing a RC, so no need to check the version.
       - name: Check tag format
-        if: github.event_name != 'schedule'
+        if: github.event_name == 'release'
         id: check-tag-format
         run: |
          escaped_tag=$(printf "%q" ${{ github.ref_name }})
 
          if [[ $escaped_tag =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-            echo ::set-output name=stable::true
+            echo "stable=true" >> $GITHUB_OUTPUT
          else
-            echo ::set-output name=stable::false
+            echo "stable=false" >> $GITHUB_OUTPUT
          fi
       - name: Check release validity
-        if: steps.check-tag-format.outputs.stable == 'true'
+        if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
         run: bash .github/scripts/check-release.sh
 
-  publish:
+  publish-linux:
+    name: Publish binary for Linux
+    runs-on: ubuntu-latest
+    needs: check-version
+    container:
+      # Use ubuntu-18.04 to compile with glibc 2.27
+      image: ubuntu:18.04
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install needed dependencies
+        run: |
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - name: Build
+        run: cargo build --release --locked
+      # No need to upload binaries for dry run (cron)
+      - name: Upload binaries to release
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.3.0
+        with:
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          file: target/release/meilisearch
+          asset_name: meilisearch-linux-amd64
+          tag: ${{ github.ref }}
+
+  publish-macos-windows:
     name: Publish binary for ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     needs: check-version
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-18.04, macos-latest, windows-latest]
+        os: [macos-12, windows-2022]
         include:
-          - os: ubuntu-18.04
-            artifact_name: meilisearch
-            asset_name: meilisearch-linux-amd64
-          - os: macos-latest
+          - os: macos-12
            artifact_name: meilisearch
            asset_name: meilisearch-macos-amd64
-          - os: windows-latest
+          - os: windows-2022
            artifact_name: meilisearch.exe
            asset_name: meilisearch-windows-amd64.exe
 
     steps:
-      - uses: hecrj/setup-rust-action@master
-        with:
-          rust-version: stable
       - uses: actions/checkout@v3
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
       - name: Build
         run: cargo build --release --locked
+      # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
-        uses: svenstaro/upload-release-action@v1-release
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.3.0
         with:
-          repo_token: ${{ secrets.PUBLISH_TOKEN }}
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/release/${{ matrix.artifact_name }}
          asset_name: ${{ matrix.asset_name }}
          tag: ${{ github.ref }}
 
+  publish-macos-apple-silicon:
+    name: Publish binary for macOS silicon
+    runs-on: ${{ matrix.os }}
+    needs: check-version
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - os: macos-12
+            target: aarch64-apple-darwin
+            asset_name: meilisearch-macos-apple-silicon
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+      - name: Installing Rust toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          profile: minimal
+          target: ${{ matrix.target }}
+          override: true
+      - name: Cargo build
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --release --target ${{ matrix.target }}
+      - name: Upload the binary to release
+        # No need to upload binaries for dry run (cron)
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.3.0
+        with:
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          file: target/${{ matrix.target }}/release/meilisearch
+          asset_name: ${{ matrix.asset_name }}
+          tag: ${{ github.ref }}
+
   publish-aarch64:
     name: Publish binary for aarch64
     runs-on: ${{ matrix.os }}
     needs: check-version
     continue-on-error: false
     strategy:
       fail-fast: false
       matrix:
@@ -77,11 +144,9 @@ jobs:
           linker: gcc-aarch64-linux-gnu
           use-cross: true
           asset_name: meilisearch-linux-aarch64
-
     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
-
       - name: Installing Rust toolchain
         uses: actions-rs/toolchain@v1
         with:
@@ -89,16 +154,13 @@ jobs:
           profile: minimal
           target: ${{ matrix.target }}
           override: true
 
-      - name: APT update
-        run: |
-          sudo apt update
 
       - name: Install target specific tools
         if: matrix.use-cross
         run: |
           sudo apt-get install -y ${{ matrix.linker }}
 
       - name: Configure target aarch64 GNU
         if: matrix.target == 'aarch64-unknown-linux-gnu'
         ## Environment variable is not passed using env:
@@ -110,22 +172,20 @@ jobs:
           echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
           echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
           echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
-          echo RUSTFLAGS="-Clink-arg=-fuse-ld=gold" >> $GITHUB_ENV
-
       - name: Cargo build
         uses: actions-rs/cargo@v1
         with:
           command: build
           use-cross: ${{ matrix.use-cross }}
           args: --release --target ${{ matrix.target }}
-
       - name: List target output files
         run: ls -lR ./target
-
       - name: Upload the binary to release
-        uses: svenstaro/upload-release-action@v1-release
+        # No need to upload binaries for dry run (cron)
+        if: github.event_name == 'release'
+        uses: svenstaro/upload-release-action@2.3.0
         with:
-          repo_token: ${{ secrets.PUBLISH_TOKEN }}
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
           asset_name: ${{ matrix.asset_name }}
           tag: ${{ github.ref }}
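The aarch64 job's cross-compilation setup can be reproduced on a developer machine; a sketch assuming a Debian-like host with rustup, using the linker, target, and jemalloc settings taken from the workflow:

```sh
sudo apt-get install -y gcc-aarch64-linux-gnu
rustup target add aarch64-unknown-linux-gnu
cat >> ~/.cargo/config <<'EOF'
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
EOF
export JEMALLOC_SYS_WITH_LG_PAGE=16   # build jemalloc for 64K-page arm64 kernels
cargo build --release --target aarch64-unknown-linux-gnu
```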
.github/workflows/publish-deb-brew-pkg.yml (vendored): 28 changes

@@ -1,4 +1,4 @@
-name: Publish deb pkg to GitHub release & APT repository & Homebrew
+name: Publish to APT repository & Homebrew
 
 on:
   release:
@@ -9,27 +9,35 @@ jobs:
     name: Check the version validity
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Check release validity
         run: bash .github/scripts/check-release.sh
 
   debian:
     name: Publish debian packagge
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     needs: check-version
+    container:
+      # Use ubuntu-18.04 to compile with glibc 2.27
+      image: ubuntu:18.04
     steps:
-      - uses: hecrj/setup-rust-action@master
+      - name: Install needed dependencies
+        run: |
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
+      - uses: actions-rs/toolchain@v1
         with:
-          rust-version: stable
+          toolchain: stable
+          override: true
       - name: Install cargo-deb
         run: cargo install cargo-deb
       - uses: actions/checkout@v3
       - name: Build deb package
-        run: cargo deb -p meilisearch-http -o target/debian/meilisearch.deb
+        run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
-        uses: svenstaro/upload-release-action@v1-release
+        uses: svenstaro/upload-release-action@2.3.0
         with:
-          repo_token: ${{ secrets.GITHUB_TOKEN }}
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb
           asset_name: meilisearch.deb
           tag: ${{ github.ref }}
@@ -38,11 +46,11 @@ jobs:
 
   homebrew:
     name: Bump Homebrew formula
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     needs: check-version
     steps:
       - name: Create PR to Homebrew
-        uses: mislav/bump-homebrew-formula-action@v1
+        uses: mislav/bump-homebrew-formula-action@v2
         with:
           formula-name: meilisearch
         env:
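Note the package rename: the crate published as a .deb is now `meilisearch` rather than `meilisearch-http`. The packaging step is runnable as-is outside CI; both commands are the job's own:

```sh
cargo install cargo-deb
cargo deb -p meilisearch -o target/debian/meilisearch.deb
# The resulting package is what gets attached to the release as meilisearch.deb.
```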
.github/workflows/publish-docker-images.yml (vendored): 77 changes

@@ -1,10 +1,16 @@
 ---
 on:
-  schedule:
-    - cron: '0 4 * * *' # Every day at 4:00am
   push:
-    tags:
-      - '*'
+    # Will run for every tag pushed except `latest`
+    # When the `latest` git tag is created with this [CI](../latest-git-tag.yml)
+    # we don't need to create a Docker `latest` image again.
+    # The `latest` Docker image push is already done in this CI when releasing a stable version of Meilisearch.
+    tags-ignore:
+      - latest
+  # Both `schedule` and `workflow_dispatch` build the nightly tag
+  schedule:
+    - cron: '0 23 * * *' # Every day at 11:00pm
+  workflow_dispatch:
 
 name: Publish tagged images to Docker Hub
 
@@ -12,29 +18,48 @@ jobs:
   docker:
     runs-on: docker
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
-      # Check if the tag has the v<nmumber>.<number>.<number> format. If yes, it means we are publishing an official release.
+      # If we are running a cron or manual job ('schedule' or 'workflow_dispatch' event), it means we are publishing the `nightly` tag, so not considered stable.
+      # If we have pushed a tag, and the tag has the v<nmumber>.<number>.<number> format, it means we are publishing an official release, so considered stable.
+      # In this situation, we need to set `output.stable` to create/update the following tags (additionally to the `vX.Y.Z` Docker tag):
+      # - a `vX.Y` (without patch version) Docker tag
+      # - a `latest` Docker tag
-      - name: Check tag format
-        if: github.event_name != 'schedule'
+      # For any other tag pushed, this is not considered stable.
+      - name: Define if stable and latest release
         id: check-tag-format
+        env:
+          # To avoid request limit with the .github/scripts/is-latest-release.sh script
+          GITHUB_PATH: ${{ secrets.MEILI_BOT_GH_PAT }}
         run: |
           escaped_tag=$(printf "%q" ${{ github.ref_name }})
+          echo "latest=false" >> $GITHUB_OUTPUT
 
-          if [[ $escaped_tag =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-            echo ::set-output name=stable::true
+          if [[ ${{ github.event_name }} != 'push' ]]; then
+            echo "stable=false" >> $GITHUB_OUTPUT
+          elif [[ $escaped_tag =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+            echo "stable=true" >> $GITHUB_OUTPUT
+            echo "latest=$(sh .github/scripts/is-latest-release.sh)" >> $GITHUB_OUTPUT
           else
-            echo ::set-output name=stable::false
+            echo "stable=false" >> $GITHUB_OUTPUT
           fi
 
-      # Check only the validity of the tag for official releases (not for pre-releases or other tags)
+      # Check only the validity of the tag for stable releases (not for pre-releases or other tags)
       - name: Check release validity
-        if: github.event_name != 'schedule' && steps.check-tag-format.outputs.stable == 'true'
+        if: steps.check-tag-format.outputs.stable == 'true'
         run: bash .github/scripts/check-release.sh
 
+      - name: Set build-args for Docker buildx
+        id: build-metadata
+        run: |
+          # Define ownership
+          git config --global --add safe.directory /home/meili/actions-runner/_work/meilisearch/meilisearch
+
+          # Extract commit date
+          commit_date=$(git show -s --format=%cd --date=iso-strict ${{ github.sha }})
+
+          echo "date=$commit_date" >> $GITHUB_OUTPUT
+
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
 
@@ -42,7 +67,6 @@ jobs:
         uses: docker/setup-buildx-action@v2
 
       - name: Login to Docker Hub
-        if: github.event_name != 'schedule'
         uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -53,19 +77,32 @@ jobs:
         uses: docker/metadata-action@v4
         with:
           images: getmeili/meilisearch
-          # The lastest and `vX.Y` tags are only pushed for the official Meilisearch releases
           # See https://github.com/docker/metadata-action#latest-tag
+          # Prevent `latest` to be updated for each new tag pushed.
+          # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
           flavor: latest=false
           tags: |
             type=ref,event=tag
+            type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
             type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
-            type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
+            type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}
 
       - name: Build and push
         id: docker_build
         uses: docker/build-push-action@v3
         with:
-          # We do not push tags for the cron jobs, this is only for test purposes
-          push: ${{ github.event_name != 'schedule' }}
+          push: true
           platforms: linux/amd64,linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
           build-args: |
             COMMIT_SHA=${{ github.sha }}
+            COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
 
+      # /!\ Don't touch this without checking with Cloud team
+      - name: Send CI information to Cloud team
+        # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
+        if: github.event_name == 'push'
+        uses: peter-evans/repository-dispatch@v2
+        with:
+          token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          repository: meilisearch/meilisearch-cloud
+          event-type: cloud-docker-build
+          client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
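Condensed, the tag-classification step reads as follows; a sketch in which TAG_NAME and EVENT_NAME stand in for `github.ref_name` and `github.event_name`, and GITHUB_OUTPUT can point at any scratch file when trying it locally:

```sh
escaped_tag=$(printf "%q" "$TAG_NAME")
echo "latest=false" >> "$GITHUB_OUTPUT"
if [[ $EVENT_NAME != 'push' ]]; then
    # cron or manual run: this build becomes the `nightly` image, never stable
    echo "stable=false" >> "$GITHUB_OUTPUT"
elif [[ $escaped_tag =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    # stable release: also decide whether it should move the `latest` Docker tag
    echo "stable=true" >> "$GITHUB_OUTPUT"
    echo "latest=$(sh .github/scripts/is-latest-release.sh)" >> "$GITHUB_OUTPUT"
else
    # release candidates and other tags
    echo "stable=false" >> "$GITHUB_OUTPUT"
fi
```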
.github/workflows/rust.yml (vendored): 63 changes

@@ -15,18 +15,46 @@ env:
   RUSTFLAGS: "-D warnings"
 
 jobs:
-  tests:
+  test-linux:
+    name: Tests on ubuntu-18.04
+    runs-on: ubuntu-latest
+    container:
+      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
+      image: ubuntu:18.04
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install needed dependencies
+        run: |
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - name: Cache dependencies
+        uses: Swatinem/rust-cache@v2.2.0
+      - name: Run cargo check without any default features
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --locked --release --no-default-features
+      - name: Run cargo test
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --locked --release
+
+  test-others:
     name: Tests on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-18.04, macos-latest, windows-latest]
+        os: [macos-12, windows-2022]
     steps:
       - uses: actions/checkout@v3
       - uses: rui314/setup-mold@v1 # Optimize link time
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+        uses: Swatinem/rust-cache@v2.2.0
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:
@@ -41,17 +69,22 @@ jobs:
   # We run tests in debug also, to make sure that the debug_assertions are hit
   test-debug:
     name: Run tests in debug
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
+    container:
+      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
+      image: ubuntu:18.04
     steps:
       - uses: actions/checkout@v3
       - uses: rui314/setup-mold@v1 # Optimize link time
+      - name: Install needed dependencies
+        run: |
+          apt-get update && apt-get install -y curl
+          apt-get install build-essential -y
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
           toolchain: stable
           override: true
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+        uses: Swatinem/rust-cache@v2.2.0
       - name: Run tests in debug
         uses: actions-rs/cargo@v1
         with:
@@ -60,10 +93,9 @@ jobs:
 
   clippy:
     name: Run Clippy
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: rui314/setup-mold@v1 # Optimize link time
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
@@ -71,26 +103,25 @@ jobs:
           override: true
           components: clippy
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+        uses: Swatinem/rust-cache@v2.2.0
       - name: Run cargo clippy
         uses: actions-rs/cargo@v1
         with:
           command: clippy
-          args: --all-targets -- --deny warnings
+          args: --all-targets -- --deny warnings --allow clippy::uninlined_format_args
 
   fmt:
     name: Run Rustfmt
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: rui314/setup-mold@v1 # Optimize link time
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
-          toolchain: stable
+          toolchain: nightly
          override: true
          components: rustfmt
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+        uses: Swatinem/rust-cache@v2.2.0
       - name: Run cargo fmt
         run: cargo fmt --all -- --check
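The four jobs map onto commands a contributor can run before pushing; a sketch with flags copied from the jobs (the fmt job switched to nightly because the new .rustfmt.toml below enables unstable features):

```sh
cargo build --locked --release --no-default-features   # "check without any default features"
cargo test --locked --release                          # test-linux / test-others
cargo test                                             # test-debug: exercises debug_assertions
cargo clippy --all-targets -- --deny warnings --allow clippy::uninlined_format_args
cargo +nightly fmt --all -- --check
```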
|
48
.github/workflows/update-cargo-toml-version.yml
vendored
Normal file
48
.github/workflows/update-cargo-toml-version.yml
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
name: Update Meilisearch version in all Cargo.toml files
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
new_version:
|
||||
description: 'The new version (vX.Y.Z)'
|
||||
required: true
|
||||
|
||||
env:
|
||||
NEW_VERSION: ${{ github.event.inputs.new_version }}
|
||||
NEW_BRANCH: update-version-${{ github.event.inputs.new_version }}
|
||||
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||
|
||||
jobs:
|
||||
|
||||
update-version-cargo-toml:
|
||||
name: Update version in Cargo.toml files
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
- name: Install sd
|
||||
run: cargo install sd
|
||||
- name: Update Cargo.toml files
|
||||
run: |
|
||||
raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2)
|
||||
new_string="version = \"$raw_new_version\""
|
||||
sd '^version = "\d+.\d+.\w+"$' "$new_string" */Cargo.toml
|
||||
- name: Build Meilisearch to update Cargo.lock
|
||||
run: cargo build
|
||||
- name: Commit and push the changes to the ${{ env.NEW_BRANCH }} branch
|
||||
uses: EndBug/add-and-commit@v9
|
||||
with:
|
||||
message: "Update version for the next release (${{ env.NEW_VERSION }}) in Cargo.toml files"
|
||||
new_branch: ${{ env.NEW_BRANCH }}
|
||||
- name: Create the PR pointing to ${{ github.ref_name }}
|
||||
run: |
|
||||
gh pr create \
|
||||
--title "Update version for the next release ($NEW_VERSION) in Cargo.toml files" \
|
||||
--body '⚠️ This PR is automatically generated. Check the new version is the expected one before merging.' \
|
||||
--label 'skip changelog' \
|
||||
--milestone $NEW_VERSION \
|
||||
--base $GITHUB_REF_NAME
|
.gitignore (vendored): 7 changes

@@ -7,3 +7,10 @@
 /data.ms
 /snapshots
 /dumps
+
+
+# Snapshots
+## ... large
+*.full.snap
+## ... unreviewed
+*.snap.new
.rustfmt.toml (new file): 5 lines

@@ -0,0 +1,5 @@
+unstable_features = true
+
+use_small_heuristics = "max"
+imports_granularity = "Module"
+group_imports = "StdExternalCrate"
CONTRIBUTING.md

@@ -4,6 +4,10 @@ First, thank you for contributing to Meilisearch! The goal of this document is t
 
 Remember that there are many ways to contribute other than writing code: writing [tutorials or blog posts](https://github.com/meilisearch/awesome-meilisearch), improving [the documentation](https://github.com/meilisearch/documentation), submitting [bug reports](https://github.com/meilisearch/meilisearch/issues/new?assignees=&labels=&template=bug_report.md&title=) and [feature requests](https://github.com/meilisearch/product/discussions/categories/feedback-feature-proposal)...
 
+The code in this repository is only concerned with managing multiple indexes, handling the update store, and exposing an HTTP API. Search and indexation are the domain of our core engine, [`milli`](https://github.com/meilisearch/milli), while tokenization is handled by [our `charabia` library](https://github.com/meilisearch/charabia/).
+
+If Meilisearch does not offer optimized support for your language, please consider contributing to `charabia` by following the [CONTRIBUTING.md file](https://github.com/meilisearch/charabia/blob/main/CONTRIBUTING.md) and integrating your intended normalizer/segmenter.
+
 ## Table of Contents
 
 - [Assumptions](#assumptions)
@@ -93,12 +97,12 @@ _[Read more about this](https://github.com/meilisearch/integration-guides/blob/m
 
 ### How to Publish a new Release
 
-The full Meilisearch release process is described in [this guide](https://github.com/meilisearch/core-team/blob/main/resources/meilisearch-release.md). Please follow it carefully before doing any release.
+The full Meilisearch release process is described in [this guide](https://github.com/meilisearch/engine-team/blob/main/resources/meilisearch-release.md). Please follow it carefully before doing any release.
 
 ### Release assets
 
 For each release, the following assets are created:
-- Binaries for differents platforms (Linux, MacOS, Windows and ARM architectures) are attached to the GitHub release
+- Binaries for different platforms (Linux, MacOS, Windows and ARM architectures) are attached to the GitHub release
 - Binaries are pushed to HomeBrew and APT (not published for RC)
 - Docker tags are created/updated:
   - `vX.Y.Z`
Cargo.lock (generated, 2380 lines changed)
(File diff suppressed because it is too large.)
Cargo.toml (10 lines changed)
@@ -1,13 +1,19 @@
[workspace]
+resolver = "2"
members = [
-    "meilisearch-http",
+    "meilisearch",
    "meilisearch-types",
-    "meilisearch-lib",
    "meilisearch-auth",
+    "meili-snap",
+    "index-scheduler",
+    "dump",
+    "file-store",
    "permissive-json-pointer",
]

+[profile.release]
+codegen-units = 1
+
[profile.dev.package.flate2]
opt-level = 3
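Note: with the workspace split into these members, individual crates can be built or tested in isolation; for instance:

```sh
# Build only the HTTP server crate, or run one member's test suite.
cargo build --release -p meilisearch
cargo test -p dump
```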
Dockerfile
@@ -1,5 +1,5 @@
# Compile
-FROM rust:alpine3.14 AS compiler
+FROM rust:alpine3.16 AS compiler

RUN apk add -q --update-cache --no-cache build-base openssl-dev

@@ -7,7 +7,7 @@ WORKDIR /meilisearch

ARG COMMIT_SHA
ARG COMMIT_DATE
-ENV COMMIT_SHA=${COMMIT_SHA} COMMIT_DATE=${COMMIT_DATE}
+ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE}
ENV RUSTFLAGS="-C target-feature=-crt-static"

COPY . .

@@ -19,7 +19,7 @@ RUN set -eux; \
    cargo build --release

# Run
-FROM alpine:3.14
+FROM alpine:3.16

ENV MEILI_HTTP_ADDR 0.0.0.0:7700
ENV MEILI_SERVER_PROVIDER docker
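Note: the image now expects commit metadata through the `VERGEN_*` variables. A sketch of a local build supplying them (the tag name is illustrative):

```sh
# Build the image locally, passing commit metadata through the build args.
docker build \
  --build-arg COMMIT_SHA="$(git rev-parse HEAD)" \
  --build-arg COMMIT_DATE="$(git show -s --format=%cI HEAD)" \
  -t meilisearch:local .
```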
README.md (212 lines changed)
@@ -1,205 +1,103 @@
<p align="center">
-  <img src="assets/logo.svg" alt="Meilisearch" width="200" height="200" />
+  <img src="assets/meilisearch-logo-light.svg?sanitize=true#gh-light-mode-only">
+  <img src="assets/meilisearch-logo-dark.svg?sanitize=true#gh-dark-mode-only">
</p>

<h1 align="center">Meilisearch</h1>

<h4 align="center">
  <a href="https://www.meilisearch.com">Website</a> |
  <a href="https://roadmap.meilisearch.com/tabs/1-under-consideration">Roadmap</a> |
  <a href="https://blog.meilisearch.com">Blog</a> |
-  <a href="https://fr.linkedin.com/company/meilisearch">LinkedIn</a> |
-  <a href="https://twitter.com/meilisearch">Twitter</a> |
  <a href="https://docs.meilisearch.com">Documentation</a> |
-  <a href="https://docs.meilisearch.com/faq/">FAQ</a>
+  <a href="https://docs.meilisearch.com/faq/">FAQ</a> |
+  <a href="https://discord.meilisearch.com">Discord</a>
</h4>

<p align="center">
-  <a href="https://github.com/meilisearch/meilisearch/actions"><img src="https://github.com/meilisearch/meilisearch/workflows/Cargo%20test/badge.svg" alt="Build Status"></a>
  <a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
  <a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
-  <a href="https://slack.meilisearch.com"><img src="https://img.shields.io/badge/slack-meilisearch-blue.svg?logo=slack" alt="Slack"></a>
-  <a href="https://github.com/meilisearch/meilisearch/discussions" alt="Discussions"><img src="https://img.shields.io/badge/github-discussions-red" /></a>
  <a href="https://app.bors.tech/repositories/26457"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
</p>

-<p align="center">⚡ Lightning Fast, Ultra Relevant, and Typo-Tolerant Search Engine 🔍</p>
+<p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>

-**Meilisearch** is a powerful, fast, open-source, easy to use and deploy search engine. Both searching and indexing are highly customizable. Features such as typo-tolerance, filters, and synonyms are provided out-of-the-box.
-For more information about features go to [our documentation](https://docs.meilisearch.com/).
+Meilisearch helps you shape a delightful search experience in a snap, offering features that work out-of-the-box to speed up your workflow.

-<p align="center">
-  <img src="assets/trumen-fast.gif" alt="Web interface gif" />
+<p align="center" name="demo">
+  <a href="https://where2watch.meilisearch.com/#gh-light-mode-only" target="_blank">
+    <img src="assets/demo-light.gif#gh-light-mode-only" alt="A bright colored application for finding movies screening near the user">
+  </a>
+  <a href="https://where2watch.meilisearch.com/#gh-dark-mode-only" target="_blank">
+    <img src="assets/demo-dark.gif#gh-dark-mode-only" alt="A dark colored application for finding movies screening near the user">
+  </a>
</p>

+🔥 [**Try it!**](https://where2watch.meilisearch.com/) 🔥

## ✨ Features
-* Search-as-you-type experience (answers < 50 milliseconds)
-* Full-text search
-* Typo tolerant (understands typos and misspelling)
-* Faceted search and filters
-* Supports hanzi (Chinese characters)
-* Supports synonyms
-* Easy to install, deploy, and maintain
-* Whole documents are returned
-* Highly customizable
-* RESTful API
-
-## Getting started
+- **Search-as-you-type:** find search results in less than 50 milliseconds
+- **[Typo tolerance](https://docs.meilisearch.com/learn/getting_started/customizing_relevancy.html#typo-tolerance):** get relevant matches even when queries contain typos and misspellings
+- **[Filtering and faceted search](https://docs.meilisearch.com/learn/advanced/filtering_and_faceted_search.html):** enhance your user's search experience with custom filters and build a faceted search interface in a few lines of code
+- **[Sorting](https://docs.meilisearch.com/learn/advanced/sorting.html):** sort results based on price, date, or pretty much anything else your users need
+- **[Synonym support](https://docs.meilisearch.com/learn/getting_started/customizing_relevancy.html#synonyms):** configure synonyms to include more relevant content in your search results
+- **[Geosearch](https://docs.meilisearch.com/learn/advanced/geosearch.html):** filter and sort documents based on geographic data
+- **[Extensive language support](https://docs.meilisearch.com/learn/what_is_meilisearch/language.html):** search datasets in any language, with optimized support for Chinese, Japanese, Hebrew, and languages using the Latin alphabet
+- **[Security management](https://docs.meilisearch.com/learn/security/master_api_keys.html):** control which users can access what data with API keys that allow fine-grained permissions handling
+- **[Multi-Tenancy](https://docs.meilisearch.com/learn/security/tenant_tokens.html):** personalize search results for any number of application tenants
+- **Highly Customizable:** customize Meilisearch to your specific needs or use our out-of-the-box and hassle-free presets
+- **[RESTful API](https://docs.meilisearch.com/reference/api/overview.html):** integrate Meilisearch in your technical stack with our plugins and SDKs
+- **Easy to install, deploy, and maintain**

-### Deploy the Server
+## 📖 Documentation

-#### Homebrew (Mac OS)
+You can consult Meilisearch's documentation at [https://docs.meilisearch.com](https://docs.meilisearch.com/).

-```bash
-brew update && brew install meilisearch
-meilisearch
-```
+## 🚀 Getting started

-#### Docker
+For basic instructions on how to set up Meilisearch, add documents to an index, and search for documents, take a look at our [Quick Start](https://docs.meilisearch.com/learn/getting_started/quick_start.html) guide.

-```bash
-docker run -p 7700:7700 -v "$(pwd)/meili_data:/meili_data" getmeili/meilisearch
-```
+You may also want to check out [Meilisearch 101](https://docs.meilisearch.com/learn/getting_started/filtering_and_sorting.html) for an introduction to some of Meilisearch's most popular features.

-#### Announcing a cloud-hosted Meilisearch
+## ☁️ Meilisearch cloud

-Join the closed beta by filling out this [form](https://meilisearch.typeform.com/to/VI2cI2rv).
+Let us manage your infrastructure so you can focus on integrating a great search experience. Try [Meilisearch Cloud](https://meilisearch.com/pricing) today.

-#### Try Meilisearch in our Sandbox
+## 🧰 SDKs & integration tools

-Create a Meilisearch instance in [Meilisearch Sandbox](https://sandbox.meilisearch.com/). This instance is free, and will be active for 48 hours.
+Install one of our SDKs in your project for seamless integration between Meilisearch and your favorite language or framework!

-#### Run on Digital Ocean
+Take a look at the complete [Meilisearch integration list](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html).

-[](https://marketplace.digitalocean.com/apps/meilisearch?action=deploy&refcode=7c67bd97e101)
+![Logos belonging to different languages and frameworks supported by Meilisearch, including React, Ruby on Rails, Go, Rust, and PHP](assets/integrations.png)

-#### Deploy on Platform.sh
+## ⚙️ Advanced usage

-<a href="https://console.platform.sh/projects/create-project?template=https://raw.githubusercontent.com/platformsh/template-builder/master/templates/meilisearch/.platform.template.yaml&utm_content=meilisearch&utm_source=github&utm_medium=button&utm_campaign=deploy_on_platform">
-  <img src="https://platform.sh/images/deploy/lg-blue.svg" alt="Deploy on Platform.sh" width="180px" />
-</a>
+Experienced users will want to keep our [API Reference](https://docs.meilisearch.com/reference/api) close at hand.

-#### APT (Debian & Ubuntu)
+We also offer a wide range of dedicated guides to all Meilisearch features, such as [filtering](https://docs.meilisearch.com/learn/advanced/filtering_and_faceted_search.html), [sorting](https://docs.meilisearch.com/learn/advanced/sorting.html), [geosearch](https://docs.meilisearch.com/learn/advanced/geosearch.html), [API keys](https://docs.meilisearch.com/learn/security/master_api_keys.html), and [tenant tokens](https://docs.meilisearch.com/learn/security/tenant_tokens.html).

-```bash
-echo "deb [trusted=yes] https://apt.fury.io/meilisearch/ /" > /etc/apt/sources.list.d/fury.list
-apt update && apt install meilisearch-http
-meilisearch
-```
+Finally, for more in-depth information, refer to our articles explaining fundamental Meilisearch concepts such as [documents](https://docs.meilisearch.com/learn/core_concepts/documents.html) and [indexes](https://docs.meilisearch.com/learn/core_concepts/indexes.html).

-#### Download the binary (Linux & Mac OS)
+## 📊 Telemetry

-```bash
-curl -L https://install.meilisearch.com | sh
-./meilisearch
-```
+Meilisearch collects **anonymized** data from users to help us improve our product. You can [deactivate this](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html#how-to-disable-data-collection) whenever you want.

-#### Compile and run it from sources
+To request deletion of collected data, please write to us at [privacy@meilisearch.com](mailto:privacy@meilisearch.com). Don't forget to include your `Instance UID` in the message, as this helps us quickly find and delete your data.

-If you have the latest stable Rust toolchain installed on your local system, clone the repository and change it to your working directory.
+If you want to know more about the kind of data we collect and what we use it for, check the [telemetry section](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html) of our documentation.

-```bash
-git clone https://github.com/meilisearch/meilisearch.git
-cd meilisearch
-cargo run --release
-```
+## 📫 Get in touch!

-### Create an Index and Upload Some Documents
+Meilisearch is a search engine created by [Meili](https://www.welcometothejungle.com/en/companies/meilisearch), a software development company based in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/)

-Let's create an index! If you need a sample dataset, use [this movie database](https://www.notion.so/meilisearch/A-movies-dataset-to-test-Meili-1cbf7c9cfa4247249c40edfa22d7ca87#b5ae399b81834705ba5420ac70358a65). You can also find it in the `datasets/` directory.
+🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.

-```bash
-curl -L https://docs.meilisearch.com/movies.json -o movies.json
-```
+💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:

-Now, you're ready to index some data.
+- For feature requests, please visit our [product repository](https://github.com/meilisearch/product/discussions)
+- Found a bug? Open an [issue](https://github.com/meilisearch/meilisearch/issues)!
+- Want to be part of our Discord community? [Join us!](https://discord.gg/meilisearch)
+- For everything else, please check [this page listing some of the other places where you can find us](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html)

-```bash
-curl -i -X POST 'http://127.0.0.1:7700/indexes/movies/documents' \
-  --header 'content-type: application/json' \
-  --data-binary @movies.json
-```
-
-### Search for Documents
-
-#### In command line
-
-The search engine is now aware of your documents and can serve those via an HTTP server.
-
-The [`jq` command-line tool](https://stedolan.github.io/jq/) can greatly help you read the server responses.
-
-```bash
-curl 'http://127.0.0.1:7700/indexes/movies/search?q=botman+robin&limit=2' | jq
-```
-
-```json
-{
-  "hits": [
-    {
-      "id": "415",
-      "title": "Batman & Robin",
-      "poster": "https://image.tmdb.org/t/p/w1280/79AYCcxw3kSKbhGpx1LiqaCAbwo.jpg",
-      "overview": "Along with crime-fighting partner Robin and new recruit Batgirl, Batman battles the dual threat of frosty genius Mr. Freeze and homicidal horticulturalist Poison Ivy. Freeze plans to put Gotham City on ice, while Ivy tries to drive a wedge between the dynamic duo.",
-      "release_date": 866768400
-    },
-    {
-      "id": "411736",
-      "title": "Batman: Return of the Caped Crusaders",
-      "poster": "https://image.tmdb.org/t/p/w1280/GW3IyMW5Xgl0cgCN8wu96IlNpD.jpg",
-      "overview": "Adam West and Burt Ward returns to their iconic roles of Batman and Robin. Featuring the voices of Adam West, Burt Ward, and Julie Newmar, the film sees the superheroes going up against classic villains like The Joker, The Riddler, The Penguin and Catwoman, both in Gotham City… and in space.",
-      "release_date": 1475888400
-    }
-  ],
-  "nbHits": 8,
-  "exhaustiveNbHits": false,
-  "query": "botman robin",
-  "limit": 2,
-  "offset": 0,
-  "processingTimeMs": 2
-}
-```
-
-#### Use the Web Interface
-
-We also deliver an **out-of-the-box [web interface](https://github.com/meilisearch/mini-dashboard)** in which you can test Meilisearch interactively.
-
-You can access the web interface in your web browser at the root of the server. The default URL is [http://127.0.0.1:7700](http://127.0.0.1:7700). All you need to do is open your web browser and enter Meilisearch’s address to visit it. This will lead you to a web page with a search bar that will allow you to search in the selected index.
-
-[See the gif above](#demo)
-
-## Documentation
-
-Now that your Meilisearch server is up and running, you can learn more about how to tune your search engine in [the documentation](https://docs.meilisearch.com).
-
-## Contributing
-
-Hey! We're glad you're thinking about contributing to Meilisearch! Feel free to pick an [issue labeled as `good first issue`](https://github.com/meilisearch/meilisearch/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22), and to ask any question you need. Some points might not be clear and we are available to help you!
-
-Also, we recommend following the [CONTRIBUTING](./CONTRIBUTING.md) to create your PR.
-
-## Core engine and tokenizer
-
-The code in this repository is only concerned with managing multiple indexes, handling the update store, and exposing an HTTP API.
-
-Search and indexation are the domain of our core engine, [`milli`](https://github.com/meilisearch/milli), while tokenization is handled by [our `tokenizer` library](https://github.com/meilisearch/tokenizer/).
-## Telemetry
-
-Meilisearch collects anonymous data regarding general usage.
-This helps us better understand developers' usage of Meilisearch features.
-
-To find out more on what information we're retrieving, please see our documentation on [Telemetry](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html).
-
-This program is optional, you can disable these analytics by using the `MEILI_NO_ANALYTICS` env variable.
-
-## Feature request
-
-The feature requests are not managed in this repository. Please visit our [dedicated repository](https://github.com/meilisearch/product) to see our work about the Meilisearch product.
-
-If you have a feature request or any feedback about an existing feature, please open [a discussion](https://github.com/meilisearch/product/discussions).
-Also, feel free to participate in the current discussions, we are looking forward to reading your comments.
-
-## 💌 Contact
-
-Please visit [this page](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html#contact-us).
-
-Meilisearch is developed by [Meili](https://www.meilisearch.com), a young company. To know more about us, you can [read our blog](https://blog.meilisearch.com). Any suggestion or feedback is highly appreciated. Thank you for your support!
+Thank you for your support!
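Note on the telemetry section above: collection can be disabled per instance. A minimal sketch, assuming a v1.x binary (the flag pairs with the `MEILI_NO_ANALYTICS` variable mentioned in the old text):

```sh
# Either form starts an instance with telemetry turned off.
./meilisearch --no-analytics
MEILI_NO_ANALYTICS=true ./meilisearch
```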
assets/demo-dark.gif (BIN, new file; binary file not shown. After: 2.8 MiB)
assets/demo-light.gif (BIN, new file; binary file not shown. After: 1.7 MiB)
assets/integrations.png (BIN, new file; binary file not shown. After: 799 KiB)

assets/meilisearch-logo-dark.svg (new file, +30)
@@ -0,0 +1,30 @@
<svg width="495" height="74" viewBox="0 0 495 74" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M181.842 42.5349C181.842 37.6137 184.201 34.715 188.718 34.715C192.965 34.715 194.381 37.7486 194.381 41.6585V62.6238H203.953V40.5799C203.953 32.3556 199.639 26.4907 191.145 26.4907C186.089 26.4907 182.516 28.0412 179.415 31.4792C177.393 28.3782 173.955 26.4907 169.168 26.4907C164.112 26.4907 160.607 28.5805 158.989 31.614V27.2996H150.158V62.6238H159.731V42.3326C159.731 37.6137 162.157 34.715 166.607 34.715C170.854 34.715 172.269 37.7486 172.269 41.6585V62.6238H181.842V42.5349Z" fill="white"/>
<path d="M243.245 47.7256C243.245 47.7256 243.379 46.4448 243.379 44.8943C243.379 34.4454 236.301 26.4907 225.852 26.4907C215.403 26.4907 208.123 34.4454 208.123 44.8943C208.123 55.7477 215.471 63.4327 225.92 63.4327C234.077 63.4327 240.548 58.5116 242.638 51.3659H232.998C231.852 53.9276 229.088 55.2084 226.189 55.2084C221.403 55.2084 218.302 52.5793 217.628 47.7256H243.245ZM225.785 34.1757C230.234 34.1757 233.133 36.8722 233.807 40.8495H217.763C218.572 36.8048 221.403 34.1757 225.785 34.1757Z" fill="white"/>
<path d="M244.791 35.524H249.038V62.6238H258.61V27.2996H244.791V35.524ZM253.824 22.7156C257.195 22.7156 259.622 20.3561 259.622 16.9855C259.622 13.6149 257.195 11.188 253.824 11.188C250.454 11.188 248.027 13.6149 248.027 16.9855C248.027 20.3561 250.454 22.7156 253.824 22.7156Z" fill="white"/>
<path d="M278.432 54.3995C278.163 54.3995 277.758 54.4669 277.152 54.4669C274.994 54.4669 274.725 53.4557 274.725 51.9726V12.0644H265.152V52.6467C265.152 59.6576 267.849 62.7586 275.466 62.7586C276.747 62.7586 277.96 62.6238 278.432 62.5564V54.3995Z" fill="white"/>
<path d="M279.521 35.524H283.768V62.6238H293.341V27.2996H279.521V35.524ZM288.555 22.7156C291.925 22.7156 294.352 20.3561 294.352 16.9855C294.352 13.6149 291.925 11.188 288.555 11.188C285.184 11.188 282.757 13.6149 282.757 16.9855C282.757 20.3561 285.184 22.7156 288.555 22.7156Z" fill="white"/>
<path d="M312.557 62.9937C321.86 62.9937 326.242 58.0726 326.242 52.8819C326.242 38.4556 305.007 46.4777 305.007 36.9725C305.007 33.8716 307.636 31.2425 312.962 31.2425C318.422 31.2425 320.984 34.2086 321.388 37.9163H326.175C325.77 33.2648 322.602 27.0629 313.097 27.0629C304.94 27.0629 300.356 31.9166 300.356 37.1748C300.356 51.264 321.591 43.1745 321.591 53.0167C321.591 56.4547 318.355 58.8142 312.557 58.8142C306.625 58.8142 303.659 55.848 303.322 51.4662H298.468C298.873 57.4659 302.648 62.9937 312.557 62.9937Z" fill="white"/>
<path d="M364.256 46.4103C364.256 46.4103 364.324 45.3317 364.324 44.5901C364.324 34.8827 358.054 27.0629 347.808 27.0629C337.494 27.0629 330.955 35.4894 330.955 44.9946C330.955 54.6346 337.022 62.9937 347.875 62.9937C356.032 62.9937 361.695 58.0052 363.717 51.4662H358.729C357.245 55.6458 353.201 58.6794 347.943 58.6794C340.729 58.6794 336.213 53.3538 335.741 46.4103H364.256ZM347.808 31.3773C354.549 31.3773 358.931 35.8939 359.538 42.5004H335.876C336.685 36.1636 341.134 31.3773 347.808 31.3773Z" fill="white"/>
<path d="M394.037 45.871V49.1068C394.037 54.9717 389.79 59.0164 381.634 59.0164C376.578 59.0164 373.814 56.9266 373.814 52.41C373.814 50.118 374.892 48.3652 376.578 47.4215C378.33 46.4777 380.69 45.871 394.037 45.871ZM381.094 62.9937C387.027 62.9937 391.813 61.1062 394.24 57.1963V62.1848H398.824V39.7364C398.824 32.1188 394.442 27.0629 384.532 27.0629C375.027 27.0629 370.848 31.8492 369.971 37.9837H374.623C375.566 33.13 379.274 31.1751 384.33 31.1751C390.802 31.1751 394.037 33.8716 394.037 39.669V41.8936C383.184 41.8936 378.667 42.0959 375.297 43.4441C371.387 44.9946 369.095 48.4327 369.095 52.5448C369.095 58.5445 372.937 62.9937 381.094 62.9937Z" fill="white"/>
<path d="M424.991 27.6022C424.991 27.6022 424.182 27.5348 423.845 27.5348C417.509 27.5348 414.138 30.838 412.857 33.1974V27.8718H408.273V62.1848H413.059V42.7026C413.059 35.5569 417.441 32.0514 423.306 32.0514C424.182 32.0514 424.991 32.1188 424.991 32.1188V27.6022Z" fill="white"/>
<path d="M425.809 45.062C425.809 54.4324 432.28 62.9937 442.729 62.9937C452.032 62.9937 457.425 56.7918 458.773 49.9831H453.92C452.504 55.3087 448.594 58.6794 442.729 58.6794C435.516 58.6794 430.662 52.9493 430.662 45.062C430.662 37.1073 435.516 31.3773 442.729 31.3773C448.594 31.3773 452.504 34.7479 453.92 40.0735H458.773C457.425 33.2648 452.032 27.0629 442.729 27.0629C432.28 27.0629 425.809 35.6243 425.809 45.062Z" fill="white"/>
<path d="M470.041 11.6254H465.255V62.1848H470.041V41.8936C470.041 34.8827 474.558 31.2425 480.355 31.2425C486.49 31.2425 489.389 35.0176 489.389 41.2195V62.1848H494.175V40.2757C494.175 32.6581 489.658 27.0629 481.164 27.0629C474.76 27.0629 471.255 30.5683 470.041 32.6581V11.6254Z" fill="white"/>
<path d="M0.825012 73.993L24.0688 14.5224C27.3443 6.14179 35.4223 0.625977 44.4203 0.625977H58.4336L35.1899 60.0966C31.9143 68.4772 23.8363 73.993 14.8384 73.993H0.825012Z" fill="url(#paint0_linear_0_3)"/>
<path d="M34.9246 73.9932L58.1684 14.5226C61.444 6.14197 69.5219 0.626152 78.5199 0.626152H92.5333L69.2895 60.0968C66.014 68.4774 57.936 73.9932 48.938 73.9932H34.9246Z" fill="url(#paint1_linear_0_3)"/>
<path d="M69.0262 73.9932L92.27 14.5226C95.5456 6.14197 103.624 0.626152 112.622 0.626152H126.635L103.391 60.0968C100.116 68.4774 92.0376 73.9932 83.0396 73.9932H69.0262Z" fill="url(#paint2_linear_0_3)"/>
<defs>
<linearGradient id="paint0_linear_0_3" x1="126.635" y1="-4.97799" x2="0.825008" y2="66.0978" gradientUnits="userSpaceOnUse">
<stop stop-color="#FF5CAA"/>
<stop offset="1" stop-color="#FF4E62"/>
</linearGradient>
<linearGradient id="paint1_linear_0_3" x1="126.635" y1="-4.97799" x2="0.825008" y2="66.0978" gradientUnits="userSpaceOnUse">
<stop stop-color="#FF5CAA"/>
<stop offset="1" stop-color="#FF4E62"/>
</linearGradient>
<linearGradient id="paint2_linear_0_3" x1="126.635" y1="-4.97799" x2="0.825008" y2="66.0978" gradientUnits="userSpaceOnUse">
<stop stop-color="#FF5CAA"/>
<stop offset="1" stop-color="#FF4E62"/>
</linearGradient>
</defs>
</svg>
(After: 5.8 KiB)

assets/meilisearch-logo-light.svg (new file, +30)
@@ -0,0 +1,30 @@
<svg width="495" height="74" viewBox="0 0 495 74" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M181.84 42.5347C181.84 37.6136 184.199 34.7149 188.716 34.7149C192.963 34.7149 194.378 37.7484 194.378 41.6584V62.6237H203.951V40.5798C203.951 32.3554 199.637 26.4906 191.143 26.4906C186.087 26.4906 182.514 28.041 179.413 31.4791C177.39 28.3781 173.952 26.4906 169.166 26.4906C164.11 26.4906 160.605 28.5804 158.987 31.6139V27.2995H150.156V62.6237H159.728V42.3325C159.728 37.6136 162.155 34.7149 166.604 34.7149C170.851 34.7149 172.267 37.7484 172.267 41.6584V62.6237H181.84V42.5347Z" fill="#21004B"/>
<path d="M243.242 47.7255C243.242 47.7255 243.377 46.4447 243.377 44.8942C243.377 34.4452 236.299 26.4906 225.85 26.4906C215.401 26.4906 208.12 34.4452 208.12 44.8942C208.12 55.7476 215.468 63.4326 225.917 63.4326C234.074 63.4326 240.546 58.5115 242.636 51.3658H232.996C231.85 53.9274 229.086 55.2083 226.187 55.2083C221.401 55.2083 218.3 52.5792 217.626 47.7255H243.242ZM225.783 34.1756C230.232 34.1756 233.131 36.8721 233.805 40.8494H217.76C218.569 36.8047 221.401 34.1756 225.783 34.1756Z" fill="#21004B"/>
<path d="M244.789 35.5238H249.036V62.6237H258.608V27.2995H244.789V35.5238ZM253.822 22.7155C257.193 22.7155 259.619 20.356 259.619 16.9854C259.619 13.6148 257.193 11.1879 253.822 11.1879C250.451 11.1879 248.024 13.6148 248.024 16.9854C248.024 20.356 250.451 22.7155 253.822 22.7155Z" fill="#21004B"/>
<path d="M278.43 54.3993C278.16 54.3993 277.756 54.4667 277.149 54.4667C274.992 54.4667 274.722 53.4556 274.722 51.9725V12.0643H265.15V52.6466C265.15 59.6575 267.846 62.7585 275.464 62.7585C276.745 62.7585 277.958 62.6237 278.43 62.5562V54.3993Z" fill="#21004B"/>
<path d="M279.519 35.5238H283.766V62.6237H293.339V27.2995H279.519V35.5238ZM288.553 22.7155C291.923 22.7155 294.35 20.356 294.35 16.9854C294.35 13.6148 291.923 11.1879 288.553 11.1879C285.182 11.1879 282.755 13.6148 282.755 16.9854C282.755 20.356 285.182 22.7155 288.553 22.7155Z" fill="#21004B"/>
<path d="M312.557 62.9939C321.86 62.9939 326.242 58.0728 326.242 52.882C326.242 38.4557 305.007 46.4778 305.007 36.9726C305.007 33.8717 307.636 31.2426 312.962 31.2426C318.422 31.2426 320.984 34.2087 321.388 37.9164H326.175C325.77 33.265 322.602 27.063 313.097 27.063C304.94 27.063 300.356 31.9167 300.356 37.1749C300.356 51.2641 321.591 43.1746 321.591 53.0168C321.591 56.4548 318.355 58.8143 312.557 58.8143C306.625 58.8143 303.659 55.8481 303.322 51.4663H298.468C298.872 57.466 302.648 62.9939 312.557 62.9939Z" fill="#21004B"/>
<path d="M364.256 46.4104C364.256 46.4104 364.324 45.3318 364.324 44.5903C364.324 34.8829 358.054 27.063 347.808 27.063C337.494 27.063 330.955 35.4896 330.955 44.9947C330.955 54.6347 337.022 62.9939 347.875 62.9939C356.032 62.9939 361.695 58.0053 363.717 51.4663H358.728C357.245 55.6459 353.201 58.6795 347.942 58.6795C340.729 58.6795 336.213 53.3539 335.741 46.4104H364.256ZM347.808 31.3774C354.549 31.3774 358.931 35.894 359.537 42.5005H335.876C336.685 36.1637 341.134 31.3774 347.808 31.3774Z" fill="#21004B"/>
<path d="M394.037 45.8711V49.1069C394.037 54.9718 389.79 59.0165 381.633 59.0165C376.578 59.0165 373.814 56.9267 373.814 52.4101C373.814 50.1181 374.892 48.3654 376.578 47.4216C378.33 46.4778 380.69 45.8711 394.037 45.8711ZM381.094 62.9939C387.026 62.9939 391.813 61.1063 394.24 57.1964V62.1849H398.824V39.7366C398.824 32.1189 394.442 27.063 384.532 27.063C375.027 27.063 370.847 31.8493 369.971 37.9838H374.623C375.566 33.1301 379.274 31.1752 384.33 31.1752C390.802 31.1752 394.037 33.8717 394.037 39.6691V41.8938C383.184 41.8938 378.667 42.096 375.297 43.4442C371.387 44.9947 369.095 48.4328 369.095 52.5449C369.095 58.5446 372.937 62.9939 381.094 62.9939Z" fill="#21004B"/>
<path d="M424.991 27.6023C424.991 27.6023 424.182 27.5349 423.845 27.5349C417.508 27.5349 414.138 30.8381 412.857 33.1975V27.872H408.273V62.1849H413.059V42.7027C413.059 35.557 417.441 32.0515 423.306 32.0515C424.182 32.0515 424.991 32.1189 424.991 32.1189V27.6023Z" fill="#21004B"/>
<path d="M425.809 45.0621C425.809 54.4325 432.28 62.9939 442.729 62.9939C452.032 62.9939 457.425 56.7919 458.773 49.9832H453.92C452.504 55.3088 448.594 58.6795 442.729 58.6795C435.516 58.6795 430.662 52.9494 430.662 45.0621C430.662 37.1075 435.516 31.3774 442.729 31.3774C448.594 31.3774 452.504 34.748 453.92 40.0736H458.773C457.425 33.265 452.032 27.063 442.729 27.063C432.28 27.063 425.809 35.6244 425.809 45.0621Z" fill="#21004B"/>
<path d="M470.041 11.6255H465.255V62.1849H470.041V41.8938C470.041 34.8829 474.558 31.2426 480.355 31.2426C486.49 31.2426 489.389 35.0177 489.389 41.2196V62.1849H494.175V40.2759C494.175 32.6582 489.658 27.063 481.164 27.063C474.76 27.063 471.255 30.5685 470.041 32.6582V11.6255Z" fill="#21004B"/>
<path d="M0.824951 73.993L24.0688 14.5224C27.3443 6.14179 35.4223 0.625977 44.4202 0.625977H58.4336L35.1898 60.0966C31.9143 68.4772 23.8363 73.993 14.8383 73.993H0.824951Z" fill="url(#paint0_linear_0_15)"/>
<path d="M34.9246 73.9932L58.1684 14.5226C61.4439 6.14197 69.5219 0.626152 78.5199 0.626152H92.5332L69.2894 60.0968C66.0139 68.4774 57.9359 73.9932 48.9379 73.9932H34.9246Z" fill="url(#paint1_linear_0_15)"/>
<path d="M69.0262 73.9932L92.27 14.5226C95.5455 6.14197 103.623 0.626152 112.621 0.626152H126.635L103.391 60.0968C100.115 68.4774 92.0375 73.9932 83.0395 73.9932H69.0262Z" fill="url(#paint2_linear_0_15)"/>
<defs>
<linearGradient id="paint0_linear_0_15" x1="126.635" y1="-4.97799" x2="0.824952" y2="66.0978" gradientUnits="userSpaceOnUse">
<stop stop-color="#FF5CAA"/>
<stop offset="1" stop-color="#FF4E62"/>
</linearGradient>
<linearGradient id="paint1_linear_0_15" x1="126.635" y1="-4.97799" x2="0.824952" y2="66.0978" gradientUnits="userSpaceOnUse">
<stop stop-color="#FF5CAA"/>
<stop offset="1" stop-color="#FF4E62"/>
</linearGradient>
<linearGradient id="paint2_linear_0_15" x1="126.635" y1="-4.97799" x2="0.824952" y2="66.0978" gradientUnits="userSpaceOnUse">
<stop stop-color="#FF5CAA"/>
<stop offset="1" stop-color="#FF4E62"/>
</linearGradient>
</defs>
</svg>
(After: 5.9 KiB)

bors.toml
@@ -1,7 +1,7 @@
status = [
    'Tests on ubuntu-18.04',
-    'Tests on macos-latest',
-    'Tests on windows-latest',
+    'Tests on macos-12',
+    'Tests on windows-2022',
    'Run Clippy',
    'Run Rustfmt',
    'Run tests in debug',
config.toml (new file, +120)
@@ -0,0 +1,120 @@
# This file shows the default configuration of Meilisearch.
# All variables are defined here: https://docs.meilisearch.com/learn/configuration/instance_options.html#environment-variables

db_path = "./data.ms"
# Designates the location where database files will be created and retrieved.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#database-path

env = "development"
# Configures the instance's environment. Value must be either `production` or `development`.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#environment

http_addr = "localhost:7700"
# The address on which the HTTP server will listen.

# master_key = "YOUR_MASTER_KEY_VALUE"
# Sets the instance's master key, automatically protecting all routes except GET /health.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#master-key

# no_analytics = true
# Deactivates Meilisearch's built-in telemetry when provided.
# Meilisearch automatically collects data from all instances that do not opt out using this flag.
# All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted at any time.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#disable-analytics

http_payload_size_limit = "100 MB"
# Sets the maximum size of accepted payloads.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#payload-limit-size

log_level = "INFO"
# Defines how much detail should be present in Meilisearch's logs.
# Meilisearch currently supports six log levels, listed in order of increasing verbosity: `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`
# https://docs.meilisearch.com/learn/configuration/instance_options.html#log-level

# max_indexing_memory = "2 GiB"
# Sets the maximum amount of RAM Meilisearch can use when indexing.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-indexing-memory

# max_indexing_threads = 4
# Sets the maximum number of threads Meilisearch can use during indexing.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-indexing-threads

#############
### DUMPS ###
#############

dump_dir = "dumps/"
# Sets the directory where Meilisearch will create dump files.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#dumps-destination

# import_dump = "./path/to/my/file.dump"
# Imports the dump file located at the specified path. Path must point to a .dump file.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#import-dump

ignore_missing_dump = false
# Prevents Meilisearch from throwing an error when `import_dump` does not point to a valid dump file.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-missing-dump

ignore_dump_if_db_exists = false
# Prevents a Meilisearch instance with an existing database from throwing an error when using `import_dump`.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-dump-if-db-exists


#################
### SNAPSHOTS ###
#################

schedule_snapshot = false
# Enables scheduled snapshots when true, disable when false (the default).
# If the value is given as an integer, then enables the scheduled snapshot with the passed value as the interval
# between each snapshot, in seconds.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#schedule-snapshot-creation

snapshot_dir = "snapshots/"
# Sets the directory where Meilisearch will store snapshots.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#snapshot-destination

# import_snapshot = "./path/to/my/snapshot"
# Launches Meilisearch after importing a previously-generated snapshot at the given filepath.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#import-snapshot

ignore_missing_snapshot = false
# Prevents a Meilisearch instance from throwing an error when `import_snapshot` does not point to a valid snapshot file.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-missing-snapshot

ignore_snapshot_if_db_exists = false
# Prevents a Meilisearch instance with an existing database from throwing an error when using `import_snapshot`.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ignore-snapshot-if-db-exists


###########
### SSL ###
###########

# ssl_auth_path = "./path/to/root"
# Enables client authentication in the specified path.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-authentication-path

# ssl_cert_path = "./path/to/certfile"
# Sets the server's SSL certificates.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-certificates-path

# ssl_key_path = "./path/to/private-key"
# Sets the server's SSL key files.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-key-path

# ssl_ocsp_path = "./path/to/ocsp-file"
# Sets the server's OCSP file.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-ocsp-path

ssl_require_auth = false
# Makes SSL authentication mandatory.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-require-auth

ssl_resumption = false
# Activates SSL session resumption.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-resumption

ssl_tickets = false
# Activates SSL tickets.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-tickets
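Note: this file is read at startup. A minimal sketch, assuming a v1.x binary that accepts `--config-file-path` (explicit flags and environment variables still take precedence over the file):

```sh
# Start Meilisearch with the configuration file above.
./meilisearch --config-file-path="./config.toml"
```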
download-latest.sh
@@ -1,129 +1,51 @@
 #!/bin/sh

-# COLORS
+# This script can optionally use a GitHub token to increase your request limit (for example, if using this script in a CI).
+# To use a GitHub token, pass it through the GITHUB_PAT environment variable.
+
+# GLOBALS
+
+# Colors
 RED='\033[31m'
 GREEN='\033[32m'
 DEFAULT='\033[0m'

-# GLOBALS
-GREP_SEMVER_REGEXP='v\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)$' # i.e. v[number].[number].[number]
+# Project name
+PNAME='meilisearch'
+
+# GitHub API address
+GITHUB_API='https://api.github.com/repos/meilisearch/meilisearch/releases'
+# GitHub Release address
+GITHUB_REL='https://github.com/meilisearch/meilisearch/releases/download/'

 # FUNCTIONS

-# semverParseInto and semverLT from https://github.com/cloudflare/semver_bash/blob/master/semver.sh
-
-# usage: semverParseInto version major minor patch special
-# version: the string version
-# major, minor, patch, special: will be assigned by the function
-semverParseInto() {
-    local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
-    #MAJOR
-    eval $2=`echo $1 | sed -e "s#$RE#\1#"`
-    #MINOR
-    eval $3=`echo $1 | sed -e "s#$RE#\2#"`
-    #PATCH
-    eval $4=`echo $1 | sed -e "s#$RE#\3#"`
-    #SPECIAL
-    eval $5=`echo $1 | sed -e "s#$RE#\4#"`
-}
-
-# usage: semverLT version1 version2
-semverLT() {
-    local MAJOR_A=0
-    local MINOR_A=0
-    local PATCH_A=0
-    local SPECIAL_A=0
-
-    local MAJOR_B=0
-    local MINOR_B=0
-    local PATCH_B=0
-    local SPECIAL_B=0
-
-    semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
-    semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B
-
-    if [ $MAJOR_A -lt $MAJOR_B ]; then
-        return 0
-    fi
-    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -lt $MINOR_B ]; then
-        return 0
-    fi
-    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -le $MINOR_B ] && [ $PATCH_A -lt $PATCH_B ]; then
-        return 0
-    fi
-    if [ "_$SPECIAL_A" == '_' ] && [ "_$SPECIAL_B" == '_' ] ; then
-        return 1
-    fi
-    if [ "_$SPECIAL_A" == '_' ] && [ "_$SPECIAL_B" != '_' ] ; then
-        return 1
-    fi
-    if [ "_$SPECIAL_A" != '_' ] && [ "_$SPECIAL_B" == '_' ] ; then
-        return 0
-    fi
-    if [ "_$SPECIAL_A" < "_$SPECIAL_B" ]; then
-        return 0
-    fi
-
-    return 1
-}

-# Get a token from https://github.com/settings/tokens to increase rate limit (from 60 to 5000), make sure the token scope is set to 'public_repo'
-# Create GITHUB_PAT environment variable once you acquired the token to start using it
-# Returns the tag of the latest stable release (in terms of semver and not of release date)
+# Gets the version of the latest stable version of Meilisearch by setting the $latest variable.
+# Returns 0 in case of success, 1 otherwise.
 get_latest() {
-    temp_file='temp_file' # temp_file needed because the grep would start before the download is over
+    # temp_file is needed because the grep would start before the download is over
+    temp_file=$(mktemp -q /tmp/$PNAME.XXXXXXXXX)
+    latest_release="$GITHUB_API/latest"
+
+    if [ $? -ne 0 ]; then
+        echo "$0: Can't create temp file."
+        fetch_release_failure_usage
+        exit 1
+    fi

     if [ -z "$GITHUB_PAT" ]; then
-        curl -s 'https://api.github.com/repos/meilisearch/meilisearch/releases' > "$temp_file" || return 1
+        curl -s "$latest_release" > "$temp_file" || return 1
     else
-        curl -H "Authorization: token $GITHUB_PAT" -s 'https://api.github.com/repos/meilisearch/meilisearch/releases' > "$temp_file" || return 1
+        curl -H "Authorization: token $GITHUB_PAT" -s "$latest_release" > "$temp_file" || return 1
     fi

-    releases=$(cat "$temp_file" | \
-        grep -E '"tag_name":|"draft":|"prerelease":' \
-        | tr -d ',"' | cut -d ':' -f2 | tr -d ' ')
-    # Returns a list of [tag_name draft_boolean prerelease_boolean ...]
-    # Ex: v0.10.1 false false v0.9.1-rc.1 false true v0.9.0 false false...
-
-    i=0
-    latest=''
-    current_tag=''
-    for release_info in $releases; do
-        if [ $i -eq 0 ]; then # Checking tag_name
-            if echo "$release_info" | grep -q "$GREP_SEMVER_REGEXP"; then # If it's not an alpha or beta release
-                current_tag=$release_info
-            else
-                current_tag=''
-            fi
-            i=1
-        elif [ $i -eq 1 ]; then # Checking draft boolean
-            if [ "$release_info" = 'true' ]; then
-                current_tag=''
-            fi
-            i=2
-        elif [ $i -eq 2 ]; then # Checking prerelease boolean
-            if [ "$release_info" = 'true' ]; then
-                current_tag=''
-            fi
-            i=0
-            if [ "$current_tag" != '' ]; then # If the current_tag is valid
-                if [ "$latest" = '' ]; then # If there is no latest yet
-                    latest="$current_tag"
-                else
-                    semverLT $current_tag $latest # Comparing latest and the current tag
-                    if [ $? -eq 1 ]; then
-                        latest="$current_tag"
-                    fi
-                fi
-            fi
-        fi
-    done
+    latest="$(cat "$temp_file" | grep '"tag_name":' | cut -d ':' -f2 | tr -d '"' | tr -d ',' | tr -d ' ')"

     rm -f "$temp_file"
     return 0
 }
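Note: the rewritten `get_latest` above switches from paging through every release (and comparing semvers locally) to GitHub's dedicated latest-release endpoint, which already excludes drafts and prereleases. Roughly the same lookup as a standalone one-liner:

```sh
# Fetch the newest stable tag straight from the latest-release endpoint.
curl -s 'https://api.github.com/repos/meilisearch/meilisearch/releases/latest' \
    | grep '"tag_name":' | cut -d ':' -f2 | tr -d '", '
```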
-# Gets the OS by setting the $os variable
+# Gets the OS by setting the $os variable.
+# Returns 0 in case of success, 1 otherwise.
 get_os() {
     os_name=$(uname -s)
@@ -134,7 +56,7 @@ get_os() {
     'Linux')
         os='linux'
         ;;
     'MINGW'*)
         os='windows'
         ;;
     *)
@@ -143,7 +65,7 @@ get_os() {
     return 0
 }

-# Gets the architecture by setting the $archi variable
+# Gets the architecture by setting the $archi variable.
+# Returns 0 in case of success, 1 otherwise.
 get_archi() {
     architecture=$(uname -m)
@@ -152,8 +74,9 @@ get_archi() {
         archi='amd64'
         ;;
     'arm64')
-        if [ $os = 'macos' ]; then # MacOS M1
-            archi='amd64'
+        # macOS M1/M2
+        if [ $os = 'macos' ]; then
+            archi='apple-silicon'
         else
             archi='aarch64'
         fi
@@ -171,9 +94,9 @@ success_usage() {
     printf "$GREEN%s\n$DEFAULT" "Meilisearch $latest binary successfully downloaded as '$binary_name' file."
     echo ''
     echo 'Run it:'
-    echo ' $ ./meilisearch'
+    echo " $ ./$PNAME"
     echo 'Usage:'
-    echo ' $ ./meilisearch --help'
+    echo " $ ./$PNAME --help"
 }

 not_available_failure_usage() {
@@ -187,54 +110,58 @@ fetch_release_failure_usage() {
     echo ''
     printf "$RED%s\n$DEFAULT" 'ERROR: Impossible to get the latest stable version of Meilisearch.'
     echo 'Please let us know about this issue: https://github.com/meilisearch/meilisearch/issues/new/choose'
+    echo ''
+    echo 'In the meantime, you can manually download the appropriate binary from the GitHub release assets here: https://github.com/meilisearch/meilisearch/releases/latest'
 }

+fill_release_variables() {
+    # Fill $latest variable.
+    if ! get_latest; then
+        fetch_release_failure_usage
+        exit 1
+    fi
+    if [ "$latest" = '' ]; then
+        fetch_release_failure_usage
+        exit 1
+    fi
+    # Fill $os variable.
+    if ! get_os; then
+        not_available_failure_usage
+        exit 1
+    fi
+    # Fill $archi variable.
+    if ! get_archi; then
+        not_available_failure_usage
+        exit 1
+    fi
+}
+
+download_binary() {
+    fill_release_variables
+    echo "Downloading Meilisearch binary $latest for $os, architecture $archi..."
+    case "$os" in
+        'windows')
+            release_file="$PNAME-$os-$archi.exe"
+            binary_name="$PNAME.exe"
+            ;;
+        *)
+            release_file="$PNAME-$os-$archi"
+            binary_name="$PNAME"
+    esac
+    # Fetch the Meilisearch binary.
+    curl --fail -OL "$GITHUB_REL/$latest/$release_file"
+    if [ $? -ne 0 ]; then
+        fetch_release_failure_usage
+        exit 1
+    fi
+    mv "$release_file" "$binary_name"
+    chmod 744 "$binary_name"
+    success_usage
+}

 # MAIN

-# Fill $latest variable
-if ! get_latest; then
-    fetch_release_failure_usage # TO CHANGE
-    exit 1
-fi
-
-if [ "$latest" = '' ]; then
-    fetch_release_failure_usage
-    exit 1
-fi
-
-# Fill $os variable
-if ! get_os; then
-    not_available_failure_usage
-    exit 1
-fi
-
-# Fill $archi variable
-if ! get_archi; then
-    not_available_failure_usage
-    exit 1
-fi
-
-echo "Downloading Meilisearch binary $latest for $os, architecture $archi..."
-case "$os" in
-    'windows')
-        release_file="meilisearch-$os-$archi.exe"
-        binary_name='meilisearch.exe'
-
-        ;;
-    *)
-        release_file="meilisearch-$os-$archi"
-        binary_name='meilisearch'
-
-esac
-
-# Fetch the Meilisearch binary
-link="https://github.com/meilisearch/meilisearch/releases/download/$latest/$release_file"
-curl --fail -OL "$link"
-if [ $? -ne 0 ]; then
-    fetch_release_failure_usage
-    exit 1
-fi
-
-mv "$release_file" "$binary_name"
-chmod 744 "$binary_name"
-success_usage
+main() {
+    download_binary
+}
+main
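Note: this is the script served by the installer URL shown in the old README. It can also be run directly; `GITHUB_PAT` is optional and only raises the API rate limit:

```sh
# Run the installer locally; the token is optional.
GITHUB_PAT=<your-token> sh download-latest.sh
./meilisearch --help
```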
dump/Cargo.toml (new file, +28)
@@ -0,0 +1,28 @@
[package]
name = "dump"
version = "1.0.2"
edition = "2021"

[dependencies]
anyhow = "1.0.65"
flate2 = "1.0.22"
http = "0.2.8"
log = "0.4.17"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
once_cell = "1.15.0"
regex = "1.6.0"
roaring = { version = "0.10.0", features = ["serde"] }
serde = { version = "1.0.136", features = ["derive"] }
serde_json = { version = "1.0.85", features = ["preserve_order"] }
tar = "0.4.38"
tempfile = "3.3.0"
thiserror = "1.0.30"
time = { version = "0.3.7", features = ["serde-well-known", "formatting", "parsing", "macros"] }
uuid = { version = "1.1.2", features = ["serde", "v4"] }

[dev-dependencies]
big_s = "1.0.2"
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }
meilisearch-types = { path = "../meilisearch-types" }
dump/README.md (new file, +17)
@@ -0,0 +1,17 @@
```
dump
├── indexes
│   ├── cattos
│   │   ├── documents.jsonl
│   │   └── settings.json
│   └── doggos
│       ├── documents.jsonl
│       └── settings.json
├── instance-uid.uuid
├── keys.jsonl
├── metadata.json
└── tasks
    ├── update_files
    │   └── [task_id].jsonl
    └── queue.jsonl
```
dump/src/error.rs (new file, +34)
@@ -0,0 +1,34 @@
use meilisearch_types::error::{Code, ErrorCode};
use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
    #[error("Bad index name.")]
    BadIndexName,
    #[error("Malformed task.")]
    MalformedTask,

    #[error(transparent)]
    Io(#[from] std::io::Error),
    #[error(transparent)]
    Serde(#[from] serde_json::Error),
    #[error(transparent)]
    Uuid(#[from] uuid::Error),
}

impl ErrorCode for Error {
    fn error_code(&self) -> Code {
        match self {
            Error::Io(e) => e.error_code(),

            // These errors either happen when creating a dump and don't need any error code,
            // or come from an internal bad deserialization.
            Error::Serde(_) => Code::Internal,
            Error::Uuid(_) => Code::Internal,

            // all these errors should never be raised when creating a dump, thus no error code should be associated.
            Error::BadIndexName => Code::Internal,
            Error::MalformedTask => Code::Internal,
        }
    }
}
dump/src/lib.rs (new file, +463)
@@ -0,0 +1,463 @@
#![allow(clippy::type_complexity)]
#![allow(clippy::wrong_self_convention)]

use meilisearch_types::error::ResponseError;
use meilisearch_types::keys::Key;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::settings::Unchecked;
use meilisearch_types::tasks::{Details, IndexSwap, KindWithContent, Status, Task, TaskId};
use meilisearch_types::InstanceUid;
use roaring::RoaringBitmap;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

mod error;
mod reader;
mod writer;

pub use error::Error;
pub use reader::{DumpReader, UpdateFile};
pub use writer::DumpWriter;

const CURRENT_DUMP_VERSION: Version = Version::V6;

type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    pub dump_version: Version,
    pub db_version: String,
    #[serde(with = "time::serde::rfc3339")]
    pub dump_date: OffsetDateTime,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct IndexMetadata {
    pub uid: String,
    pub primary_key: Option<String>,
    #[serde(with = "time::serde::rfc3339")]
    pub created_at: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    pub updated_at: OffsetDateTime,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
pub enum Version {
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TaskDump {
    pub uid: TaskId,
    #[serde(default)]
    pub index_uid: Option<String>,
    pub status: Status,
    #[serde(rename = "type")]
    pub kind: KindDump,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub canceled_by: Option<TaskId>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub details: Option<Details>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ResponseError>,

    #[serde(with = "time::serde::rfc3339")]
    pub enqueued_at: OffsetDateTime,
    #[serde(
        with = "time::serde::rfc3339::option",
        skip_serializing_if = "Option::is_none",
        default
    )]
    pub started_at: Option<OffsetDateTime>,
    #[serde(
        with = "time::serde::rfc3339::option",
        skip_serializing_if = "Option::is_none",
        default
    )]
    pub finished_at: Option<OffsetDateTime>,
}

// A `Kind` specific version made for the dump. If modified you may break the dump.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum KindDump {
    DocumentImport {
        primary_key: Option<String>,
        method: IndexDocumentsMethod,
        documents_count: u64,
        allow_index_creation: bool,
    },
    DocumentDeletion {
        documents_ids: Vec<String>,
    },
    DocumentClear,
    Settings {
        settings: Box<meilisearch_types::settings::Settings<Unchecked>>,
        is_deletion: bool,
        allow_index_creation: bool,
    },
    IndexDeletion,
    IndexCreation {
        primary_key: Option<String>,
    },
    IndexUpdate {
        primary_key: Option<String>,
    },
    IndexSwap {
        swaps: Vec<IndexSwap>,
    },
    TaskCancelation {
        query: String,
        tasks: RoaringBitmap,
    },
    TasksDeletion {
        query: String,
        tasks: RoaringBitmap,
    },
    DumpCreation {
        keys: Vec<Key>,
        instance_uid: Option<InstanceUid>,
    },
    SnapshotCreation,
}

impl From<Task> for TaskDump {
    fn from(task: Task) -> Self {
        TaskDump {
            uid: task.uid,
            index_uid: task.index_uid().map(|uid| uid.to_string()),
            status: task.status,
            kind: task.kind.into(),
            canceled_by: task.canceled_by,
            details: task.details,
            error: task.error,
            enqueued_at: task.enqueued_at,
            started_at: task.started_at,
            finished_at: task.finished_at,
        }
    }
}

impl From<KindWithContent> for KindDump {
    fn from(kind: KindWithContent) -> Self {
        match kind {
            KindWithContent::DocumentAdditionOrUpdate {
                primary_key,
                method,
                documents_count,
                allow_index_creation,
                ..
            } => KindDump::DocumentImport {
                primary_key,
                method,
                documents_count,
                allow_index_creation,
            },
            KindWithContent::DocumentDeletion { documents_ids, .. } => {
                KindDump::DocumentDeletion { documents_ids }
            }
            KindWithContent::DocumentClear { .. } => KindDump::DocumentClear,
            KindWithContent::SettingsUpdate {
                new_settings,
                is_deletion,
                allow_index_creation,
                ..
            } => KindDump::Settings { settings: new_settings, is_deletion, allow_index_creation },
            KindWithContent::IndexDeletion { .. } => KindDump::IndexDeletion,
            KindWithContent::IndexCreation { primary_key, .. } => {
                KindDump::IndexCreation { primary_key }
            }
            KindWithContent::IndexUpdate { primary_key, .. } => {
                KindDump::IndexUpdate { primary_key }
            }
            KindWithContent::IndexSwap { swaps } => KindDump::IndexSwap { swaps },
            KindWithContent::TaskCancelation { query, tasks } => {
                KindDump::TaskCancelation { query, tasks }
            }
            KindWithContent::TaskDeletion { query, tasks } => {
                KindDump::TasksDeletion { query, tasks }
            }
            KindWithContent::DumpCreation { keys, instance_uid } => {
                KindDump::DumpCreation { keys, instance_uid }
            }
            KindWithContent::SnapshotCreation => KindDump::SnapshotCreation,
        }
    }
}

#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::Seek;
    use std::str::FromStr;

    use big_s::S;
    use maplit::btreeset;
    use meilisearch_types::index_uid::IndexUid;
    use meilisearch_types::keys::{Action, Key};
    use meilisearch_types::milli::update::Setting;
    use meilisearch_types::milli::{self};
    use meilisearch_types::settings::{Checked, Settings};
    use meilisearch_types::star_or::StarOr;
    use meilisearch_types::tasks::{Details, Status};
    use serde_json::{json, Map, Value};
    use time::macros::datetime;
    use uuid::Uuid;

    use crate::reader::Document;
    use crate::{DumpReader, DumpWriter, IndexMetadata, KindDump, TaskDump, Version};

    pub fn create_test_instance_uid() -> Uuid {
        Uuid::parse_str("9e15e977-f2ae-4761-943f-1eaf75fd736d").unwrap()
    }

    pub fn create_test_index_metadata() -> IndexMetadata {
        IndexMetadata {
            uid: S("doggo"),
            primary_key: None,
            created_at: datetime!(2022-11-20 12:00 UTC),
|
||||
updated_at: datetime!(2022-11-21 00:00 UTC),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_test_documents() -> Vec<Map<String, Value>> {
|
||||
vec![
|
||||
json!({ "id": 1, "race": "golden retriever", "name": "paul", "age": 4 })
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.clone(),
|
||||
json!({ "id": 2, "race": "bernese mountain", "name": "tamo", "age": 6 })
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.clone(),
|
||||
json!({ "id": 3, "race": "great pyrenees", "name": "patou", "age": 5 })
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.clone(),
|
||||
]
|
||||
}
|
||||
|
||||
pub fn create_test_settings() -> Settings<Checked> {
|
||||
let settings = Settings {
|
||||
displayed_attributes: Setting::Set(vec![S("race"), S("name")]),
|
||||
searchable_attributes: Setting::Set(vec![S("name"), S("race")]),
|
||||
filterable_attributes: Setting::Set(btreeset! { S("race"), S("age") }),
|
||||
sortable_attributes: Setting::Set(btreeset! { S("age") }),
|
||||
ranking_rules: Setting::NotSet,
|
||||
stop_words: Setting::NotSet,
|
||||
synonyms: Setting::NotSet,
|
||||
distinct_attribute: Setting::NotSet,
|
||||
typo_tolerance: Setting::NotSet,
|
||||
faceting: Setting::NotSet,
|
||||
pagination: Setting::NotSet,
|
||||
_kind: std::marker::PhantomData,
|
||||
};
|
||||
settings.check()
|
||||
}
|
||||
|
||||
pub fn create_test_tasks() -> Vec<(TaskDump, Option<Vec<Document>>)> {
|
||||
vec![
|
||||
(
|
||||
TaskDump {
|
||||
uid: 0,
|
||||
index_uid: Some(S("doggo")),
|
||||
status: Status::Succeeded,
|
||||
kind: KindDump::DocumentImport {
|
||||
method: milli::update::IndexDocumentsMethod::UpdateDocuments,
|
||||
allow_index_creation: true,
|
||||
primary_key: Some(S("bone")),
|
||||
documents_count: 12,
|
||||
},
|
||||
canceled_by: None,
|
||||
details: Some(Details::DocumentAdditionOrUpdate {
|
||||
received_documents: 12,
|
||||
indexed_documents: Some(10),
|
||||
}),
|
||||
error: None,
|
||||
enqueued_at: datetime!(2022-11-11 0:00 UTC),
|
||||
started_at: Some(datetime!(2022-11-20 0:00 UTC)),
|
||||
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
|
||||
},
|
||||
None,
|
||||
),
|
||||
(
|
||||
TaskDump {
|
||||
uid: 1,
|
||||
index_uid: Some(S("doggo")),
|
||||
status: Status::Enqueued,
|
||||
kind: KindDump::DocumentImport {
|
||||
method: milli::update::IndexDocumentsMethod::UpdateDocuments,
|
||||
allow_index_creation: true,
|
||||
primary_key: None,
|
||||
documents_count: 2,
|
||||
},
|
||||
canceled_by: None,
|
||||
details: Some(Details::DocumentAdditionOrUpdate {
|
||||
received_documents: 2,
|
||||
indexed_documents: None,
|
||||
}),
|
||||
error: None,
|
||||
enqueued_at: datetime!(2022-11-11 0:00 UTC),
|
||||
started_at: None,
|
||||
finished_at: None,
|
||||
},
|
||||
Some(vec![
|
||||
json!({ "id": 4, "race": "leonberg" }).as_object().unwrap().clone(),
|
||||
json!({ "id": 5, "race": "patou" }).as_object().unwrap().clone(),
|
||||
]),
|
||||
),
|
||||
(
|
||||
TaskDump {
|
||||
uid: 5,
|
||||
index_uid: Some(S("catto")),
|
||||
status: Status::Enqueued,
|
||||
kind: KindDump::IndexDeletion,
|
||||
canceled_by: None,
|
||||
details: None,
|
||||
error: None,
|
||||
enqueued_at: datetime!(2022-11-15 0:00 UTC),
|
||||
started_at: None,
|
||||
finished_at: None,
|
||||
},
|
||||
None,
|
||||
),
|
||||
]
|
||||
}
|
||||
|
||||
pub fn create_test_api_keys() -> Vec<Key> {
|
||||
vec![
|
||||
Key {
|
||||
description: Some(S("The main key to manage all the doggos")),
|
||||
name: Some(S("doggos_key")),
|
||||
uid: Uuid::from_str("9f8a34da-b6b2-42f0-939b-dbd4c3448655").unwrap(),
|
||||
actions: vec![Action::DocumentsAll],
|
||||
indexes: vec![StarOr::Other(IndexUid::from_str("doggos").unwrap())],
|
||||
expires_at: Some(datetime!(4130-03-14 12:21 UTC)),
|
||||
created_at: datetime!(1960-11-15 0:00 UTC),
|
||||
updated_at: datetime!(2022-11-10 0:00 UTC),
|
||||
},
|
||||
Key {
|
||||
description: Some(S("The master key for everything and even the doggos")),
|
||||
name: Some(S("master_key")),
|
||||
uid: Uuid::from_str("4622f717-1c00-47bb-a494-39d76a49b591").unwrap(),
|
||||
actions: vec![Action::All],
|
||||
indexes: vec![StarOr::Star],
|
||||
expires_at: None,
|
||||
created_at: datetime!(0000-01-01 00:01 UTC),
|
||||
updated_at: datetime!(1964-05-04 17:25 UTC),
|
||||
},
|
||||
Key {
|
||||
description: Some(S("The useless key to for nothing nor the doggos")),
|
||||
name: Some(S("useless_key")),
|
||||
uid: Uuid::from_str("fb80b58b-0a34-412f-8ba7-1ce868f8ac5c").unwrap(),
|
||||
actions: vec![],
|
||||
indexes: vec![],
|
||||
expires_at: None,
|
||||
created_at: datetime!(400-02-29 0:00 UTC),
|
||||
updated_at: datetime!(1024-02-29 0:00 UTC),
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
pub fn create_test_dump() -> File {
|
||||
let instance_uid = create_test_instance_uid();
|
||||
let dump = DumpWriter::new(Some(instance_uid)).unwrap();
|
||||
|
||||
// ========== Adding an index
|
||||
let documents = create_test_documents();
|
||||
let settings = create_test_settings();
|
||||
|
||||
let mut index = dump.create_index("doggos", &create_test_index_metadata()).unwrap();
|
||||
for document in &documents {
|
||||
index.push_document(document).unwrap();
|
||||
}
|
||||
index.flush().unwrap();
|
||||
index.settings(&settings).unwrap();
|
||||
|
||||
// ========== pushing the task queue
|
||||
let tasks = create_test_tasks();
|
||||
|
||||
let mut task_queue = dump.create_tasks_queue().unwrap();
|
||||
for (task, update_file) in &tasks {
|
||||
let mut update = task_queue.push_task(task).unwrap();
|
||||
if let Some(update_file) = update_file {
|
||||
for u in update_file {
|
||||
update.push_document(u).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
task_queue.flush().unwrap();
|
||||
|
||||
// ========== pushing the api keys
|
||||
let api_keys = create_test_api_keys();
|
||||
|
||||
let mut keys = dump.create_keys().unwrap();
|
||||
for key in &api_keys {
|
||||
keys.push_key(key).unwrap();
|
||||
}
|
||||
keys.flush().unwrap();
|
||||
|
||||
// create the dump
|
||||
let mut file = tempfile::tempfile().unwrap();
|
||||
dump.persist_to(&mut file).unwrap();
|
||||
file.rewind().unwrap();
|
||||
|
||||
file
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_creating_and_read_dump() {
|
||||
let mut file = create_test_dump();
|
||||
let mut dump = DumpReader::open(&mut file).unwrap();
|
||||
|
||||
// ==== checking the top level infos
|
||||
assert_eq!(dump.version(), Version::V6);
|
||||
assert!(dump.date().is_some());
|
||||
assert_eq!(dump.instance_uid().unwrap().unwrap(), create_test_instance_uid());
|
||||
|
||||
// ==== checking the index
|
||||
let mut indexes = dump.indexes().unwrap();
|
||||
let mut index = indexes.next().unwrap().unwrap();
|
||||
assert!(indexes.next().is_none()); // there was only one index in the dump
|
||||
|
||||
for (document, expected) in index.documents().unwrap().zip(create_test_documents()) {
|
||||
assert_eq!(document.unwrap(), expected);
|
||||
}
|
||||
assert_eq!(index.settings().unwrap(), create_test_settings());
|
||||
assert_eq!(index.metadata(), &create_test_index_metadata());
|
||||
|
||||
drop(index);
|
||||
drop(indexes);
|
||||
|
||||
// ==== checking the task queue
|
||||
for (task, expected) in dump.tasks().unwrap().zip(create_test_tasks()) {
|
||||
let (task, content_file) = task.unwrap();
|
||||
assert_eq!(task, expected.0);
|
||||
|
||||
if let Some(expected_update) = expected.1 {
|
||||
assert!(
|
||||
content_file.is_some(),
|
||||
"A content file was expected for the task {}.",
|
||||
expected.0.uid
|
||||
);
|
||||
let updates = content_file.unwrap().collect::<Result<Vec<_>, _>>().unwrap();
|
||||
assert_eq!(updates, expected_update);
|
||||
}
|
||||
}
|
||||
|
||||
// ==== checking the keys
|
||||
for (key, expected) in dump.keys().unwrap().zip(create_test_api_keys()) {
|
||||
assert_eq!(key.unwrap(), expected);
|
||||
}
|
||||
}
|
||||
}
|
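The serde attributes on `TaskDump` and `KindDump` above (`rename_all = "camelCase"`, and `kind` stored under the `"type"` key) are what make the dump a stable on-disk format, which is why the comment warns that modifying `KindDump` may break it. A minimal, self-contained sketch of the same pattern; the `Kind` enum here is a hypothetical stand-in, not the Meilisearch type:

use serde::{Deserialize, Serialize};

// Hypothetical stand-in for `KindDump`: with `rename_all = "camelCase"`,
// the variant names become the camelCase tags written into the dump, so
// renaming a variant silently changes the wire format and breaks old dumps.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum Kind {
    DocumentImport { documents_count: u64 },
    IndexDeletion,
}

fn main() {
    let kind = Kind::DocumentImport { documents_count: 12 };
    let json = serde_json::to_string(&kind).unwrap();
    // The variant is serialized under its camelCase name; field names
    // inside the variant are untouched by the container attribute.
    assert_eq!(json, r#"{"documentImport":{"documents_count":12}}"#);
    // Round-tripping must keep working for every dump ever written.
    assert_eq!(serde_json::from_str::<Kind>(&json).unwrap(), kind);
}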
dump/src/reader/compat/mod.rs (new file, 5 lines)
@@ -0,0 +1,5 @@
pub mod v1_to_v2;
pub mod v2_to_v3;
pub mod v3_to_v4;
pub mod v4_to_v5;
pub mod v5_to_v6;
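The module list above suggests a chain of adapters: each `vN_to_vN+1` module wraps the previous version's reader and exposes the next version's interface, so a dump of any age can be upgraded step by step (the `to_v2`/`to_v3`/`to_v4` methods that appear later in this diff follow that pattern). A heavily simplified, hypothetical sketch of the idea; none of these types are the real ones:

// Hypothetical sketch of the compat chain: every adapter owns the
// previous reader and only knows how to translate one version forward.
struct V1Reader;
struct CompatV1ToV2 { _from: V1Reader }
struct CompatV2ToV3 { _from: CompatV1ToV2 }

impl V1Reader {
    fn to_v2(self) -> CompatV1ToV2 {
        CompatV1ToV2 { _from: self }
    }
}

impl CompatV1ToV2 {
    fn to_v3(self) -> CompatV2ToV3 {
        CompatV2ToV3 { _from: self }
    }
}

fn main() {
    // A v1 dump is read through the whole chain; adding a new dump
    // version only requires one new adapter at the end.
    let _reader = V1Reader.to_v2().to_v3();
}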
@@ -0,0 +1,38 @@
---
source: dump/src/reader/compat/v1_to_v2.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,31 @@
---
source: dump/src/reader/compat/v1_to_v2.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "genres",
    "id"
  ],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/compat/v1_to_v2.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,37 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,31 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,56 @@
---
source: dump/src/reader/compat/v4_to_v5.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": "Reset",
  "searchableAttributes": "Reset",
  "filterableAttributes": {
    "Set": []
  },
  "sortableAttributes": {
    "Set": []
  },
  "rankingRules": {
    "Set": [
      "words",
      "typo",
      "proximity",
      "attribute",
      "sort",
      "exactness"
    ]
  },
  "stopWords": {
    "Set": []
  },
  "synonyms": {
    "Set": {}
  },
  "distinctAttribute": "Reset",
  "typoTolerance": {
    "Set": {
      "enabled": {
        "Set": true
      },
      "minWordSizeForTypos": {
        "Set": {
          "oneTypo": {
            "Set": 5
          },
          "twoTypos": {
            "Set": 9
          }
        }
      },
      "disableOnWords": {
        "Set": []
      },
      "disableOnAttributes": {
        "Set": []
      }
    }
  },
  "faceting": "NotSet",
  "pagination": "NotSet"
}
@@ -0,0 +1,70 @@
---
source: dump/src/reader/compat/v4_to_v5.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": "Reset",
  "searchableAttributes": "Reset",
  "filterableAttributes": {
    "Set": []
  },
  "sortableAttributes": {
    "Set": []
  },
  "rankingRules": {
    "Set": [
      "words",
      "typo",
      "proximity",
      "attribute",
      "sort",
      "exactness"
    ]
  },
  "stopWords": {
    "Set": []
  },
  "synonyms": {
    "Set": {
      "android": [
        "phone",
        "smartphone"
      ],
      "iphone": [
        "phone",
        "smartphone"
      ],
      "phone": [
        "android",
        "iphone",
        "smartphone"
      ]
    }
  },
  "distinctAttribute": "Reset",
  "typoTolerance": {
    "Set": {
      "enabled": {
        "Set": true
      },
      "minWordSizeForTypos": {
        "Set": {
          "oneTypo": {
            "Set": 5
          },
          "twoTypos": {
            "Set": 9
          }
        }
      },
      "disableOnWords": {
        "Set": []
      },
      "disableOnAttributes": {
        "Set": []
      }
    }
  },
  "faceting": "NotSet",
  "pagination": "NotSet"
}
@@ -0,0 +1,62 @@
---
source: dump/src/reader/compat/v4_to_v5.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": "Reset",
  "searchableAttributes": "Reset",
  "filterableAttributes": {
    "Set": [
      "genres",
      "id"
    ]
  },
  "sortableAttributes": {
    "Set": [
      "release_date"
    ]
  },
  "rankingRules": {
    "Set": [
      "words",
      "typo",
      "proximity",
      "attribute",
      "sort",
      "exactness",
      "release_date:asc"
    ]
  },
  "stopWords": {
    "Set": []
  },
  "synonyms": {
    "Set": {}
  },
  "distinctAttribute": "Reset",
  "typoTolerance": {
    "Set": {
      "enabled": {
        "Set": true
      },
      "minWordSizeForTypos": {
        "Set": {
          "oneTypo": {
            "Set": 5
          },
          "twoTypos": {
            "Set": 9
          }
        }
      },
      "disableOnWords": {
        "Set": []
      },
      "disableOnAttributes": {
        "Set": []
      }
    }
  },
  "faceting": "NotSet",
  "pagination": "NotSet"
}
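Unlike the earlier snapshots, the v4-to-v5 snapshots above serialize the raw three-state setting wrapper itself, which is why every value appears as "Reset", "NotSet", or an object under a "Set" key. That shape falls directly out of serde's default externally tagged enum encoding; a small sketch with a local stand-in enum (not the real Meilisearch type) shows the correspondence:

use serde::Serialize;

// Local stand-in for a three-state setting: externally tagged serde
// enums produce exactly the `"Reset"` / `{"Set": ...}` shapes visible
// in the v4-to-v5 snapshots above.
#[derive(Serialize)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

fn main() {
    // Newtype variants become `{"VariantName": payload}`.
    let set: Setting<Vec<&str>> = Setting::Set(vec![]);
    assert_eq!(serde_json::to_string(&set).unwrap(), r#"{"Set":[]}"#);
    // Unit variants become plain strings.
    let reset: Setting<Vec<&str>> = Setting::Reset;
    assert_eq!(serde_json::to_string(&reset).unwrap(), r#""Reset""#);
    let not_set: Setting<Vec<&str>> = Setting::NotSet;
    assert_eq!(serde_json::to_string(&not_set).unwrap(), r#""NotSet""#);
}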
@@ -0,0 +1,40 @@
---
source: dump/src/reader/compat/v5_to_v6.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,54 @@
---
source: dump/src/reader/compat/v5_to_v6.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,46 @@
---
source: dump/src/reader/compat/v5_to_v6.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
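All of the files above are insta snapshot files: the `source:` and `expression:` headers record which test module and which expression produced the JSON below them, and the `insta::assert_json_snapshot!` calls in the test code further down in this diff are what write and later verify them. A hedged sketch of how such a snapshot is produced, assuming the `insta` crate with its `json` feature; the settings value is a stand-in, not the real reader output:

// Run with `cargo test`; on first run insta writes a `.snap` file shaped
// like the ones above, and subsequent runs diff against it.
#[cfg(test)]
mod tests {
    use serde_json::json;

    #[test]
    fn settings_snapshot() {
        // Stand-in for something like `products.settings().unwrap()`.
        let settings = json!({
            "displayedAttributes": ["*"],
            "stopWords": [],
        });
        insta::assert_json_snapshot!(settings);
    }
}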
dump/src/reader/compat/v1_to_v2.rs (new file, 410 lines)
@@ -0,0 +1,410 @@
use std::str::FromStr;

use super::v2_to_v3::CompatV2ToV3;
use crate::reader::{v1, v2, Document};
use crate::Result;

pub struct CompatV1ToV2 {
    pub from: v1::V1Reader,
}

impl CompatV1ToV2 {
    pub fn new(v1: v1::V1Reader) -> Self {
        Self { from: v1 }
    }

    pub fn to_v3(self) -> CompatV2ToV3 {
        CompatV2ToV3::Compat(self)
    }

    pub fn version(&self) -> crate::Version {
        self.from.version()
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        self.from.date()
    }

    pub fn index_uuid(&self) -> Vec<v2::meta::IndexUuid> {
        self.from
            .index_uuid()
            .into_iter()
            .enumerate()
            // we use the position of the index 😬 as its UUID, so that we can link each v2::Task to its index
            .map(|(index, index_uuid)| v2::meta::IndexUuid {
                uid: index_uuid.uid,
                uuid: uuid::Uuid::from_u128(index as u128),
            })
            .collect()
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<CompatIndexV1ToV2>> + '_> {
        Ok(self.from.indexes()?.map(|index_reader| Ok(CompatIndexV1ToV2 { from: index_reader? })))
    }

    pub fn tasks(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<(v2::Task, Option<v2::UpdateFile>)>> + '_> {
        // Convert an error here to an iterator yielding the error
        let indexes = match self.from.indexes() {
            Ok(indexes) => indexes,
            Err(err) => return Box::new(std::iter::once(Err(err))),
        };
        let it = indexes.enumerate().flat_map(
            move |(index, index_reader)| -> Box<dyn Iterator<Item = _>> {
                let index_reader = match index_reader {
                    Ok(index_reader) => index_reader,
                    Err(err) => return Box::new(std::iter::once(Err(err))),
                };
                Box::new(
                    index_reader
                        .tasks()
                        // Filter out the UpdateStatus::Customs variant, which is not supported in v2,
                        // and enqueued tasks, which don't come with the necessary update file in v1
                        .filter_map(move |task| -> Option<_> {
                            let task = match task {
                                Ok(task) => task,
                                Err(err) => return Some(Err(err)),
                            };
                            Some(Ok((
                                v2::Task {
                                    uuid: uuid::Uuid::from_u128(index as u128),
                                    update: Option::from(task)?,
                                },
                                None,
                            )))
                        }),
                )
            },
        );
        Box::new(it)
    }
}

pub struct CompatIndexV1ToV2 {
    pub from: v1::V1IndexReader,
}

impl CompatIndexV1ToV2 {
    pub fn metadata(&self) -> &crate::IndexMetadata {
        self.from.metadata()
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<Document>> + '_>> {
        self.from.documents().map(|it| Box::new(it) as Box<dyn Iterator<Item = _>>)
    }

    pub fn settings(&mut self) -> Result<v2::settings::Settings<v2::settings::Checked>> {
        Ok(v2::settings::Settings::<v2::settings::Unchecked>::from(self.from.settings()?).check())
    }
}

impl From<v1::settings::Settings> for v2::Settings<v2::Unchecked> {
    fn from(source: v1::settings::Settings) -> Self {
        Self {
            displayed_attributes: option_to_setting(source.displayed_attributes)
                .map(|displayed| displayed.into_iter().collect()),
            searchable_attributes: option_to_setting(source.searchable_attributes),
            filterable_attributes: option_to_setting(source.attributes_for_faceting.clone())
                .map(|filterable| filterable.into_iter().collect()),
            sortable_attributes: option_to_setting(source.attributes_for_faceting)
                .map(|sortable| sortable.into_iter().collect()),
            ranking_rules: option_to_setting(source.ranking_rules).map(|ranking_rules| {
                ranking_rules
                    .into_iter()
                    .filter_map(|ranking_rule| {
                        match v1::settings::RankingRule::from_str(&ranking_rule) {
                            Ok(ranking_rule) => {
                                let criterion: Option<v2::settings::Criterion> =
                                    ranking_rule.into();
                                criterion.as_ref().map(ToString::to_string)
                            }
                            Err(()) => {
                                log::warn!(
                                    "Could not import the following ranking rule: `{}`.",
                                    ranking_rule
                                );
                                None
                            }
                        }
                    })
                    .collect()
            }),
            stop_words: option_to_setting(source.stop_words),
            synonyms: option_to_setting(source.synonyms),
            distinct_attribute: option_to_setting(source.distinct_attribute),
            _kind: std::marker::PhantomData,
        }
    }
}

fn option_to_setting<T>(opt: Option<Option<T>>) -> v2::Setting<T> {
    match opt {
        Some(Some(t)) => v2::Setting::Set(t),
        None => v2::Setting::NotSet,
        Some(None) => v2::Setting::Reset,
    }
}

impl From<v1::update::UpdateStatus> for Option<v2::updates::UpdateStatus> {
    fn from(source: v1::update::UpdateStatus) -> Self {
        use v1::update::UpdateStatus as UpdateStatusV1;
        use v2::updates::UpdateStatus as UpdateStatusV2;
        Some(match source {
            UpdateStatusV1::Enqueued { content } => {
                log::warn!(
                    "Cannot import task {} (importing enqueued tasks from v1 dumps is unsupported)",
                    content.update_id
                );
                log::warn!("Task will be skipped in the queue of imported tasks.");

                return None;
            }
            UpdateStatusV1::Failed { content } => UpdateStatusV2::Failed(v2::updates::Failed {
                from: v2::updates::Processing {
                    from: v2::updates::Enqueued {
                        update_id: content.update_id,
                        meta: Option::from(content.update_type)?,
                        enqueued_at: content.enqueued_at,
                        content: None,
                    },
                    started_processing_at: content.processed_at
                        - std::time::Duration::from_secs_f64(content.duration),
                },
                error: v2::ResponseError {
                    // the error code is ignored by serialization, and so is always the default in deserialized v2 dumps
                    // that's a good thing, because we don't have it in v1 dumps 😅
                    code: http::StatusCode::default(),
                    message: content.error.unwrap_or_default(),
                    // error codes are unchanged between v1 and v2
                    error_code: content.error_code.unwrap_or_default(),
                    // error types are unchanged between v1 and v2
                    error_type: content.error_type.unwrap_or_default(),
                    // error links are unchanged between v1 and v2
                    error_link: content.error_link.unwrap_or_default(),
                },
                failed_at: content.processed_at,
            }),
            UpdateStatusV1::Processed { content } => {
                UpdateStatusV2::Processed(v2::updates::Processed {
                    success: match &content.update_type {
                        v1::update::UpdateType::ClearAll => {
                            v2::updates::UpdateResult::DocumentDeletion { deleted: u64::MAX }
                        }
                        v1::update::UpdateType::Customs => v2::updates::UpdateResult::Other,
                        v1::update::UpdateType::DocumentsAddition { number } => {
                            v2::updates::UpdateResult::DocumentsAddition(
                                v2::updates::DocumentAdditionResult { nb_documents: *number },
                            )
                        }
                        v1::update::UpdateType::DocumentsPartial { number } => {
                            v2::updates::UpdateResult::DocumentsAddition(
                                v2::updates::DocumentAdditionResult { nb_documents: *number },
                            )
                        }
                        v1::update::UpdateType::DocumentsDeletion { number } => {
                            v2::updates::UpdateResult::DocumentDeletion { deleted: *number as u64 }
                        }
                        v1::update::UpdateType::Settings { .. } => v2::updates::UpdateResult::Other,
                    },
                    processed_at: content.processed_at,
                    from: v2::updates::Processing {
                        from: v2::updates::Enqueued {
                            update_id: content.update_id,
                            meta: Option::from(content.update_type)?,
                            enqueued_at: content.enqueued_at,
                            content: None,
                        },
                        started_processing_at: content.processed_at
                            - std::time::Duration::from_secs_f64(content.duration),
                    },
                })
            }
        })
    }
}

impl From<v1::update::UpdateType> for Option<v2::updates::UpdateMeta> {
    fn from(source: v1::update::UpdateType) -> Self {
        Some(match source {
            v1::update::UpdateType::ClearAll => v2::updates::UpdateMeta::ClearDocuments,
            v1::update::UpdateType::Customs => {
                log::warn!("Ignoring task with type 'Customs' that is no longer supported");
                return None;
            }
            v1::update::UpdateType::DocumentsAddition { .. } => {
                v2::updates::UpdateMeta::DocumentsAddition {
                    method: v2::updates::IndexDocumentsMethod::ReplaceDocuments,
                    format: v2::updates::UpdateFormat::Json,
                    primary_key: None,
                }
            }
            v1::update::UpdateType::DocumentsPartial { .. } => {
                v2::updates::UpdateMeta::DocumentsAddition {
                    method: v2::updates::IndexDocumentsMethod::UpdateDocuments,
                    format: v2::updates::UpdateFormat::Json,
                    primary_key: None,
                }
            }
            v1::update::UpdateType::DocumentsDeletion { .. } => {
                v2::updates::UpdateMeta::DeleteDocuments { ids: vec![] }
            }
            v1::update::UpdateType::Settings { settings } => {
                v2::updates::UpdateMeta::Settings((*settings).into())
            }
        })
    }
}

impl From<v1::settings::SettingsUpdate> for v2::Settings<v2::Unchecked> {
    fn from(source: v1::settings::SettingsUpdate) -> Self {
        let ranking_rules = v2::Setting::from(source.ranking_rules);

        // go from the concrete types of v1 (RankingRule) to the concrete type of v2 (Criterion),
        // and then back to string, as this is what the settings manipulate
        let ranking_rules = ranking_rules.map(|ranking_rules| {
            ranking_rules
                .into_iter()
                // filter out the WordsPosition ranking rule that exists in v1 but not v2
                .filter_map(Option::<v2::settings::Criterion>::from)
                .map(|criterion| criterion.to_string())
                .collect()
        });

        Self {
            displayed_attributes: v2::Setting::from(source.displayed_attributes)
                .map(|displayed_attributes| displayed_attributes.into_iter().collect()),
            searchable_attributes: source.searchable_attributes.into(),
            filterable_attributes: v2::Setting::from(source.attributes_for_faceting.clone())
                .map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect()),
            sortable_attributes: v2::Setting::from(source.attributes_for_faceting)
                .map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect()),
            ranking_rules,
            stop_words: source.stop_words.into(),
            synonyms: source.synonyms.into(),
            distinct_attribute: source.distinct_attribute.into(),
            _kind: std::marker::PhantomData,
        }
    }
}

impl From<v1::settings::RankingRule> for Option<v2::settings::Criterion> {
    fn from(source: v1::settings::RankingRule) -> Self {
        match source {
            v1::settings::RankingRule::Typo => Some(v2::settings::Criterion::Typo),
            v1::settings::RankingRule::Words => Some(v2::settings::Criterion::Words),
            v1::settings::RankingRule::Proximity => Some(v2::settings::Criterion::Proximity),
            v1::settings::RankingRule::Attribute => Some(v2::settings::Criterion::Attribute),
            v1::settings::RankingRule::WordsPosition => {
                log::warn!("Removing the 'WordsPosition' ranking rule that is no longer supported, please check the resulting ranking rules of your indexes");
                None
            }
            v1::settings::RankingRule::Exactness => Some(v2::settings::Criterion::Exactness),
            v1::settings::RankingRule::Asc(field_name) => {
                Some(v2::settings::Criterion::Asc(field_name))
            }
            v1::settings::RankingRule::Desc(field_name) => {
                Some(v2::settings::Criterion::Desc(field_name))
            }
        }
    }
}

impl<T> From<v1::settings::UpdateState<T>> for v2::Setting<T> {
    fn from(source: v1::settings::UpdateState<T>) -> Self {
        match source {
            v1::settings::UpdateState::Update(new_value) => v2::Setting::Set(new_value),
            v1::settings::UpdateState::Clear => v2::Setting::Reset,
            v1::settings::UpdateState::Nothing => v2::Setting::NotSet,
        }
    }
}

#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn compat_v1_v2() {
        let dump = File::open("tests/assets/v1.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = v1::V1Reader::open(dir).unwrap().to_v2();

        // top level infos
        assert_eq!(dump.date(), None);

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2298010973ee98cf4670787314176a3a");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[..].iter().all(|u| u.is_none())); // no update files in v1 dumps

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-02T13:23:39.976870431Z",
          "updatedAt": "2022-10-02T13:27:54.353262482Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-02T13:15:29.477512777Z",
          "updatedAt": "2022-10-02T13:21:12.671204856Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b63dbed5bbc059f3e32bc471ae699bf5");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-02T13:38:26.358882984Z",
          "updatedAt": "2022-10-02T13:38:26.385609433Z"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"aa24c0cfc733d66c396237ad44263bed");
    }
}
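The `option_to_setting` helper in the file above is the crux of the v1-to-v2 settings conversion: v1 models "never provided" versus "explicitly cleared" as nested options, while v2 uses an explicit three-state setting. A standalone sketch of the same mapping; the `Setting` enum here is a local stand-in for `v2::Setting`:

// Local stand-in for `v2::Setting<T>`.
#[derive(Debug, PartialEq)]
enum Setting<T> {
    Set(T),  // Some(Some(t)): a value was provided
    Reset,   // Some(None): the setting was explicitly cleared
    NotSet,  // None: the setting was never touched
}

fn option_to_setting<T>(opt: Option<Option<T>>) -> Setting<T> {
    match opt {
        Some(Some(t)) => Setting::Set(t),
        Some(None) => Setting::Reset,
        None => Setting::NotSet,
    }
}

fn main() {
    // The three states survive the conversion unambiguously.
    assert_eq!(option_to_setting(Some(Some(3))), Setting::Set(3));
    assert_eq!(option_to_setting(Some(None::<u32>)), Setting::Reset);
    assert_eq!(option_to_setting(None::<Option<u32>>), Setting::NotSet);
}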
dump/src/reader/compat/v2_to_v3.rs (new file, 514 lines)
@@ -0,0 +1,514 @@
use std::convert::TryInto;
use std::str::FromStr;

use time::OffsetDateTime;
use uuid::Uuid;

use super::v1_to_v2::{CompatIndexV1ToV2, CompatV1ToV2};
use super::v3_to_v4::CompatV3ToV4;
use crate::reader::{v2, v3, Document};
use crate::Result;

pub enum CompatV2ToV3 {
    V2(v2::V2Reader),
    Compat(CompatV1ToV2),
}

impl CompatV2ToV3 {
    pub fn new(v2: v2::V2Reader) -> CompatV2ToV3 {
        CompatV2ToV3::V2(v2)
    }

    pub fn index_uuid(&self) -> Vec<v3::meta::IndexUuid> {
        let v2_uuids = match self {
            CompatV2ToV3::V2(from) => from.index_uuid(),
            CompatV2ToV3::Compat(compat) => compat.index_uuid(),
        };
        v2_uuids
            .into_iter()
            .map(|index| v3::meta::IndexUuid { uid: index.uid, uuid: index.uuid })
            .collect()
    }

    pub fn to_v4(self) -> CompatV3ToV4 {
        CompatV3ToV4::Compat(self)
    }

    pub fn version(&self) -> crate::Version {
        match self {
            CompatV2ToV3::V2(from) => from.version(),
            CompatV2ToV3::Compat(compat) => compat.version(),
        }
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        match self {
            CompatV2ToV3::V2(from) => from.date(),
            CompatV2ToV3::Compat(compat) => compat.date(),
        }
    }

    pub fn instance_uid(&self) -> Result<Option<uuid::Uuid>> {
        Ok(None)
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<CompatIndexV2ToV3>> + '_> {
        Ok(match self {
            CompatV2ToV3::V2(from) => Box::new(from.indexes()?.map(|index_reader| -> Result<_> {
                let compat = CompatIndexV2ToV3::new(index_reader?);
                Ok(compat)
            }))
                as Box<dyn Iterator<Item = Result<CompatIndexV2ToV3>> + '_>,
            CompatV2ToV3::Compat(compat) => Box::new(compat.indexes()?.map(|index_reader| {
                let compat = CompatIndexV2ToV3::Compat(Box::new(index_reader?));
                Ok(compat)
            }))
                as Box<dyn Iterator<Item = Result<CompatIndexV2ToV3>> + '_>,
        })
    }

    pub fn tasks(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<(v3::Task, Option<Box<dyn Iterator<Item = Result<Document>>>>)>>
            + '_,
    > {
        let tasks = match self {
            CompatV2ToV3::V2(from) => from.tasks(),
            CompatV2ToV3::Compat(compat) => compat.tasks(),
        };

        Box::new(
            tasks
                .map(move |task| {
                    task.map(|(task, content_file)| {
                        let task = v3::Task { uuid: task.uuid, update: task.update.into() };

                        Some((
                            task,
                            content_file.map(|content_file| {
                                Box::new(content_file) as Box<dyn Iterator<Item = Result<Document>>>
                            }),
                        ))
                    })
                })
                .filter_map(|res| res.transpose()),
        )
    }
}

pub enum CompatIndexV2ToV3 {
    V2(v2::V2IndexReader),
    Compat(Box<CompatIndexV1ToV2>),
}

impl CompatIndexV2ToV3 {
    pub fn new(v2: v2::V2IndexReader) -> CompatIndexV2ToV3 {
        CompatIndexV2ToV3::V2(v2)
    }

    pub fn metadata(&self) -> &crate::IndexMetadata {
        match self {
            CompatIndexV2ToV3::V2(from) => from.metadata(),
            CompatIndexV2ToV3::Compat(compat) => compat.metadata(),
        }
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<Document>> + '_>> {
        match self {
            CompatIndexV2ToV3::V2(from) => from
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
            CompatIndexV2ToV3::Compat(compat) => compat.documents(),
        }
    }

    pub fn settings(&mut self) -> Result<v3::Settings<v3::Checked>> {
        let settings = match self {
            CompatIndexV2ToV3::V2(from) => from.settings()?,
            CompatIndexV2ToV3::Compat(compat) => compat.settings()?,
        };
        Ok(v3::Settings::<v3::Unchecked>::from(settings).check())
    }
}

impl From<v2::updates::UpdateStatus> for v3::updates::UpdateStatus {
    fn from(update: v2::updates::UpdateStatus) -> Self {
        match update {
            v2::updates::UpdateStatus::Processing(processing) => {
                match (processing.from.meta.clone(), processing.from.content).try_into() {
                    Ok(meta) => v3::updates::UpdateStatus::Processing(v3::updates::Processing {
                        from: v3::updates::Enqueued {
                            update_id: processing.from.update_id,
                            meta,
                            enqueued_at: processing.from.enqueued_at,
                        },
                        started_processing_at: processing.started_processing_at,
                    }),
                    Err(e) => {
                        log::warn!("Error with task {}: {}", processing.from.update_id, e);
                        log::warn!("Task will be marked as `Failed`.");
                        v3::updates::UpdateStatus::Failed(v3::updates::Failed {
                            from: v3::updates::Processing {
                                from: v3::updates::Enqueued {
                                    update_id: processing.from.update_id,
                                    meta: update_from_unchecked_update_meta(processing.from.meta),
                                    enqueued_at: processing.from.enqueued_at,
                                },
                                started_processing_at: processing.started_processing_at,
                            },
                            msg: e.to_string(),
                            code: v3::Code::MalformedDump,
                            failed_at: OffsetDateTime::now_utc(),
                        })
                    }
                }
            }
            v2::updates::UpdateStatus::Enqueued(enqueued) => {
                match (enqueued.meta.clone(), enqueued.content).try_into() {
                    Ok(meta) => v3::updates::UpdateStatus::Enqueued(v3::updates::Enqueued {
                        update_id: enqueued.update_id,
                        meta,
                        enqueued_at: enqueued.enqueued_at,
                    }),
                    Err(e) => {
                        log::warn!("Error with task {}: {}", enqueued.update_id, e);
                        log::warn!("Task will be marked as `Failed`.");
                        v3::updates::UpdateStatus::Failed(v3::updates::Failed {
                            from: v3::updates::Processing {
                                from: v3::updates::Enqueued {
                                    update_id: enqueued.update_id,
                                    meta: update_from_unchecked_update_meta(enqueued.meta),
                                    enqueued_at: enqueued.enqueued_at,
                                },
                                started_processing_at: OffsetDateTime::now_utc(),
                            },
                            msg: e.to_string(),
                            code: v3::Code::MalformedDump,
                            failed_at: OffsetDateTime::now_utc(),
                        })
                    }
                }
            }
            v2::updates::UpdateStatus::Processed(processed) => {
                v3::updates::UpdateStatus::Processed(v3::updates::Processed {
                    success: processed.success.into(),
                    processed_at: processed.processed_at,
                    from: v3::updates::Processing {
                        from: v3::updates::Enqueued {
                            update_id: processed.from.from.update_id,
                            // since we're never going to read the content_file again, it's ok to generate a fake one.
                            meta: update_from_unchecked_update_meta(processed.from.from.meta),
                            enqueued_at: processed.from.from.enqueued_at,
                        },
                        started_processing_at: processed.from.started_processing_at,
                    },
                })
            }
            v2::updates::UpdateStatus::Aborted(aborted) => {
                v3::updates::UpdateStatus::Aborted(v3::updates::Aborted {
                    from: v3::updates::Enqueued {
                        update_id: aborted.from.update_id,
                        // since we're never going to read the content_file again, it's ok to generate a fake one.
                        meta: update_from_unchecked_update_meta(aborted.from.meta),
                        enqueued_at: aborted.from.enqueued_at,
                    },
                    aborted_at: aborted.aborted_at,
                })
            }
            v2::updates::UpdateStatus::Failed(failed) => {
                v3::updates::UpdateStatus::Failed(v3::updates::Failed {
                    from: v3::updates::Processing {
                        from: v3::updates::Enqueued {
                            update_id: failed.from.from.update_id,
                            // since we're never going to read the content_file again, it's ok to generate a fake one.
                            meta: update_from_unchecked_update_meta(failed.from.from.meta),
                            enqueued_at: failed.from.from.enqueued_at,
                        },
                        started_processing_at: failed.from.started_processing_at,
                    },
                    msg: failed.error.message,
                    code: failed.error.error_code.into(),
                    failed_at: failed.failed_at,
                })
            }
        }
    }
}

impl TryFrom<(v2::updates::UpdateMeta, Option<Uuid>)> for v3::updates::Update {
    type Error = crate::Error;

    fn try_from((update, uuid): (v2::updates::UpdateMeta, Option<Uuid>)) -> Result<Self> {
        Ok(match update {
            v2::updates::UpdateMeta::DocumentsAddition { method, format: _, primary_key }
                if uuid.is_some() =>
            {
                v3::updates::Update::DocumentAddition {
                    primary_key,
                    method: match method {
                        v2::updates::IndexDocumentsMethod::ReplaceDocuments => {
                            v3::updates::IndexDocumentsMethod::ReplaceDocuments
                        }
                        v2::updates::IndexDocumentsMethod::UpdateDocuments => {
                            v3::updates::IndexDocumentsMethod::UpdateDocuments
                        }
                    },
                    content_uuid: uuid.unwrap(),
                }
            }
            v2::updates::UpdateMeta::DocumentsAddition { .. } => {
                return Err(crate::Error::MalformedTask)
            }
            v2::updates::UpdateMeta::ClearDocuments => v3::updates::Update::ClearDocuments,
            v2::updates::UpdateMeta::DeleteDocuments { ids } => {
                v3::updates::Update::DeleteDocuments(ids)
            }
            v2::updates::UpdateMeta::Settings(settings) => {
                v3::updates::Update::Settings(settings.into())
            }
        })
    }
}

pub fn update_from_unchecked_update_meta(update: v2::updates::UpdateMeta) -> v3::updates::Update {
    match update {
        v2::updates::UpdateMeta::DocumentsAddition { method, format: _, primary_key } => {
            v3::updates::Update::DocumentAddition {
                primary_key,
                method: match method {
                    v2::updates::IndexDocumentsMethod::ReplaceDocuments => {
                        v3::updates::IndexDocumentsMethod::ReplaceDocuments
                    }
                    v2::updates::IndexDocumentsMethod::UpdateDocuments => {
                        v3::updates::IndexDocumentsMethod::UpdateDocuments
                    }
                },
                // we use this special uuid so we can recognize it if one day there is a bug related to this field.
                content_uuid: Uuid::from_str("00112233-4455-6677-8899-aabbccddeeff").unwrap(),
            }
        }
        v2::updates::UpdateMeta::ClearDocuments => v3::updates::Update::ClearDocuments,
        v2::updates::UpdateMeta::DeleteDocuments { ids } => {
            v3::updates::Update::DeleteDocuments(ids)
        }
        v2::updates::UpdateMeta::Settings(settings) => {
            v3::updates::Update::Settings(settings.into())
        }
    }
}

impl From<v2::updates::UpdateResult> for v3::updates::UpdateResult {
    fn from(result: v2::updates::UpdateResult) -> Self {
        match result {
            v2::updates::UpdateResult::DocumentsAddition(addition) => {
                v3::updates::UpdateResult::DocumentsAddition(v3::updates::DocumentAdditionResult {
                    nb_documents: addition.nb_documents,
                })
            }
            v2::updates::UpdateResult::DocumentDeletion { deleted } => {
                v3::updates::UpdateResult::DocumentDeletion { deleted }
            }
            v2::updates::UpdateResult::Other => v3::updates::UpdateResult::Other,
        }
    }
}

impl From<String> for v3::Code {
    fn from(code: String) -> Self {
        match code.as_ref() {
            "create_index" => v3::Code::CreateIndex,
            "index_already_exists" => v3::Code::IndexAlreadyExists,
            "index_not_found" => v3::Code::IndexNotFound,
            "invalid_index_uid" => v3::Code::InvalidIndexUid,
            "invalid_state" => v3::Code::InvalidState,
            "missing_primary_key" => v3::Code::MissingPrimaryKey,
            "primary_key_already_present" => v3::Code::PrimaryKeyAlreadyPresent,
            "max_fields_limit_exceeded" => v3::Code::MaxFieldsLimitExceeded,
            "missing_document_id" => v3::Code::MissingDocumentId,
            "invalid_document_id" => v3::Code::InvalidDocumentId,
            "filter" => v3::Code::Filter,
            "sort" => v3::Code::Sort,
            "bad_parameter" => v3::Code::BadParameter,
            "bad_request" => v3::Code::BadRequest,
            "database_size_limit_reached" => v3::Code::DatabaseSizeLimitReached,
            "document_not_found" => v3::Code::DocumentNotFound,
            "internal" => v3::Code::Internal,
            "invalid_geo_field" => v3::Code::InvalidGeoField,
            "invalid_ranking_rule" => v3::Code::InvalidRankingRule,
            "invalid_store" => v3::Code::InvalidStore,
            "invalid_token" => v3::Code::InvalidToken,
            "missing_authorization_header" => v3::Code::MissingAuthorizationHeader,
            "no_space_left_on_device" => v3::Code::NoSpaceLeftOnDevice,
            "dump_not_found" => v3::Code::DumpNotFound,
            "task_not_found" => v3::Code::TaskNotFound,
            "payload_too_large" => v3::Code::PayloadTooLarge,
            "retrieve_document" => v3::Code::RetrieveDocument,
            "search_documents" => v3::Code::SearchDocuments,
            "unsupported_media_type" => v3::Code::UnsupportedMediaType,
            "dump_already_in_progress" => v3::Code::DumpAlreadyInProgress,
            "dump_process_failed" => v3::Code::DumpProcessFailed,
            "invalid_content_type" => v3::Code::InvalidContentType,
            "missing_content_type" => v3::Code::MissingContentType,
            "malformed_payload" => v3::Code::MalformedPayload,
            "missing_payload" => v3::Code::MissingPayload,
            other => {
                log::warn!("Unknown error code {}", other);
                v3::Code::UnretrievableErrorCode
            }
        }
    }
}

impl<A> From<v2::Setting<A>> for v3::Setting<A> {
    fn from(setting: v2::Setting<A>) -> Self {
        match setting {
            v2::settings::Setting::Set(a) => v3::settings::Setting::Set(a),
            v2::settings::Setting::Reset => v3::settings::Setting::Reset,
            v2::settings::Setting::NotSet => v3::settings::Setting::NotSet,
        }
    }
}

impl<T> From<v2::Settings<T>> for v3::Settings<v3::Unchecked> {
    fn from(settings: v2::Settings<T>) -> Self {
        v3::Settings {
            displayed_attributes: settings.displayed_attributes.into(),
            searchable_attributes: settings.searchable_attributes.into(),
            filterable_attributes: settings.filterable_attributes.into(),
            sortable_attributes: settings.sortable_attributes.into(),
            ranking_rules: v3::Setting::from(settings.ranking_rules).map(|criteria| {
                criteria.into_iter().map(|criterion| patch_ranking_rules(&criterion)).collect()
            }),
            stop_words: settings.stop_words.into(),
            synonyms: settings.synonyms.into(),
            distinct_attribute: settings.distinct_attribute.into(),
            _kind: std::marker::PhantomData,
        }
    }
}

fn patch_ranking_rules(ranking_rule: &str) -> String {
    match v2::settings::Criterion::from_str(ranking_rule) {
        Ok(v2::settings::Criterion::Words) => String::from("words"),
        Ok(v2::settings::Criterion::Typo) => String::from("typo"),
        Ok(v2::settings::Criterion::Proximity) => String::from("proximity"),
        Ok(v2::settings::Criterion::Attribute) => String::from("attribute"),
        Ok(v2::settings::Criterion::Sort) => String::from("sort"),
        Ok(v2::settings::Criterion::Exactness) => String::from("exactness"),
        Ok(v2::settings::Criterion::Asc(name)) => format!("{name}:asc"),
        Ok(v2::settings::Criterion::Desc(name)) => format!("{name}:desc"),
        // we want to forward the error to the current version of meilisearch
        Err(_) => ranking_rule.to_string(),
    }
}

#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn compat_v2_v3() {
        let dump = File::open("tests/assets/v2.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = v2::V2Reader::open(dir).unwrap().to_v3();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-09 20:27:59.904096267 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, mut update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"9507711db47c7171c79bc6d57d0bed79");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything else was already processed

        let update_file = update_files.remove(0).unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(update_file), @"7b8889539b669c7b9ddba448bafa385d");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies2 = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");

        // movies2
        insta::assert_json_snapshot!(movies2.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies_2",
          "primaryKey": null,
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
|
||||
let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
|
||||
assert_eq!(documents.len(), 10);
|
||||
meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
|
||||
}
|
||||
}
|
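A note on `patch_ranking_rules` above: known v2 criteria are rewritten into their v3 spelling, and anything the parser does not recognize is forwarded untouched so that the current Meilisearch can surface the error itself. Below is a minimal, self-contained sketch of that "patch or pass through" pattern; the toy `Criterion` enum is an assumption standing in for the real `v2::settings::Criterion`, not the actual type.

use std::str::FromStr;

// Toy stand-in for v2::settings::Criterion (an assumption, not the real type).
enum Criterion {
    Words,
    Asc(String),
}

impl FromStr for Criterion {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "words" => Ok(Criterion::Words),
            s if s.ends_with(":asc") => {
                Ok(Criterion::Asc(s.trim_end_matches(":asc").to_string()))
            }
            _ => Err(()),
        }
    }
}

fn patch(rule: &str) -> String {
    match Criterion::from_str(rule) {
        Ok(Criterion::Words) => "words".to_string(),
        Ok(Criterion::Asc(field)) => format!("{field}:asc"),
        // unknown rules are forwarded untouched so the newest code reports the error
        Err(()) => rule.to_string(),
    }
}

fn main() {
    assert_eq!(patch("words"), "words");
    assert_eq!(patch("price:asc"), "price:asc");
    assert_eq!(patch("dcg"), "dcg"); // unknown rule is passed through
}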
dump/src/reader/compat/v3_to_v4.rs (new file, 449 lines)
@@ -0,0 +1,449 @@
use super::v2_to_v3::{CompatIndexV2ToV3, CompatV2ToV3};
use super::v4_to_v5::CompatV4ToV5;
use crate::reader::{v3, v4, UpdateFile};
use crate::Result;

pub enum CompatV3ToV4 {
    V3(v3::V3Reader),
    Compat(CompatV2ToV3),
}

impl CompatV3ToV4 {
    pub fn new(v3: v3::V3Reader) -> CompatV3ToV4 {
        CompatV3ToV4::V3(v3)
    }

    pub fn to_v5(self) -> CompatV4ToV5 {
        CompatV4ToV5::Compat(self)
    }

    pub fn version(&self) -> crate::Version {
        match self {
            CompatV3ToV4::V3(v3) => v3.version(),
            CompatV3ToV4::Compat(compat) => compat.version(),
        }
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        match self {
            CompatV3ToV4::V3(v3) => v3.date(),
            CompatV3ToV4::Compat(compat) => compat.date(),
        }
    }

    pub fn instance_uid(&self) -> Result<Option<uuid::Uuid>> {
        Ok(None)
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<CompatIndexV3ToV4>> + '_> {
        Ok(match self {
            CompatV3ToV4::V3(v3) => {
                Box::new(v3.indexes()?.map(|index| index.map(CompatIndexV3ToV4::from)))
                    as Box<dyn Iterator<Item = Result<CompatIndexV3ToV4>> + '_>
            }
            CompatV3ToV4::Compat(compat) => {
                Box::new(compat.indexes()?.map(|index| index.map(CompatIndexV3ToV4::from)))
                    as Box<dyn Iterator<Item = Result<CompatIndexV3ToV4>> + '_>
            }
        })
    }

    pub fn tasks(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<(v4::Task, Option<Box<UpdateFile>>)>> + '_> {
        let indexes = match self {
            CompatV3ToV4::V3(v3) => v3.index_uuid(),
            CompatV3ToV4::Compat(compat) => compat.index_uuid(),
        };
        let tasks = match self {
            CompatV3ToV4::V3(v3) => v3.tasks(),
            CompatV3ToV4::Compat(compat) => compat.tasks(),
        };

        Box::new(
            tasks
                // we need to override the old task ids that were generated
                // by index in favor of a global unique incremental ID.
                .enumerate()
                .map(move |(task_id, task)| {
                    task.map(|(task, content_file)| {
                        let index_uid = indexes
                            .iter()
                            .find(|index| index.uuid == task.uuid)
                            .map(|index| index.uid.clone());

                        let index_uid = match index_uid {
                            Some(uid) => uid,
                            None => {
                                log::warn!(
                                    "Error while importing the update {}.",
                                    task.update.id()
                                );
                                log::warn!(
                                    "The index associated to the uuid `{}` could not be retrieved.",
                                    task.uuid.to_string()
                                );
                                if task.update.is_finished() {
                                    // we're rewriting its history but not its data, which is ok-ish
                                    log::warn!("The index-uuid will be set as `unknown`.");
                                    String::from("unknown")
                                } else {
                                    log::warn!("The task will be ignored.");
                                    return None;
                                }
                            }
                        };

                        let task = v4::Task {
                            id: task_id as u32,
                            index_uid: v4::meta::IndexUid(index_uid),
                            content: match task.update.meta() {
                                v3::Kind::DeleteDocuments(documents) => {
                                    v4::tasks::TaskContent::DocumentDeletion(
                                        v4::tasks::DocumentDeletion::Ids(documents.clone()),
                                    )
                                }
                                v3::Kind::DocumentAddition {
                                    primary_key,
                                    method,
                                    content_uuid,
                                } => v4::tasks::TaskContent::DocumentAddition {
                                    merge_strategy: match method {
                                        v3::updates::IndexDocumentsMethod::ReplaceDocuments => {
                                            v4::tasks::IndexDocumentsMethod::ReplaceDocuments
                                        }
                                        v3::updates::IndexDocumentsMethod::UpdateDocuments => {
                                            v4::tasks::IndexDocumentsMethod::UpdateDocuments
                                        }
                                    },
                                    primary_key: primary_key.clone(),
                                    documents_count: 0, // we don't have this info
                                    allow_index_creation: true, // there was no API key in v3
                                    content_uuid: *content_uuid,
                                },
                                v3::Kind::Settings(settings) => {
                                    v4::tasks::TaskContent::SettingsUpdate {
                                        settings: v4::Settings::from(settings.clone()),
                                        is_deletion: false, // that didn't exist at this time
                                        allow_index_creation: true, // there was no API key in v3
                                    }
                                }
                                v3::Kind::ClearDocuments => {
                                    v4::tasks::TaskContent::DocumentDeletion(
                                        v4::tasks::DocumentDeletion::Clear,
                                    )
                                }
                            },
                            events: match task.update {
                                v3::Status::Processing(processing) => {
                                    vec![v4::tasks::TaskEvent::Created(processing.from.enqueued_at)]
                                }
                                v3::Status::Enqueued(enqueued) => {
                                    vec![v4::tasks::TaskEvent::Created(enqueued.enqueued_at)]
                                }
                                v3::Status::Processed(processed) => {
                                    vec![
                                        v4::tasks::TaskEvent::Created(
                                            processed.from.from.enqueued_at,
                                        ),
                                        v4::tasks::TaskEvent::Processing(
                                            processed.from.started_processing_at,
                                        ),
                                        v4::tasks::TaskEvent::Succeded {
                                            result: match processed.success {
                                                v3::updates::UpdateResult::DocumentsAddition(
                                                    document_addition,
                                                ) => v4::tasks::TaskResult::DocumentAddition {
                                                    indexed_documents: document_addition
                                                        .nb_documents
                                                        as u64,
                                                },
                                                v3::updates::UpdateResult::DocumentDeletion {
                                                    deleted,
                                                } => v4::tasks::TaskResult::DocumentDeletion {
                                                    deleted_documents: deleted,
                                                },
                                                v3::updates::UpdateResult::Other => {
                                                    v4::tasks::TaskResult::Other
                                                }
                                            },
                                            timestamp: processed.processed_at,
                                        },
                                    ]
                                }
                                v3::Status::Failed(failed) => vec![
                                    v4::tasks::TaskEvent::Created(failed.from.from.enqueued_at),
                                    v4::tasks::TaskEvent::Processing(
                                        failed.from.started_processing_at,
                                    ),
                                    v4::tasks::TaskEvent::Failed {
                                        error: v4::ResponseError::from_msg(
                                            failed.msg.to_string(),
                                            failed.code.into(),
                                        ),
                                        timestamp: failed.failed_at,
                                    },
                                ],
                                v3::Status::Aborted(aborted) => vec![
                                    v4::tasks::TaskEvent::Created(aborted.from.enqueued_at),
                                    v4::tasks::TaskEvent::Failed {
                                        error: v4::ResponseError::from_msg(
                                            "Task was aborted in a previous version of meilisearch."
                                                .to_string(),
                                            v4::errors::Code::UnretrievableErrorCode,
                                        ),
                                        timestamp: aborted.aborted_at,
                                    },
                                ],
                            },
                        };

                        Some((task, content_file))
                    })
                })
                .filter_map(|res| res.transpose()),
        )
    }

    pub fn keys(&mut self) -> Box<dyn Iterator<Item = Result<v4::Key>> + '_> {
        Box::new(std::iter::empty())
    }
}

pub enum CompatIndexV3ToV4 {
    V3(v3::V3IndexReader),
    Compat(CompatIndexV2ToV3),
}

impl From<v3::V3IndexReader> for CompatIndexV3ToV4 {
    fn from(index_reader: v3::V3IndexReader) -> Self {
        Self::V3(index_reader)
    }
}

impl From<CompatIndexV2ToV3> for CompatIndexV3ToV4 {
    fn from(index_reader: CompatIndexV2ToV3) -> Self {
        Self::Compat(index_reader)
    }
}

impl CompatIndexV3ToV4 {
    pub fn new(v3: v3::V3IndexReader) -> CompatIndexV3ToV4 {
        CompatIndexV3ToV4::V3(v3)
    }

    pub fn metadata(&self) -> &crate::IndexMetadata {
        match self {
            CompatIndexV3ToV4::V3(v3) => v3.metadata(),
            CompatIndexV3ToV4::Compat(compat) => compat.metadata(),
        }
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<v4::Document>> + '_>> {
        match self {
            CompatIndexV3ToV4::V3(v3) => v3
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<v4::Document>> + '_>),
            CompatIndexV3ToV4::Compat(compat) => compat
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<v4::Document>> + '_>),
        }
    }

    pub fn settings(&mut self) -> Result<v4::Settings<v4::Checked>> {
        Ok(match self {
            CompatIndexV3ToV4::V3(v3) => {
                v4::Settings::<v4::Unchecked>::from(v3.settings()?).check()
            }
            CompatIndexV3ToV4::Compat(compat) => {
                v4::Settings::<v4::Unchecked>::from(compat.settings()?).check()
            }
        })
    }
}

impl<T> From<v3::Setting<T>> for v4::Setting<T> {
    fn from(setting: v3::Setting<T>) -> Self {
        match setting {
            v3::Setting::Set(t) => v4::Setting::Set(t),
            v3::Setting::Reset => v4::Setting::Reset,
            v3::Setting::NotSet => v4::Setting::NotSet,
        }
    }
}

impl From<v3::Code> for v4::Code {
    fn from(code: v3::Code) -> Self {
        match code {
            v3::Code::CreateIndex => v4::Code::CreateIndex,
            v3::Code::IndexAlreadyExists => v4::Code::IndexAlreadyExists,
            v3::Code::IndexNotFound => v4::Code::IndexNotFound,
            v3::Code::InvalidIndexUid => v4::Code::InvalidIndexUid,
            v3::Code::InvalidState => v4::Code::InvalidState,
            v3::Code::MissingPrimaryKey => v4::Code::MissingPrimaryKey,
            v3::Code::PrimaryKeyAlreadyPresent => v4::Code::PrimaryKeyAlreadyPresent,
            v3::Code::MaxFieldsLimitExceeded => v4::Code::MaxFieldsLimitExceeded,
            v3::Code::MissingDocumentId => v4::Code::MissingDocumentId,
            v3::Code::InvalidDocumentId => v4::Code::InvalidDocumentId,
            v3::Code::Filter => v4::Code::Filter,
            v3::Code::Sort => v4::Code::Sort,
            v3::Code::BadParameter => v4::Code::BadParameter,
            v3::Code::BadRequest => v4::Code::BadRequest,
            v3::Code::DatabaseSizeLimitReached => v4::Code::DatabaseSizeLimitReached,
            v3::Code::DocumentNotFound => v4::Code::DocumentNotFound,
            v3::Code::Internal => v4::Code::Internal,
            v3::Code::InvalidGeoField => v4::Code::InvalidGeoField,
            v3::Code::InvalidRankingRule => v4::Code::InvalidRankingRule,
            v3::Code::InvalidStore => v4::Code::InvalidStore,
            v3::Code::InvalidToken => v4::Code::InvalidToken,
            v3::Code::MissingAuthorizationHeader => v4::Code::MissingAuthorizationHeader,
            v3::Code::NoSpaceLeftOnDevice => v4::Code::NoSpaceLeftOnDevice,
            v3::Code::DumpNotFound => v4::Code::DumpNotFound,
            v3::Code::TaskNotFound => v4::Code::TaskNotFound,
            v3::Code::PayloadTooLarge => v4::Code::PayloadTooLarge,
            v3::Code::RetrieveDocument => v4::Code::RetrieveDocument,
            v3::Code::SearchDocuments => v4::Code::SearchDocuments,
            v3::Code::UnsupportedMediaType => v4::Code::UnsupportedMediaType,
            v3::Code::DumpAlreadyInProgress => v4::Code::DumpAlreadyInProgress,
            v3::Code::DumpProcessFailed => v4::Code::DumpProcessFailed,
            v3::Code::InvalidContentType => v4::Code::InvalidContentType,
            v3::Code::MissingContentType => v4::Code::MissingContentType,
            v3::Code::MalformedPayload => v4::Code::MalformedPayload,
            v3::Code::MissingPayload => v4::Code::MissingPayload,
            v3::Code::UnretrievableErrorCode => v4::Code::UnretrievableErrorCode,
            v3::Code::MalformedDump => v4::Code::MalformedDump,
        }
    }
}

impl<T> From<v3::Settings<T>> for v4::Settings<v4::Unchecked> {
    fn from(settings: v3::Settings<T>) -> Self {
        v4::Settings {
            displayed_attributes: settings.displayed_attributes.into(),
            searchable_attributes: settings.searchable_attributes.into(),
            filterable_attributes: settings.filterable_attributes.into(),
            sortable_attributes: settings.sortable_attributes.into(),
            ranking_rules: settings.ranking_rules.into(),
            stop_words: settings.stop_words.into(),
            synonyms: settings.synonyms.into(),
            distinct_attribute: settings.distinct_attribute.into(),
            typo_tolerance: v4::Setting::NotSet,
            _kind: std::marker::PhantomData,
        }
    }
}

#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn compat_v3_v4() {
        let dump = File::open("tests/assets/v3.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = v3::V3Reader::open(dir).unwrap().to_v4();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-07 11:39:03.709153554 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, mut update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"79bc053583a1a7172bbaaafb1edaeb78");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed

        let update_file = update_files.remove(0).unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(update_file), @"7b8889539b669c7b9ddba448bafa385d");

        // keys
        let keys = dump.keys().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys, { "[].uid" => "[uuid]" }), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies2 = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");

        // movies2
        insta::assert_json_snapshot!(movies2.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies_2",
          "primaryKey": null,
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }
}
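The `tasks()` method above leans on a small iterator trick worth spelling out: `enumerate` assigns a fresh global id to each task, while `filter_map(|res| res.transpose())` drops tasks whose index cannot be resolved (the `None` case) without swallowing real errors (the `Err` case). Here is a minimal, self-contained sketch of that `Result<Option<T>>` to `Option<Result<T>>` pattern, independent of the Meilisearch types (all names here are illustrative):

// Sketch of renumbering per-index task ids into one global sequence while
// dropping unresolvable entries (None) and keeping hard errors (Err).
fn renumber(
    tasks: Vec<Result<Option<&'static str>, String>>,
) -> Vec<Result<(u32, &'static str), String>> {
    tasks
        .into_iter()
        .enumerate()
        .map(|(id, task)| {
            // keep the error, attach the new global id to successful tasks
            task.map(|maybe_task| maybe_task.map(|t| (id as u32, t)))
        })
        // Result<Option<T>> -> Option<Result<T>>: None entries vanish, Err survives
        .filter_map(|res| res.transpose())
        .collect()
}

fn main() {
    let tasks = vec![Ok(Some("settings")), Ok(None), Err("io".to_string()), Ok(Some("add"))];
    let out = renumber(tasks);
    assert_eq!(out.len(), 3); // the None task was ignored
    assert!(matches!(out[0], Ok((0, "settings"))));
    assert!(out[1].is_err());
    assert!(matches!(out[2], Ok((3, "add")))); // ids follow enumeration order
}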
dump/src/reader/compat/v4_to_v5.rs (new file, 467 lines)
@@ -0,0 +1,467 @@
use super::v3_to_v4::{CompatIndexV3ToV4, CompatV3ToV4};
use super::v5_to_v6::CompatV5ToV6;
use crate::reader::{v4, v5, Document};
use crate::Result;

pub enum CompatV4ToV5 {
    V4(v4::V4Reader),
    Compat(CompatV3ToV4),
}

impl CompatV4ToV5 {
    pub fn new(v4: v4::V4Reader) -> CompatV4ToV5 {
        CompatV4ToV5::V4(v4)
    }

    pub fn to_v6(self) -> CompatV5ToV6 {
        CompatV5ToV6::Compat(self)
    }

    pub fn version(&self) -> crate::Version {
        match self {
            CompatV4ToV5::V4(v4) => v4.version(),
            CompatV4ToV5::Compat(compat) => compat.version(),
        }
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        match self {
            CompatV4ToV5::V4(v4) => v4.date(),
            CompatV4ToV5::Compat(compat) => compat.date(),
        }
    }

    pub fn instance_uid(&self) -> Result<Option<uuid::Uuid>> {
        match self {
            CompatV4ToV5::V4(v4) => v4.instance_uid(),
            CompatV4ToV5::Compat(compat) => compat.instance_uid(),
        }
    }

    pub fn indexes(&self) -> Result<Box<dyn Iterator<Item = Result<CompatIndexV4ToV5>> + '_>> {
        Ok(match self {
            CompatV4ToV5::V4(v4) => {
                Box::new(v4.indexes()?.map(|index| index.map(CompatIndexV4ToV5::from)))
                    as Box<dyn Iterator<Item = Result<CompatIndexV4ToV5>> + '_>
            }
            CompatV4ToV5::Compat(compat) => {
                Box::new(compat.indexes()?.map(|index| index.map(CompatIndexV4ToV5::from)))
                    as Box<dyn Iterator<Item = Result<CompatIndexV4ToV5>> + '_>
            }
        })
    }

    pub fn tasks(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<(v5::Task, Option<Box<crate::reader::UpdateFile>>)>> + '_>
    {
        let tasks = match self {
            CompatV4ToV5::V4(v4) => v4.tasks(),
            CompatV4ToV5::Compat(compat) => compat.tasks(),
        };
        Box::new(tasks.map(|task| {
            task.map(|(task, content_file)| {
                let task = v5::Task {
                    id: task.id,
                    content: match task.content {
                        v4::tasks::TaskContent::DocumentAddition {
                            content_uuid,
                            merge_strategy,
                            primary_key,
                            documents_count,
                            allow_index_creation,
                        } => v5::tasks::TaskContent::DocumentAddition {
                            index_uid: v5::meta::IndexUid(task.index_uid.0),
                            content_uuid,
                            merge_strategy: match merge_strategy {
                                v4::tasks::IndexDocumentsMethod::ReplaceDocuments => {
                                    v5::tasks::IndexDocumentsMethod::ReplaceDocuments
                                }
                                v4::tasks::IndexDocumentsMethod::UpdateDocuments => {
                                    v5::tasks::IndexDocumentsMethod::UpdateDocuments
                                }
                            },
                            primary_key,
                            documents_count,
                            allow_index_creation,
                        },
                        v4::tasks::TaskContent::DocumentDeletion(deletion) => {
                            v5::tasks::TaskContent::DocumentDeletion {
                                index_uid: v5::meta::IndexUid(task.index_uid.0),
                                deletion: match deletion {
                                    v4::tasks::DocumentDeletion::Clear => {
                                        v5::tasks::DocumentDeletion::Clear
                                    }
                                    v4::tasks::DocumentDeletion::Ids(ids) => {
                                        v5::tasks::DocumentDeletion::Ids(ids)
                                    }
                                },
                            }
                        }
                        v4::tasks::TaskContent::SettingsUpdate {
                            settings,
                            is_deletion,
                            allow_index_creation,
                        } => v5::tasks::TaskContent::SettingsUpdate {
                            index_uid: v5::meta::IndexUid(task.index_uid.0),
                            settings: settings.into(),
                            is_deletion,
                            allow_index_creation,
                        },
                        v4::tasks::TaskContent::IndexDeletion => {
                            v5::tasks::TaskContent::IndexDeletion {
                                index_uid: v5::meta::IndexUid(task.index_uid.0),
                            }
                        }
                        v4::tasks::TaskContent::IndexCreation { primary_key } => {
                            v5::tasks::TaskContent::IndexCreation {
                                index_uid: v5::meta::IndexUid(task.index_uid.0),
                                primary_key,
                            }
                        }
                        v4::tasks::TaskContent::IndexUpdate { primary_key } => {
                            v5::tasks::TaskContent::IndexUpdate {
                                index_uid: v5::meta::IndexUid(task.index_uid.0),
                                primary_key,
                            }
                        }
                    },
                    events: task
                        .events
                        .into_iter()
                        .map(|event| match event {
                            v4::tasks::TaskEvent::Created(date) => {
                                v5::tasks::TaskEvent::Created(date)
                            }
                            v4::tasks::TaskEvent::Batched { timestamp, batch_id } => {
                                v5::tasks::TaskEvent::Batched { timestamp, batch_id }
                            }
                            v4::tasks::TaskEvent::Processing(date) => {
                                v5::tasks::TaskEvent::Processing(date)
                            }
                            v4::tasks::TaskEvent::Succeded { result, timestamp } => {
                                v5::tasks::TaskEvent::Succeeded {
                                    result: match result {
                                        v4::tasks::TaskResult::DocumentAddition {
                                            indexed_documents,
                                        } => v5::tasks::TaskResult::DocumentAddition {
                                            indexed_documents,
                                        },
                                        v4::tasks::TaskResult::DocumentDeletion {
                                            deleted_documents,
                                        } => v5::tasks::TaskResult::DocumentDeletion {
                                            deleted_documents,
                                        },
                                        v4::tasks::TaskResult::ClearAll { deleted_documents } => {
                                            v5::tasks::TaskResult::ClearAll { deleted_documents }
                                        }
                                        v4::tasks::TaskResult::Other => {
                                            v5::tasks::TaskResult::Other
                                        }
                                    },
                                    timestamp,
                                }
                            }
                            v4::tasks::TaskEvent::Failed { error, timestamp } => {
                                v5::tasks::TaskEvent::Failed {
                                    error: v5::ResponseError::from(error),
                                    timestamp,
                                }
                            }
                        })
                        .collect(),
                };

                (task, content_file)
            })
        }))
    }

    pub fn keys(&mut self) -> Box<dyn Iterator<Item = Result<v5::Key>> + '_> {
        let keys = match self {
            CompatV4ToV5::V4(v4) => v4.keys(),
            CompatV4ToV5::Compat(compat) => compat.keys(),
        };
        Box::new(keys.map(|key| {
            key.map(|key| v5::Key {
                description: key.description,
                name: None,
                uid: v5::keys::KeyId::new_v4(),
                actions: key.actions.into_iter().filter_map(|action| action.into()).collect(),
                indexes: key
                    .indexes
                    .into_iter()
                    .map(|index| match index.as_str() {
                        "*" => v5::StarOr::Star,
                        _ => v5::StarOr::Other(v5::meta::IndexUid(index)),
                    })
                    .collect(),
                expires_at: key.expires_at,
                created_at: key.created_at,
                updated_at: key.updated_at,
            })
        }))
    }
}

pub enum CompatIndexV4ToV5 {
    V4(v4::V4IndexReader),
    Compat(CompatIndexV3ToV4),
}

impl From<v4::V4IndexReader> for CompatIndexV4ToV5 {
    fn from(index_reader: v4::V4IndexReader) -> Self {
        Self::V4(index_reader)
    }
}

impl From<CompatIndexV3ToV4> for CompatIndexV4ToV5 {
    fn from(index_reader: CompatIndexV3ToV4) -> Self {
        Self::Compat(index_reader)
    }
}

impl CompatIndexV4ToV5 {
    pub fn metadata(&self) -> &crate::IndexMetadata {
        match self {
            CompatIndexV4ToV5::V4(v4) => v4.metadata(),
            CompatIndexV4ToV5::Compat(compat) => compat.metadata(),
        }
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<Document>> + '_>> {
        match self {
            CompatIndexV4ToV5::V4(v4) => v4
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
            CompatIndexV4ToV5::Compat(compat) => compat
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
        }
    }

    pub fn settings(&mut self) -> Result<v5::Settings<v5::Checked>> {
        match self {
            CompatIndexV4ToV5::V4(v4) => Ok(v5::Settings::from(v4.settings()?).check()),
            CompatIndexV4ToV5::Compat(compat) => Ok(v5::Settings::from(compat.settings()?).check()),
        }
    }
}

impl<T> From<v4::Setting<T>> for v5::Setting<T> {
    fn from(setting: v4::Setting<T>) -> Self {
        match setting {
            v4::Setting::Set(t) => v5::Setting::Set(t),
            v4::Setting::Reset => v5::Setting::Reset,
            v4::Setting::NotSet => v5::Setting::NotSet,
        }
    }
}

impl From<v4::ResponseError> for v5::ResponseError {
    fn from(error: v4::ResponseError) -> Self {
        let code = match error.error_code.as_ref() {
            "index_creation_failed" => v5::Code::CreateIndex,
            "index_already_exists" => v5::Code::IndexAlreadyExists,
            "index_not_found" => v5::Code::IndexNotFound,
            "invalid_index_uid" => v5::Code::InvalidIndexUid,
            "invalid_min_word_length_for_typo" => v5::Code::InvalidMinWordLengthForTypo,
            "invalid_state" => v5::Code::InvalidState,
            "primary_key_inference_failed" => v5::Code::MissingPrimaryKey,
            "index_primary_key_already_exists" => v5::Code::PrimaryKeyAlreadyPresent,
            "max_fields_limit_exceeded" => v5::Code::MaxFieldsLimitExceeded,
            "missing_document_id" => v5::Code::MissingDocumentId,
            "invalid_document_id" => v5::Code::InvalidDocumentId,
            "invalid_filter" => v5::Code::Filter,
            "invalid_sort" => v5::Code::Sort,
            "bad_parameter" => v5::Code::BadParameter,
            "bad_request" => v5::Code::BadRequest,
            "database_size_limit_reached" => v5::Code::DatabaseSizeLimitReached,
            "document_not_found" => v5::Code::DocumentNotFound,
            "internal" => v5::Code::Internal,
            "invalid_geo_field" => v5::Code::InvalidGeoField,
            "invalid_ranking_rule" => v5::Code::InvalidRankingRule,
            "invalid_store_file" => v5::Code::InvalidStore,
            "invalid_api_key" => v5::Code::InvalidToken,
            "missing_authorization_header" => v5::Code::MissingAuthorizationHeader,
            "no_space_left_on_device" => v5::Code::NoSpaceLeftOnDevice,
            "dump_not_found" => v5::Code::DumpNotFound,
            "task_not_found" => v5::Code::TaskNotFound,
            "payload_too_large" => v5::Code::PayloadTooLarge,
            "unretrievable_document" => v5::Code::RetrieveDocument,
            "search_error" => v5::Code::SearchDocuments,
            "unsupported_media_type" => v5::Code::UnsupportedMediaType,
            "dump_already_processing" => v5::Code::DumpAlreadyInProgress,
            "dump_process_failed" => v5::Code::DumpProcessFailed,
            "invalid_content_type" => v5::Code::InvalidContentType,
            "missing_content_type" => v5::Code::MissingContentType,
            "malformed_payload" => v5::Code::MalformedPayload,
            "missing_payload" => v5::Code::MissingPayload,
            "api_key_not_found" => v5::Code::ApiKeyNotFound,
            "missing_parameter" => v5::Code::MissingParameter,
            "invalid_api_key_actions" => v5::Code::InvalidApiKeyActions,
            "invalid_api_key_indexes" => v5::Code::InvalidApiKeyIndexes,
            "invalid_api_key_expires_at" => v5::Code::InvalidApiKeyExpiresAt,
            "invalid_api_key_description" => v5::Code::InvalidApiKeyDescription,
            other => {
                log::warn!("Unknown error code {}", other);
                v5::Code::UnretrievableErrorCode
            }
        };
        v5::ResponseError::from_msg(error.message, code)
    }
}

impl<T> From<v4::Settings<T>> for v5::Settings<v5::Unchecked> {
    fn from(settings: v4::Settings<T>) -> Self {
        v5::Settings {
            displayed_attributes: settings.displayed_attributes.into(),
            searchable_attributes: settings.searchable_attributes.into(),
            filterable_attributes: settings.filterable_attributes.into(),
            sortable_attributes: settings.sortable_attributes.into(),
            ranking_rules: settings.ranking_rules.into(),
            stop_words: settings.stop_words.into(),
            synonyms: settings.synonyms.into(),
            distinct_attribute: settings.distinct_attribute.into(),
            typo_tolerance: match settings.typo_tolerance {
                v4::Setting::Set(typo) => v5::Setting::Set(v5::TypoTolerance {
                    enabled: typo.enabled.into(),
                    min_word_size_for_typos: match typo.min_word_size_for_typos {
                        v4::Setting::Set(t) => v5::Setting::Set(v5::MinWordSizeForTypos {
                            one_typo: t.one_typo.into(),
                            two_typos: t.two_typos.into(),
                        }),
                        v4::Setting::Reset => v5::Setting::Reset,
                        v4::Setting::NotSet => v5::Setting::NotSet,
                    },
                    disable_on_words: typo.disable_on_words.into(),
                    disable_on_attributes: typo.disable_on_attributes.into(),
                }),
                v4::Setting::Reset => v5::Setting::Reset,
                v4::Setting::NotSet => v5::Setting::NotSet,
            },
            faceting: v5::Setting::NotSet,
            pagination: v5::Setting::NotSet,
            _kind: std::marker::PhantomData,
        }
    }
}

impl From<v4::Action> for Option<v5::Action> {
    fn from(key: v4::Action) -> Self {
        match key {
            v4::Action::All => Some(v5::Action::All),
            v4::Action::Search => Some(v5::Action::Search),
            v4::Action::DocumentsAdd => Some(v5::Action::DocumentsAdd),
            v4::Action::DocumentsGet => Some(v5::Action::DocumentsGet),
            v4::Action::DocumentsDelete => Some(v5::Action::DocumentsDelete),
            v4::Action::IndexesAdd => Some(v5::Action::IndexesAdd),
            v4::Action::IndexesGet => Some(v5::Action::IndexesGet),
            v4::Action::IndexesUpdate => Some(v5::Action::IndexesUpdate),
            v4::Action::IndexesDelete => Some(v5::Action::IndexesDelete),
            v4::Action::TasksGet => Some(v5::Action::TasksGet),
            v4::Action::SettingsGet => Some(v5::Action::SettingsGet),
            v4::Action::SettingsUpdate => Some(v5::Action::SettingsUpdate),
            v4::Action::StatsGet => Some(v5::Action::StatsGet),
            v4::Action::DumpsCreate => Some(v5::Action::DumpsCreate),
            v4::Action::DumpsGet => None,
            v4::Action::Version => Some(v5::Action::Version),
        }
    }
}

#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn compat_v4_v5() {
        let dump = File::open("tests/assets/v4.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = v4::V4Reader::open(dir).unwrap().to_v5();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-06 12:53:49.131989609 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"ed9a30cded4c046ef46f7cff7450347e");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys, { "[].uid" => "[uuid]" }), @"1384361d734fd77c23804c9696228660");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"786022a66ecb992c8a2a60fee070a5ab");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }
}
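One detail worth flagging in the key migration above: `From<v4::Action> for Option<v5::Action>` returns `None` for actions with no v5 equivalent (here `DumpsGet`), and `filter_map` silently drops them. The following standalone sketch shows that filtering conversion with toy enums; they are assumptions for illustration, not the real v4/v5 types.

// Toy versions of the action enums (assumptions, not the real v4/v5 types).
enum OldAction {
    Search,
    DumpsGet, // removed in the newer version
}

#[derive(Debug, PartialEq)]
enum NewAction {
    Search,
}

impl From<OldAction> for Option<NewAction> {
    fn from(action: OldAction) -> Self {
        match action {
            OldAction::Search => Some(NewAction::Search),
            // no equivalent anymore: the action is dropped during migration
            OldAction::DumpsGet => None,
        }
    }
}

fn main() {
    let old = vec![OldAction::Search, OldAction::DumpsGet];
    // same shape as `key.actions.into_iter().filter_map(|action| action.into()).collect()`
    let new: Vec<NewAction> = old.into_iter().filter_map(|a| a.into()).collect();
    assert_eq!(new, vec![NewAction::Search]);
}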
dump/src/reader/compat/v5_to_v6.rs (new file, 507 lines)
@@ -0,0 +1,507 @@
use std::str::FromStr;

use super::v4_to_v5::{CompatIndexV4ToV5, CompatV4ToV5};
use crate::reader::{v5, v6, Document, UpdateFile};
use crate::Result;

pub enum CompatV5ToV6 {
    V5(v5::V5Reader),
    Compat(CompatV4ToV5),
}

impl CompatV5ToV6 {
    pub fn new_v5(v5: v5::V5Reader) -> CompatV5ToV6 {
        CompatV5ToV6::V5(v5)
    }

    pub fn version(&self) -> crate::Version {
        match self {
            CompatV5ToV6::V5(v5) => v5.version(),
            CompatV5ToV6::Compat(compat) => compat.version(),
        }
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        match self {
            CompatV5ToV6::V5(v5) => v5.date(),
            CompatV5ToV6::Compat(compat) => compat.date(),
        }
    }

    pub fn instance_uid(&self) -> Result<Option<uuid::Uuid>> {
        match self {
            CompatV5ToV6::V5(v5) => v5.instance_uid(),
            CompatV5ToV6::Compat(compat) => compat.instance_uid(),
        }
    }

    pub fn indexes(&self) -> Result<Box<dyn Iterator<Item = Result<CompatIndexV5ToV6>> + '_>> {
        let indexes = match self {
            CompatV5ToV6::V5(v5) => {
                Box::new(v5.indexes()?.map(|index| index.map(CompatIndexV5ToV6::from)))
                    as Box<dyn Iterator<Item = Result<CompatIndexV5ToV6>> + '_>
            }
            CompatV5ToV6::Compat(compat) => {
                Box::new(compat.indexes()?.map(|index| index.map(CompatIndexV5ToV6::from)))
                    as Box<dyn Iterator<Item = Result<CompatIndexV5ToV6>> + '_>
            }
        };
        Ok(indexes)
    }

    pub fn tasks(
        &mut self,
    ) -> Result<Box<dyn Iterator<Item = Result<(v6::Task, Option<Box<UpdateFile>>)>> + '_>> {
        let instance_uid = self.instance_uid().ok().flatten();
        let keys = self.keys()?.collect::<Result<Vec<_>>>()?;

        let tasks = match self {
            CompatV5ToV6::V5(v5) => v5.tasks(),
            CompatV5ToV6::Compat(compat) => compat.tasks(),
        };
        Ok(Box::new(tasks.map(move |task| {
            task.map(|(task, content_file)| {
                let mut task_view: v5::tasks::TaskView = task.clone().into();

                if task_view.status == v5::Status::Processing {
                    task_view.started_at = None;
                }

                let task = v6::Task {
                    uid: task_view.uid,
                    index_uid: task_view.index_uid,
                    status: match task_view.status {
                        v5::Status::Enqueued => v6::Status::Enqueued,
                        v5::Status::Processing => v6::Status::Enqueued,
                        v5::Status::Succeeded => v6::Status::Succeeded,
                        v5::Status::Failed => v6::Status::Failed,
                    },
                    kind: match task.content {
                        v5::tasks::TaskContent::IndexCreation { primary_key, .. } => {
                            v6::Kind::IndexCreation { primary_key }
                        }
                        v5::tasks::TaskContent::IndexUpdate { primary_key, .. } => {
                            v6::Kind::IndexUpdate { primary_key }
                        }
                        v5::tasks::TaskContent::IndexDeletion { .. } => v6::Kind::IndexDeletion,
                        v5::tasks::TaskContent::DocumentAddition {
                            merge_strategy,
                            allow_index_creation,
                            primary_key,
                            documents_count,
                            ..
                        } => v6::Kind::DocumentImport {
                            primary_key,
                            documents_count: documents_count as u64,
                            method: match merge_strategy {
                                v5::tasks::IndexDocumentsMethod::ReplaceDocuments => {
                                    v6::milli::update::IndexDocumentsMethod::ReplaceDocuments
                                }
                                v5::tasks::IndexDocumentsMethod::UpdateDocuments => {
                                    v6::milli::update::IndexDocumentsMethod::UpdateDocuments
                                }
                            },
                            allow_index_creation,
                        },
                        v5::tasks::TaskContent::DocumentDeletion { deletion, .. } => match deletion
                        {
                            v5::tasks::DocumentDeletion::Clear => v6::Kind::DocumentClear,
                            v5::tasks::DocumentDeletion::Ids(documents_ids) => {
                                v6::Kind::DocumentDeletion { documents_ids }
                            }
                        },
                        v5::tasks::TaskContent::SettingsUpdate {
                            allow_index_creation,
                            is_deletion,
                            settings,
                            ..
                        } => v6::Kind::Settings {
                            is_deletion,
                            allow_index_creation,
                            settings: Box::new(settings.into()),
                        },
                        v5::tasks::TaskContent::Dump { uid: _ } => {
                            // in v6 we compute the dump_uid from the started_at processing time
                            v6::Kind::DumpCreation { keys: keys.clone(), instance_uid }
                        }
                    },
                    canceled_by: None,
                    details: task_view.details.map(|details| match details {
                        v5::Details::DocumentAddition { received_documents, indexed_documents } => {
                            v6::Details::DocumentAdditionOrUpdate {
                                received_documents: received_documents as u64,
                                indexed_documents,
                            }
                        }
                        v5::Details::Settings { settings } => {
                            v6::Details::SettingsUpdate { settings: Box::new(settings.into()) }
                        }
                        v5::Details::IndexInfo { primary_key } => {
                            v6::Details::IndexInfo { primary_key }
                        }
                        v5::Details::DocumentDeletion {
                            received_document_ids,
                            deleted_documents,
                        } => v6::Details::DocumentDeletion {
                            provided_ids: received_document_ids,
                            deleted_documents,
                        },
                        v5::Details::ClearAll { deleted_documents } => {
                            v6::Details::ClearAll { deleted_documents }
                        }
                        v5::Details::Dump { dump_uid } => {
                            v6::Details::Dump { dump_uid: Some(dump_uid) }
                        }
                    }),
                    error: task_view.error.map(|e| e.into()),
                    enqueued_at: task_view.enqueued_at,
                    started_at: task_view.started_at,
                    finished_at: task_view.finished_at,
                };

                (task, content_file)
            })
        })))
    }

    pub fn keys(&mut self) -> Result<Box<dyn Iterator<Item = Result<v6::Key>> + '_>> {
        let keys = match self {
            CompatV5ToV6::V5(v5) => v5.keys()?,
            CompatV5ToV6::Compat(compat) => compat.keys(),
        };

        Ok(Box::new(keys.map(|key| {
            key.map(|key| v6::Key {
                description: key.description,
                name: key.name,
                uid: key.uid,
                actions: key.actions.into_iter().map(|action| action.into()).collect(),
                indexes: key
                    .indexes
                    .into_iter()
                    .map(|index| match index {
                        v5::StarOr::Star => v6::StarOr::Star,
                        v5::StarOr::Other(uid) => {
                            v6::StarOr::Other(v6::IndexUid::new_unchecked(uid.as_str()))
                        }
                    })
                    .collect(),
                expires_at: key.expires_at,
                created_at: key.created_at,
                updated_at: key.updated_at,
            })
        })))
    }
}

pub enum CompatIndexV5ToV6 {
    V5(v5::V5IndexReader),
    Compat(CompatIndexV4ToV5),
}

impl From<v5::V5IndexReader> for CompatIndexV5ToV6 {
    fn from(index_reader: v5::V5IndexReader) -> Self {
        Self::V5(index_reader)
    }
}

impl From<CompatIndexV4ToV5> for CompatIndexV5ToV6 {
    fn from(index_reader: CompatIndexV4ToV5) -> Self {
        Self::Compat(index_reader)
    }
}

impl CompatIndexV5ToV6 {
    pub fn new_v5(v5: v5::V5IndexReader) -> CompatIndexV5ToV6 {
        CompatIndexV5ToV6::V5(v5)
    }

    pub fn metadata(&self) -> &crate::IndexMetadata {
        match self {
            CompatIndexV5ToV6::V5(v5) => v5.metadata(),
            CompatIndexV5ToV6::Compat(compat) => compat.metadata(),
        }
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<Document>> + '_>> {
        match self {
            CompatIndexV5ToV6::V5(v5) => v5
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
            CompatIndexV5ToV6::Compat(compat) => compat
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
        }
    }

    pub fn settings(&mut self) -> Result<v6::Settings<v6::Checked>> {
        match self {
            CompatIndexV5ToV6::V5(v5) => Ok(v6::Settings::from(v5.settings()?).check()),
            CompatIndexV5ToV6::Compat(compat) => Ok(v6::Settings::from(compat.settings()?).check()),
        }
    }
}

impl<T> From<v5::Setting<T>> for v6::Setting<T> {
    fn from(setting: v5::Setting<T>) -> Self {
        match setting {
            v5::Setting::Set(t) => v6::Setting::Set(t),
            v5::Setting::Reset => v6::Setting::Reset,
            v5::Setting::NotSet => v6::Setting::NotSet,
        }
    }
}

impl From<v5::ResponseError> for v6::ResponseError {
    fn from(error: v5::ResponseError) -> Self {
        let code = match error.error_code.as_ref() {
            "index_creation_failed" => v6::Code::IndexCreationFailed,
            "index_already_exists" => v6::Code::IndexAlreadyExists,
            "index_not_found" => v6::Code::IndexNotFound,
            "invalid_index_uid" => v6::Code::InvalidIndexUid,
            "invalid_min_word_length_for_typo" => v6::Code::InvalidSettingsTypoTolerance,
            "invalid_state" => v6::Code::InvalidState,
            "primary_key_inference_failed" => v6::Code::IndexPrimaryKeyNoCandidateFound,
            "index_primary_key_already_exists" => v6::Code::IndexPrimaryKeyAlreadyExists,
            "max_fields_limit_exceeded" => v6::Code::MaxFieldsLimitExceeded,
            "missing_document_id" => v6::Code::MissingDocumentId,
            "invalid_document_id" => v6::Code::InvalidDocumentId,
            "invalid_filter" => v6::Code::InvalidSettingsFilterableAttributes,
            "invalid_sort" => v6::Code::InvalidSettingsSortableAttributes,
            "bad_parameter" => v6::Code::BadParameter,
            "bad_request" => v6::Code::BadRequest,
            "database_size_limit_reached" => v6::Code::DatabaseSizeLimitReached,
            "document_not_found" => v6::Code::DocumentNotFound,
            "internal" => v6::Code::Internal,
            "invalid_geo_field" => v6::Code::InvalidDocumentGeoField,
            "invalid_ranking_rule" => v6::Code::InvalidSettingsRankingRules,
            "invalid_store_file" => v6::Code::InvalidStoreFile,
            "invalid_api_key" => v6::Code::InvalidApiKey,
            "missing_authorization_header" => v6::Code::MissingAuthorizationHeader,
            "no_space_left_on_device" => v6::Code::NoSpaceLeftOnDevice,
            "dump_not_found" => v6::Code::DumpNotFound,
            "task_not_found" => v6::Code::TaskNotFound,
            "payload_too_large" => v6::Code::PayloadTooLarge,
            "unretrievable_document" => v6::Code::UnretrievableDocument,
            "unsupported_media_type" => v6::Code::UnsupportedMediaType,
            "dump_already_processing" => v6::Code::DumpAlreadyProcessing,
            "dump_process_failed" => v6::Code::DumpProcessFailed,
            "invalid_content_type" => v6::Code::InvalidContentType,
            "missing_content_type" => v6::Code::MissingContentType,
            "malformed_payload" => v6::Code::MalformedPayload,
            "missing_payload" => v6::Code::MissingPayload,
            "api_key_not_found" => v6::Code::ApiKeyNotFound,
            "missing_parameter" => v6::Code::BadRequest,
            "invalid_api_key_actions" => v6::Code::InvalidApiKeyActions,
            "invalid_api_key_indexes" => v6::Code::InvalidApiKeyIndexes,
            "invalid_api_key_expires_at" => v6::Code::InvalidApiKeyExpiresAt,
            "invalid_api_key_description" => v6::Code::InvalidApiKeyDescription,
            "invalid_api_key_name" => v6::Code::InvalidApiKeyName,
            "invalid_api_key_uid" => v6::Code::InvalidApiKeyUid,
            "immutable_field" => v6::Code::BadRequest,
            "api_key_already_exists" => v6::Code::ApiKeyAlreadyExists,
            other => {
                log::warn!("Unknown error code {}", other);
                v6::Code::UnretrievableErrorCode
            }
        };
        v6::ResponseError::from_msg(error.message, code)
    }
}

impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
    fn from(settings: v5::Settings<T>) -> Self {
        v6::Settings {
            displayed_attributes: settings.displayed_attributes.into(),
            searchable_attributes: settings.searchable_attributes.into(),
            filterable_attributes: settings.filterable_attributes.into(),
            sortable_attributes: settings.sortable_attributes.into(),
            ranking_rules: {
                match settings.ranking_rules {
                    v5::settings::Setting::Set(ranking_rules) => {
                        let mut new_ranking_rules = vec![];
                        for rule in ranking_rules {
                            match v6::RankingRuleView::from_str(&rule) {
                                Ok(new_rule) => {
                                    new_ranking_rules.push(new_rule);
                                }
                                Err(_) => {
                                    log::warn!("Error while importing settings. The ranking rule `{rule}` does not exist anymore.")
                                }
                            }
                        }
                        v6::Setting::Set(new_ranking_rules)
                    }
                    v5::settings::Setting::Reset => v6::Setting::Reset,
                    v5::settings::Setting::NotSet => v6::Setting::NotSet,
                }
            },
            stop_words: settings.stop_words.into(),
            synonyms: settings.synonyms.into(),
            distinct_attribute: settings.distinct_attribute.into(),
            typo_tolerance: match settings.typo_tolerance {
                v5::Setting::Set(typo) => v6::Setting::Set(v6::TypoTolerance {
                    enabled: typo.enabled.into(),
                    min_word_size_for_typos: match typo.min_word_size_for_typos {
                        v5::Setting::Set(t) => v6::Setting::Set(v6::MinWordSizeForTypos {
                            one_typo: t.one_typo.into(),
                            two_typos: t.two_typos.into(),
                        }),
                        v5::Setting::Reset => v6::Setting::Reset,
                        v5::Setting::NotSet => v6::Setting::NotSet,
                    },
                    disable_on_words: typo.disable_on_words.into(),
                    disable_on_attributes: typo.disable_on_attributes.into(),
                }),
                v5::Setting::Reset => v6::Setting::Reset,
                v5::Setting::NotSet => v6::Setting::NotSet,
            },
            faceting: match settings.faceting {
                v5::Setting::Set(faceting) => v6::Setting::Set(v6::FacetingSettings {
                    max_values_per_facet: faceting.max_values_per_facet.into(),
                }),
                v5::Setting::Reset => v6::Setting::Reset,
                v5::Setting::NotSet => v6::Setting::NotSet,
            },
            pagination: match settings.pagination {
                v5::Setting::Set(pagination) => v6::Setting::Set(v6::PaginationSettings {
                    max_total_hits: pagination.max_total_hits.into(),
                }),
                v5::Setting::Reset => v6::Setting::Reset,
                v5::Setting::NotSet => v6::Setting::NotSet,
            },
            _kind: std::marker::PhantomData,
        }
    }
}

impl From<v5::Action> for v6::Action {
    fn from(key: v5::Action) -> Self {
        match key {
            v5::Action::All => v6::Action::All,
            v5::Action::Search => v6::Action::Search,
            v5::Action::DocumentsAll => v6::Action::DocumentsAll,
            v5::Action::DocumentsAdd => v6::Action::DocumentsAdd,
            v5::Action::DocumentsGet => v6::Action::DocumentsGet,
            v5::Action::DocumentsDelete => v6::Action::DocumentsDelete,
            v5::Action::IndexesAll => v6::Action::IndexesAll,
            v5::Action::IndexesAdd => v6::Action::IndexesAdd,
            v5::Action::IndexesGet => v6::Action::IndexesGet,
            v5::Action::IndexesUpdate => v6::Action::IndexesUpdate,
            v5::Action::IndexesDelete => v6::Action::IndexesDelete,
            v5::Action::TasksAll => v6::Action::TasksAll,
            v5::Action::TasksGet => v6::Action::TasksGet,
            v5::Action::SettingsAll => v6::Action::SettingsAll,
            v5::Action::SettingsGet => v6::Action::SettingsGet,
            v5::Action::SettingsUpdate => v6::Action::SettingsUpdate,
            v5::Action::StatsAll => v6::Action::StatsAll,
            v5::Action::StatsGet => v6::Action::StatsGet,
            v5::Action::MetricsAll => v6::Action::MetricsAll,
            v5::Action::MetricsGet => v6::Action::MetricsGet,
            v5::Action::DumpsAll => v6::Action::DumpsAll,
            v5::Action::DumpsCreate => v6::Action::DumpsCreate,
            v5::Action::Version => v6::Action::Version,
            v5::Action::KeysAdd => v6::Action::KeysAdd,
            v5::Action::KeysGet => v6::Action::KeysGet,
            v5::Action::KeysUpdate => v6::Action::KeysUpdate,
            v5::Action::KeysDelete => v6::Action::KeysDelete,
        }
    }
}

#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn compat_v5_v6() {
        let dump = File::open("tests/assets/v5.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = v5::V5Reader::open(dir).unwrap().to_v6();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-04 15:55:10.344982459 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
        assert_eq!(update_files.len(), 22);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_some()); // the enqueued document addition
        assert!(update_files[2..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"c9d2b467fe2fca0b35580d8a999808fb");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 200);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"e962baafd2fbae4cdd14e876053b0c5a");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }
}
dump/src/reader/mod.rs (new file, 685 lines)
@@ -0,0 +1,685 @@
use std::fs::File;
use std::io::{BufReader, Read};

use flate2::bufread::GzDecoder;
use serde::Deserialize;
use tempfile::TempDir;

use self::compat::v4_to_v5::CompatV4ToV5;
use self::compat::v5_to_v6::{CompatIndexV5ToV6, CompatV5ToV6};
use self::v5::V5Reader;
use self::v6::{V6IndexReader, V6Reader};
use crate::{Result, Version};

mod compat;

pub(self) mod v1;
pub(self) mod v2;
pub(self) mod v3;
pub(self) mod v4;
pub(self) mod v5;
pub(self) mod v6;

pub type Document = serde_json::Map<String, serde_json::Value>;
pub type UpdateFile = dyn Iterator<Item = Result<Document>>;

pub enum DumpReader {
    Current(V6Reader),
    Compat(CompatV5ToV6),
}

impl DumpReader {
    pub fn open(dump: impl Read) -> Result<DumpReader> {
        let path = TempDir::new()?;
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(path.path())?;

        #[derive(Deserialize)]
        #[serde(rename_all = "camelCase")]
        struct MetadataVersion {
            pub dump_version: Version,
        }
        let mut meta_file = File::open(path.path().join("metadata.json"))?;
        let MetadataVersion { dump_version } = serde_json::from_reader(&mut meta_file)?;

        match dump_version {
            Version::V1 => {
                Ok(v1::V1Reader::open(path)?.to_v2().to_v3().to_v4().to_v5().to_v6().into())
            }
            Version::V2 => Ok(v2::V2Reader::open(path)?.to_v3().to_v4().to_v5().to_v6().into()),
            Version::V3 => Ok(v3::V3Reader::open(path)?.to_v4().to_v5().to_v6().into()),
            Version::V4 => Ok(v4::V4Reader::open(path)?.to_v5().to_v6().into()),
            Version::V5 => Ok(v5::V5Reader::open(path)?.to_v6().into()),
            Version::V6 => Ok(v6::V6Reader::open(path)?.into()),
        }
    }

    pub fn version(&self) -> crate::Version {
        match self {
            DumpReader::Current(current) => current.version(),
            DumpReader::Compat(compat) => compat.version(),
        }
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        match self {
            DumpReader::Current(current) => current.date(),
            DumpReader::Compat(compat) => compat.date(),
        }
    }

    pub fn instance_uid(&self) -> Result<Option<uuid::Uuid>> {
        match self {
            DumpReader::Current(current) => current.instance_uid(),
            DumpReader::Compat(compat) => compat.instance_uid(),
        }
    }

    pub fn indexes(&self) -> Result<Box<dyn Iterator<Item = Result<DumpIndexReader>> + '_>> {
        match self {
            DumpReader::Current(current) => {
                let indexes = Box::new(current.indexes()?.map(|res| res.map(DumpIndexReader::from)))
                    as Box<dyn Iterator<Item = Result<DumpIndexReader>> + '_>;
                Ok(indexes)
            }
            DumpReader::Compat(compat) => {
                let indexes = Box::new(compat.indexes()?.map(|res| res.map(DumpIndexReader::from)))
                    as Box<dyn Iterator<Item = Result<DumpIndexReader>> + '_>;
                Ok(indexes)
            }
        }
    }

    pub fn tasks(
        &mut self,
    ) -> Result<Box<dyn Iterator<Item = Result<(v6::Task, Option<Box<UpdateFile>>)>> + '_>> {
        match self {
            DumpReader::Current(current) => Ok(current.tasks()),
            DumpReader::Compat(compat) => compat.tasks(),
        }
    }

    pub fn keys(&mut self) -> Result<Box<dyn Iterator<Item = Result<v6::Key>> + '_>> {
        match self {
            DumpReader::Current(current) => Ok(current.keys()),
            DumpReader::Compat(compat) => compat.keys(),
        }
    }
}

impl From<V6Reader> for DumpReader {
    fn from(value: V6Reader) -> Self {
        DumpReader::Current(value)
    }
}

impl From<CompatV5ToV6> for DumpReader {
    fn from(value: CompatV5ToV6) -> Self {
        DumpReader::Compat(value)
    }
}

impl From<V5Reader> for DumpReader {
    fn from(value: V5Reader) -> Self {
        DumpReader::Compat(value.to_v6())
    }
}

impl From<CompatV4ToV5> for DumpReader {
    fn from(value: CompatV4ToV5) -> Self {
        DumpReader::Compat(value.to_v6())
    }
}

pub enum DumpIndexReader {
    Current(v6::V6IndexReader),
    Compat(Box<CompatIndexV5ToV6>),
}

impl DumpIndexReader {
    pub fn new_v6(v6: v6::V6IndexReader) -> DumpIndexReader {
        DumpIndexReader::Current(v6)
    }

    pub fn metadata(&self) -> &crate::IndexMetadata {
        match self {
            DumpIndexReader::Current(v6) => v6.metadata(),
            DumpIndexReader::Compat(compat) => compat.metadata(),
        }
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<Document>> + '_>> {
        match self {
            DumpIndexReader::Current(v6) => v6
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
            DumpIndexReader::Compat(compat) => compat
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
        }
    }

    pub fn settings(&mut self) -> Result<v6::Settings<v6::Checked>> {
        match self {
            DumpIndexReader::Current(v6) => v6.settings(),
            DumpIndexReader::Compat(compat) => compat.settings(),
        }
    }
}

impl From<V6IndexReader> for DumpIndexReader {
    fn from(value: V6IndexReader) -> Self {
        DumpIndexReader::Current(value)
    }
}

impl From<CompatIndexV5ToV6> for DumpIndexReader {
    fn from(value: CompatIndexV5ToV6) -> Self {
        DumpIndexReader::Compat(Box::new(value))
    }
}
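A minimal usage sketch (not part of the diff): `DumpReader::open` is the single entry point, so importing any dump version looks the same from the caller's side. The `crate::reader` path and the `print_dump` helper are assumed for illustration; everything else only uses items defined in this file.

// Illustrative only: open a dump of any supported version and walk its content.
fn print_dump(path: &str) -> crate::Result<()> {
    let file = std::fs::File::open(path)?;
    // `open` sniffs the `dumpVersion` in `metadata.json` and stacks the
    // matching chain of `to_vN()` compatibility adapters.
    let mut dump = crate::reader::DumpReader::open(file)?;
    println!("dump version: {:?}", dump.version());
    for index in dump.indexes()? {
        let mut index = index?;
        println!("index: {}", index.metadata().uid);
        for document in index.documents()? {
            let _document = document?; // each document is a JSON map
        }
    }
    Ok(())
}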
#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;

    use meili_snap::insta;

    use super::*;

    #[test]
    fn import_dump_v5() {
        let dump = File::open("tests/assets/v5.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-04 15:55:10.344982459 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
        assert_eq!(update_files.len(), 22);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_some()); // the enqueued document addition
        assert!(update_files[2..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"c9d2b467fe2fca0b35580d8a999808fb");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-04T15:51:35.939396731Z",
          "updatedAt": "2022-10-04T15:55:01.897325373Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-04T15:51:35.291992167Z",
          "updatedAt": "2022-10-04T15:55:10.33561842Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 200);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"e962baafd2fbae4cdd14e876053b0c5a");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-04T15:51:37.381094632Z",
          "updatedAt": "2022-10-04T15:55:02.394503431Z"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn import_dump_v4() {
        let dump = File::open("tests/assets/v4.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-06 12:53:49.131989609 +00:00:00");
        insta::assert_display_snapshot!(dump.instance_uid().unwrap().unwrap(), @"9e15e977-f2ae-4761-943f-1eaf75fd736d");

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"c2445ddd1785528b80f2ba534d3bd00c");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys, { "[].uid" => "[uuid]" }), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-06T12:53:39.360187055Z",
          "updatedAt": "2022-10-06T12:53:40.603035979Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-06T12:53:38.710611568Z",
          "updatedAt": "2022-10-06T12:53:49.785862546Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"786022a66ecb992c8a2a60fee070a5ab");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-06T12:53:40.831649057Z",
          "updatedAt": "2022-10-06T12:53:41.116036186Z"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn import_dump_v3() {
        let dump = File::open("tests/assets/v3.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-07 11:39:03.709153554 +00:00:00");
        assert_eq!(dump.instance_uid().unwrap(), None);

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"cd12efd308fe3ed226356a727ab42ed3");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies2 = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");

        // movies2
        insta::assert_json_snapshot!(movies2.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies_2",
          "primaryKey": null,
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn import_dump_v2() {
        let dump = File::open("tests/assets/v2.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-09 20:27:59.904096267 +00:00:00");
        assert_eq!(dump.instance_uid().unwrap(), None);

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"bc616290adfe7d09a624cf6065ca9069");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies2 = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");

        // movies2
        insta::assert_json_snapshot!(movies2.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies_2",
          "primaryKey": null,
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn import_dump_v2_from_meilisearch_v0_22_0_issue_3435() {
        let dump = File::open("tests/assets/v2-v0.22.0.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");
        assert_eq!(dump.instance_uid().unwrap(), None);

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2db37756d8af1fb7623436b76e8956a6");
        assert_eq!(update_files.len(), 8);
        assert!(update_files[0..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn import_dump_v1() {
        let dump = File::open("tests/assets/v1.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        assert_eq!(dump.date(), None);
        assert_eq!(dump.instance_uid().unwrap(), None);

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"8df6eab075a44b3c1af6b726f9fd9a43");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[..].iter().all(|u| u.is_none())); // no update file in dump v1

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(keys), @"[]");
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-02T13:23:39.976870431Z",
          "updatedAt": "2022-10-02T13:27:54.353262482Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-02T13:15:29.477512777Z",
          "updatedAt": "2022-10-02T13:21:12.671204856Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b63dbed5bbc059f3e32bc471ae699bf5");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-02T13:38:26.358882984Z",
          "updatedAt": "2022-10-02T13:38:26.385609433Z"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"aa24c0cfc733d66c396237ad44263bed");
    }
}
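The `{ ".createdAt" => "[now]" }` arguments in the tests above are insta redactions: fields whose values change on every run are replaced by a stable placeholder before the snapshot comparison. A minimal self-contained sketch (hypothetical value, not from the diff):

// Illustrative redaction example using the insta crate.
#[cfg(test)]
mod redaction_example {
    #[test]
    fn redacted_snapshot() {
        let metadata = serde_json::json!({
            "uid": "products",
            "createdAt": time::OffsetDateTime::now_utc().to_string(),
        });
        // `.createdAt` is volatile, so it is redacted to a fixed token
        // before being compared against the stored snapshot.
        insta::assert_json_snapshot!(metadata, { ".createdAt" => "[now]" });
    }
}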
@@ -0,0 +1,24 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,38 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,31 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "genres",
    "id"
  ],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/mod.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,37 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,30 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/mod.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,31 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,34 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  }
}
@@ -0,0 +1,48 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  }
}
@@ -0,0 +1,40 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  }
}
@@ -0,0 +1,40 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,54 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,46 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
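The blocks above are insta `.snap` files: each records the `source` file and the `expression` it captures, followed by the serialized value. They are generated when `insta::assert_json_snapshot!` is called without an inline `@r###"..."###` snapshot, and reviewed with `cargo insta review`. Note how the progression from early to late dump versions is visible in the data itself: newer settings gain `sortableAttributes`, the `sort` ranking rule, `typoTolerance`, and finally `faceting` and `pagination`.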
dump/src/reader/v1/mod.rs (new file, 262 lines)
@@ -0,0 +1,262 @@
use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};

use serde::Deserialize;
use tempfile::TempDir;
use time::OffsetDateTime;

use super::compat::v1_to_v2::CompatV1ToV2;
use super::Document;
use crate::{IndexMetadata, Result, Version};

pub mod settings;
pub mod update;

pub struct V1Reader {
    pub dump: TempDir,
    pub db_version: String,
    pub dump_version: crate::Version,
    indexes: Vec<V1Index>,
}

pub struct IndexUuid {
    pub name: String,
    pub uid: String,
}
pub type Task = self::update::UpdateStatus;

struct V1Index {
    metadata: IndexMetadataV1,
    path: PathBuf,
}

impl V1Index {
    pub fn new(path: PathBuf, metadata: Index) -> Self {
        Self { metadata: metadata.into(), path }
    }

    pub fn open(&self) -> Result<V1IndexReader> {
        V1IndexReader::new(&self.path, self.metadata.clone())
    }

    pub fn metadata(&self) -> &IndexMetadata {
        &self.metadata.metadata
    }
}

pub struct V1IndexReader {
    metadata: IndexMetadataV1,
    documents: BufReader<File>,
    settings: BufReader<File>,
    updates: BufReader<File>,
}

impl V1IndexReader {
    pub fn new(path: &Path, metadata: IndexMetadataV1) -> Result<Self> {
        Ok(V1IndexReader {
            metadata,
            documents: BufReader::new(File::open(path.join("documents.jsonl"))?),
            settings: BufReader::new(File::open(path.join("settings.json"))?),
            updates: BufReader::new(File::open(path.join("updates.jsonl"))?),
        })
    }

    pub fn metadata(&self) -> &IndexMetadata {
        &self.metadata.metadata
    }

    pub fn documents(&mut self) -> Result<impl Iterator<Item = Result<Document>> + '_> {
        Ok((&mut self.documents)
            .lines()
            .map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) }))
    }

    pub fn settings(&mut self) -> Result<self::settings::Settings> {
        Ok(serde_json::from_reader(&mut self.settings)?)
    }

    pub fn tasks(self) -> impl Iterator<Item = Result<Task>> {
        self.updates.lines().map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) })
    }
}

impl V1Reader {
    pub fn open(dump: TempDir) -> Result<Self> {
        let meta_file = fs::read(dump.path().join("metadata.json"))?;
        let metadata: Metadata = serde_json::from_reader(&*meta_file)?;

        let mut indexes = Vec::new();

        for index in metadata.indexes.into_iter() {
            let index_path = dump.path().join(&index.uid);
            indexes.push(V1Index::new(index_path, index));
        }

        Ok(V1Reader {
            dump,
            indexes,
            db_version: metadata.db_version,
            dump_version: metadata.dump_version,
        })
    }

    pub fn to_v2(self) -> CompatV1ToV2 {
        CompatV1ToV2 { from: self }
    }

    pub fn index_uuid(&self) -> Vec<IndexUuid> {
        self.indexes
            .iter()
            .map(|index| IndexUuid {
                name: index.metadata.name.to_owned(),
                uid: index.metadata().uid.to_owned(),
            })
            .collect()
    }

    pub fn version(&self) -> Version {
        Version::V1
    }

    pub fn date(&self) -> Option<OffsetDateTime> {
        None
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V1IndexReader>> + '_> {
        Ok(self.indexes.iter().map(|index| index.open()))
    }
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Index {
    pub name: String,
    pub uid: String,
    #[serde(with = "time::serde::rfc3339")]
    created_at: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    updated_at: OffsetDateTime,
    pub primary_key: Option<String>,
}

#[derive(Clone)]
pub struct IndexMetadataV1 {
    pub name: String,
    pub metadata: crate::IndexMetadata,
}

impl From<Index> for IndexMetadataV1 {
    fn from(index: Index) -> Self {
        IndexMetadataV1 {
            name: index.name,
            metadata: crate::IndexMetadata {
                uid: index.uid,
                primary_key: index.primary_key,
                created_at: index.created_at,
                updated_at: index.updated_at,
            },
        }
    }
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    pub indexes: Vec<Index>,
    pub db_version: String,
    pub dump_version: crate::Version,
}
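// For reference, the on-disk layout a `V1Reader` expects once the dump
// archive is unpacked, reconstructed from `V1Reader::open` and
// `V1IndexReader::new` above:
//
// .
// ├── metadata.json        <- `Metadata`: indexes, dbVersion, dumpVersion
// └── <index uid>/
//     ├── documents.jsonl  <- one JSON document per line
//     ├── settings.json    <- `settings::Settings`
//     └── updates.jsonl    <- one `update::UpdateStatus` per line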
#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn read_dump_v1() {
        let dump = File::open("tests/assets/v1.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let dump = V1Reader::open(dir).unwrap();

        // top level infos
        assert_eq!(dump.date(), None);

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut dnd_spells = indexes.pop().unwrap();

        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-02T13:23:39.976870431Z",
          "updatedAt": "2022-10-02T13:27:54.353262482Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // products tasks
        let tasks = products.tasks().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"91de507f206ad21964584021932ba7a7");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-02T13:15:29.477512777Z",
          "updatedAt": "2022-10-02T13:21:12.671204856Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b63dbed5bbc059f3e32bc471ae699bf5");

        // movies tasks
        let tasks = movies.tasks().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"55eef4de2bef7e84c5ce0bee47488f56");

        // spells
        insta::assert_json_snapshot!(dnd_spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-02T13:38:26.358882984Z",
          "updatedAt": "2022-10-02T13:38:26.385609433Z"
        }
        "###);

        insta::assert_json_snapshot!(dnd_spells.settings().unwrap());
        let documents = dnd_spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"aa24c0cfc733d66c396237ad44263bed");

        // spells tasks
        let tasks = dnd_spells.tasks().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"836dd7d64d5ad20ad901c44b1b161a4c");
    }
}
dump/src/reader/v1/settings.rs (new file, 94 lines)
@@ -0,0 +1,94 @@
use std::collections::{BTreeMap, BTreeSet};
use std::result::Result as StdResult;
use std::str::FromStr;

use once_cell::sync::Lazy;
use regex::Regex;
use serde::{Deserialize, Deserializer, Serialize};

#[derive(Default, Clone, Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Settings {
    #[serde(default, deserialize_with = "deserialize_some")]
    pub ranking_rules: Option<Option<Vec<String>>>,
    #[serde(default, deserialize_with = "deserialize_some")]
    pub distinct_attribute: Option<Option<String>>,
    #[serde(default, deserialize_with = "deserialize_some")]
    pub searchable_attributes: Option<Option<Vec<String>>>,
    #[serde(default, deserialize_with = "deserialize_some")]
    pub displayed_attributes: Option<Option<BTreeSet<String>>>,
    #[serde(default, deserialize_with = "deserialize_some")]
    pub stop_words: Option<Option<BTreeSet<String>>>,
    #[serde(default, deserialize_with = "deserialize_some")]
    pub synonyms: Option<Option<BTreeMap<String, Vec<String>>>>,
    #[serde(default, deserialize_with = "deserialize_some")]
    pub attributes_for_faceting: Option<Option<Vec<String>>>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SettingsUpdate {
    pub ranking_rules: UpdateState<Vec<RankingRule>>,
    pub distinct_attribute: UpdateState<String>,
    pub primary_key: UpdateState<String>,
    pub searchable_attributes: UpdateState<Vec<String>>,
    pub displayed_attributes: UpdateState<BTreeSet<String>>,
    pub stop_words: UpdateState<BTreeSet<String>>,
    pub synonyms: UpdateState<BTreeMap<String, Vec<String>>>,
    pub attributes_for_faceting: UpdateState<Vec<String>>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UpdateState<T> {
    Update(T),
    Clear,
    Nothing,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RankingRule {
    Typo,
    Words,
    Proximity,
    Attribute,
    WordsPosition,
    Exactness,
    Asc(String),
    Desc(String),
}

static ASC_DESC_REGEX: Lazy<Regex> =
    Lazy::new(|| Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap());

impl FromStr for RankingRule {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "typo" => Self::Typo,
            "words" => Self::Words,
            "proximity" => Self::Proximity,
            "attribute" => Self::Attribute,
            "wordsPosition" => Self::WordsPosition,
            "exactness" => Self::Exactness,
            text => {
                let caps = ASC_DESC_REGEX.captures(text).ok_or(())?;
                let order = caps.get(1).unwrap().as_str();
                let field_name = caps.get(2).unwrap().as_str();
                match order {
                    "asc" => Self::Asc(field_name.to_string()),
                    "desc" => Self::Desc(field_name.to_string()),
                    _ => return Err(()),
                }
            }
        })
    }
}
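A quick illustration of the custom-rule branch above (hypothetical test, not part of the diff): built-in names map to unit variants, while the regex peels `asc(...)`/`desc(...)` apart into their field name.

// Illustrative only: how v1 ranking-rule strings parse.
#[cfg(test)]
mod ranking_rule_example {
    use std::str::FromStr;

    use super::RankingRule;

    #[test]
    fn parses_builtin_and_custom_rules() {
        assert!(matches!(RankingRule::from_str("typo"), Ok(RankingRule::Typo)));
        // "asc(release_date)" is the v1 spelling of the later "release_date:asc".
        assert!(matches!(
            RankingRule::from_str("asc(release_date)"),
            Ok(RankingRule::Asc(field)) if field == "release_date"
        ));
        // Anything else is rejected.
        assert!(RankingRule::from_str("nonsense").is_err());
    }
}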
// Any value that is present is considered Some value, including null.
fn deserialize_some<'de, T, D>(deserializer: D) -> StdResult<Option<T>, D::Error>
where
    T: Deserialize<'de>,
    D: Deserializer<'de>,
{
    Deserialize::deserialize(deserializer).map(Some)
}
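Combined with `#[serde(default)]` on the fields above, this helper gives each setting three distinct states: absent (`None`), explicitly `null` (`Some(None)`), or set to a value (`Some(Some(v))`). A minimal sketch of the behaviour for `ranking_rules`:

// Illustrative only: the three states of `Option<Option<T>>` deserialization.
// `{}`                         -> ranking_rules == None             (untouched)
// `{"rankingRules": null}`     -> ranking_rules == Some(None)       (reset)
// `{"rankingRules": ["typo"]}` -> ranking_rules == Some(Some(vec))  (replaced)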
@@ -0,0 +1,24 @@
---
source: dump/src/reader/v1/mod.rs
expression: dnd_spells.settings().unwrap()
---
{
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "wordsPosition",
    "exactness"
  ],
  "distinctAttribute": null,
  "searchableAttributes": [
    "*"
  ],
  "displayedAttributes": [
    "*"
  ],
  "stopWords": [],
  "synonyms": {},
  "attributesForFaceting": []
}
@@ -0,0 +1,38 @@
---
source: dump/src/reader/v1/mod.rs
expression: products.settings().unwrap()
---
{
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "wordsPosition",
    "exactness"
  ],
  "distinctAttribute": null,
  "searchableAttributes": [
    "*"
  ],
  "displayedAttributes": [
    "*"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "attributesForFaceting": []
}
@@ -0,0 +1,28 @@
---
source: dump/src/reader/v1/mod.rs
expression: movies.settings().unwrap()
---
{
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "wordsPosition",
    "exactness",
    "asc(release_date)"
  ],
  "distinctAttribute": null,
  "searchableAttributes": [
    "*"
  ],
  "displayedAttributes": [
    "*"
  ],
  "stopWords": [],
  "synonyms": {},
  "attributesForFaceting": [
    "id",
    "genres"
  ]
}
dump/src/reader/v1/update.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

use super::settings::SettingsUpdate;

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "name")]
pub enum UpdateType {
    ClearAll,
    Customs,
    DocumentsAddition { number: usize },
    DocumentsPartial { number: usize },
    DocumentsDeletion { number: usize },
    Settings { settings: Box<SettingsUpdate> },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProcessedUpdateResult {
    pub update_id: u64,
    #[serde(rename = "type")]
    pub update_type: UpdateType,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_type: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_code: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_link: Option<String>,
    pub duration: f64, // in seconds
    #[serde(with = "time::serde::rfc3339")]
    pub enqueued_at: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    pub processed_at: OffsetDateTime,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EnqueuedUpdateResult {
    pub update_id: u64,
    #[serde(rename = "type")]
    pub update_type: UpdateType,
    #[serde(with = "time::serde::rfc3339")]
    pub enqueued_at: OffsetDateTime,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", tag = "status")]
pub enum UpdateStatus {
    Enqueued {
        #[serde(flatten)]
        content: EnqueuedUpdateResult,
    },
    Failed {
        #[serde(flatten)]
        content: ProcessedUpdateResult,
    },
    Processed {
        #[serde(flatten)]
        content: ProcessedUpdateResult,
    },
}

impl UpdateStatus {
    pub fn enqueued_at(&self) -> &OffsetDateTime {
        match self {
            UpdateStatus::Enqueued { content } => &content.enqueued_at,
            UpdateStatus::Failed { content } | UpdateStatus::Processed { content } => {
                &content.enqueued_at
            }
        }
    }
}
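
// A hedged shape sketch (not from this diff): with `tag = "status"` on the enum
// and `#[serde(flatten)]` on each variant's content, an update deserializes from
// a single flat JSON object rather than a nested one. The concrete values below
// are made up, and serde_json is assumed to be available as a dev-dependency.
#[cfg(test)]
mod update_status_shape_sketch {
    use super::*;

    #[test]
    fn enqueued_is_flat() {
        let json = r#"{
            "status": "enqueued",
            "updateId": 42,
            "type": { "name": "ClearAll" },
            "enqueuedAt": "2022-10-09T20:27:59Z"
        }"#;
        let status: UpdateStatus = serde_json::from_str(json).unwrap();
        assert!(matches!(status, UpdateStatus::Enqueued { .. }));
    }
}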
14  dump/src/reader/v2/errors.rs  Normal file
@@ -0,0 +1,14 @@
use http::StatusCode;
use serde::Deserialize;

#[derive(Debug, Deserialize, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(rename_all = "camelCase")]
pub struct ResponseError {
    #[serde(skip)]
    pub code: StatusCode,
    pub message: String,
    pub error_code: String,
    pub error_type: String,
    pub error_link: String,
}
18  dump/src/reader/v2/meta.rs  Normal file
@@ -0,0 +1,18 @@
use serde::Deserialize;
use uuid::Uuid;

use super::Settings;

#[derive(Deserialize, Debug, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct IndexUuid {
    pub uid: String,
    pub uuid: Uuid,
}

#[derive(Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct DumpMeta {
    pub settings: Settings<super::Unchecked>,
    pub primary_key: Option<String>,
}
387  dump/src/reader/v2/mod.rs  Normal file
@@ -0,0 +1,387 @@
//! ```text
//! .
//! ├── indexes
//! │   ├── index-40d14c5f-37ae-4873-9d51-b69e014a0d30
//! │   │   ├── documents.jsonl
//! │   │   └── meta.json
//! │   ├── index-88202369-4524-4410-9b3d-3e924c867fec
//! │   │   ├── documents.jsonl
//! │   │   └── meta.json
//! │   ├── index-b7f2d03b-bf9b-40d9-a25b-94dc5ec60c32
//! │   │   ├── documents.jsonl
//! │   │   └── meta.json
//! │   └── index-dc9070b3-572d-4f30-ab45-d4903ab71708
//! │       ├── documents.jsonl
//! │       └── meta.json
//! ├── index_uuids
//! │   └── data.jsonl
//! ├── metadata.json
//! └── updates
//!     ├── data.jsonl
//!     └── update_files
//!         └── update_202573df-718b-4d80-9a65-2ee397c23dc3
//! ```

use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::Path;

use serde::{Deserialize, Serialize};
use tempfile::TempDir;
use time::OffsetDateTime;

pub mod errors;
pub mod meta;
pub mod settings;
pub mod updates;

use self::meta::{DumpMeta, IndexUuid};
use super::compat::v2_to_v3::CompatV2ToV3;
use super::Document;
use crate::{IndexMetadata, Result, Version};

pub type Settings<T> = settings::Settings<T>;
pub type Setting<T> = settings::Setting<T>;
pub type Checked = settings::Checked;
pub type Unchecked = settings::Unchecked;

pub type Task = updates::UpdateEntry;

// everything related to the errors
pub type ResponseError = errors::ResponseError;

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    db_version: String,
    index_db_size: usize,
    update_db_size: usize,
    #[serde(with = "time::serde::rfc3339")]
    dump_date: OffsetDateTime,
}

pub struct V2Reader {
    dump: TempDir,
    metadata: Metadata,
    tasks: BufReader<File>,
    pub index_uuid: Vec<IndexUuid>,
}

impl V2Reader {
    pub fn open(dump: TempDir) -> Result<Self> {
        let meta_file = fs::read(dump.path().join("metadata.json"))?;
        let metadata = serde_json::from_reader(&*meta_file)?;
        let index_uuid = File::open(dump.path().join("index_uuids/data.jsonl"))?;
        let index_uuid = BufReader::new(index_uuid);
        let index_uuid = index_uuid
            .lines()
            .map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) })
            .collect::<Result<Vec<_>>>()?;

        Ok(V2Reader {
            metadata,
            tasks: BufReader::new(
                File::open(dump.path().join("updates").join("data.jsonl")).unwrap(),
            ),
            index_uuid,
            dump,
        })
    }

    pub fn to_v3(self) -> CompatV2ToV3 {
        CompatV2ToV3::new(self)
    }

    pub fn index_uuid(&self) -> Vec<IndexUuid> {
        self.index_uuid.clone()
    }

    pub fn version(&self) -> Version {
        Version::V2
    }

    pub fn date(&self) -> Option<OffsetDateTime> {
        Some(self.metadata.dump_date)
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V2IndexReader>> + '_> {
        Ok(self.index_uuid.iter().map(|index| -> Result<_> {
            V2IndexReader::new(
                index.uid.clone(),
                &self.dump.path().join("indexes").join(format!("index-{}", index.uuid)),
            )
        }))
    }

    pub fn tasks(&mut self) -> Box<dyn Iterator<Item = Result<(Task, Option<UpdateFile>)>> + '_> {
        Box::new((&mut self.tasks).lines().map(|line| -> Result<_> {
            let task: Task = serde_json::from_str(&line?)?;
            if !task.is_finished() {
                if let Some(uuid) = task.get_content_uuid() {
                    let update_file_path = self
                        .dump
                        .path()
                        .join("updates")
                        .join("update_files")
                        .join(format!("update_{}", uuid));
                    Ok((task, Some(UpdateFile::new(&update_file_path)?)))
                } else {
                    Ok((task, None))
                }
            } else {
                Ok((task, None))
            }
        }))
    }
}

pub struct V2IndexReader {
    metadata: IndexMetadata,
    settings: Settings<Checked>,

    documents: BufReader<File>,
}

impl V2IndexReader {
    pub fn new(name: String, path: &Path) -> Result<Self> {
        let meta = File::open(path.join("meta.json"))?;
        let meta: DumpMeta = serde_json::from_reader(meta)?;

        let metadata = IndexMetadata {
            uid: name,
            primary_key: meta.primary_key,
            // FIXME: Iterate over the whole task queue to find the creation and last update date.
            created_at: OffsetDateTime::now_utc(),
            updated_at: OffsetDateTime::now_utc(),
        };

        let ret = V2IndexReader {
            metadata,
            settings: meta.settings.check(),
            documents: BufReader::new(File::open(path.join("documents.jsonl"))?),
        };

        Ok(ret)
    }

    pub fn metadata(&self) -> &IndexMetadata {
        &self.metadata
    }

    pub fn documents(&mut self) -> Result<impl Iterator<Item = Result<Document>> + '_> {
        Ok((&mut self.documents)
            .lines()
            .map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) }))
    }

    pub fn settings(&mut self) -> Result<Settings<Checked>> {
        Ok(self.settings.clone())
    }
}

pub struct UpdateFile {
    documents: Vec<Document>,
    index: usize,
}

impl UpdateFile {
    fn new(path: &Path) -> Result<Self> {
        let reader = BufReader::new(File::open(path)?);
        Ok(UpdateFile { documents: serde_json::from_reader(reader)?, index: 0 })
    }
}

impl Iterator for UpdateFile {
    type Item = Result<Document>;

    fn next(&mut self) -> Option<Self::Item> {
        self.index += 1;
        self.documents.get(self.index - 1).cloned().map(Ok)
    }
}

#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn read_dump_v2() {
        let dump = File::open("tests/assets/v2.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = V2Reader::open(dir).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-09 20:27:59.904096267 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, mut update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"ec5fc0a14bf735ad4e361d5aa8a89ac6");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed

        let update_file = update_files.remove(0).unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(update_file), @"7b8889539b669c7b9ddba448bafa385d");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies2 = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");

        // movies2
        insta::assert_json_snapshot!(movies2.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies_2",
          "primaryKey": null,
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn read_dump_v2_from_meilisearch_v0_22_0_issue_3435() {
        let dump = File::open("tests/assets/v2-v0.22.0.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = V2Reader::open(dir).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"aca8ba13046272664eb3ea2da3031633");
        assert_eq!(update_files.len(), 8);
        assert!(update_files[0..].iter().all(|u| u.is_none())); // everything has already been processed

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }
}
269  dump/src/reader/v2/settings.rs  Normal file
@@ -0,0 +1,269 @@
use std::collections::{BTreeMap, BTreeSet};
use std::fmt;
use std::marker::PhantomData;
use std::str::FromStr;

use serde::{Deserialize, Deserializer};

#[cfg(test)]
fn serialize_with_wildcard<S>(
    field: &Setting<Vec<String>>,
    s: S,
) -> std::result::Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    use serde::Serialize;

    let wildcard = vec!["*".to_string()];
    match field {
        Setting::Set(value) => Some(value),
        Setting::Reset => Some(&wildcard),
        Setting::NotSet => None,
    }
    .serialize(s)
}
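
// Behavior sketch (not in the original file): `Reset` serializes back to the
// `["*"]` wildcard users originally sent, while `Set` keeps the explicit list.
// serde_json is assumed to be available as a dev-dependency.
#[cfg(test)]
mod wildcard_sketch {
    use super::*;

    #[test]
    fn reset_serializes_as_wildcard() {
        let out = serialize_with_wildcard(&Setting::Reset, serde_json::value::Serializer).unwrap();
        assert_eq!(out, serde_json::json!(["*"]));

        let set = Setting::Set(vec!["title".to_string()]);
        let out = serialize_with_wildcard(&set, serde_json::value::Serializer).unwrap();
        assert_eq!(out, serde_json::json!(["title"]));
    }
}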

#[derive(Clone, Default, Debug)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct Checked;

#[derive(Clone, Default, Debug, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct Unchecked;

#[derive(Debug, Clone, Default, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[serde(bound(serialize = "T: serde::Serialize", deserialize = "T: Deserialize<'static>"))]
pub struct Settings<T> {
    #[serde(
        default,
        serialize_with = "serialize_with_wildcard",
        skip_serializing_if = "Setting::is_not_set"
    )]
    pub displayed_attributes: Setting<Vec<String>>,

    #[serde(
        default,
        serialize_with = "serialize_with_wildcard",
        skip_serializing_if = "Setting::is_not_set"
    )]
    pub searchable_attributes: Setting<Vec<String>>,

    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub filterable_attributes: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub sortable_attributes: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub ranking_rules: Setting<Vec<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub stop_words: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub synonyms: Setting<BTreeMap<String, Vec<String>>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub distinct_attribute: Setting<String>,

    #[serde(skip)]
    pub _kind: PhantomData<T>,
}

impl Settings<Unchecked> {
    pub fn check(self) -> Settings<Checked> {
        let displayed_attributes = match self.displayed_attributes {
            Setting::Set(fields) => {
                if fields.iter().any(|f| f == "*") {
                    Setting::Reset
                } else {
                    Setting::Set(fields)
                }
            }
            otherwise => otherwise,
        };

        let searchable_attributes = match self.searchable_attributes {
            Setting::Set(fields) => {
                if fields.iter().any(|f| f == "*") {
                    Setting::Reset
                } else {
                    Setting::Set(fields)
                }
            }
            otherwise => otherwise,
        };

        Settings {
            displayed_attributes,
            searchable_attributes,
            filterable_attributes: self.filterable_attributes,
            sortable_attributes: self.sortable_attributes,
            ranking_rules: self.ranking_rules,
            stop_words: self.stop_words,
            synonyms: self.synonyms,
            distinct_attribute: self.distinct_attribute,
            _kind: PhantomData,
        }
    }
}

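// A quick check of the normalization above (illustrative, not in the diff):
// a user-supplied `["*"]` folds into `Setting::Reset` ("back to default"),
// while an explicit field list is kept verbatim.
#[cfg(test)]
mod check_sketch {
    use super::*;

    #[test]
    fn wildcard_folds_to_reset() {
        let settings = Settings::<Unchecked> {
            displayed_attributes: Setting::Set(vec!["*".to_string()]),
            searchable_attributes: Setting::Set(vec!["title".to_string()]),
            ..Default::default()
        };
        let checked = settings.check();
        assert!(matches!(checked.displayed_attributes, Setting::Reset));
        assert!(matches!(checked.searchable_attributes, Setting::Set(_)));
    }
}
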
#[derive(Debug, Clone, PartialEq)]
pub enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> Default for Setting<T> {
    fn default() -> Self {
        Self::NotSet
    }
}

impl<T> Setting<T> {
    pub const fn is_not_set(&self) -> bool {
        matches!(self, Self::NotSet)
    }

    pub fn map<A>(self, f: fn(T) -> A) -> Setting<A> {
        match self {
            Setting::Set(a) => Setting::Set(f(a)),
            Setting::Reset => Setting::Reset,
            Setting::NotSet => Setting::NotSet,
        }
    }
}

#[cfg(test)]
impl<T: serde::Serialize> serde::Serialize for Setting<T> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Self::Set(value) => Some(value),
            // Usually `NotSet` is simply not serialized, thanks to the
            // `skip_serializing_if` field attribute.
            Self::NotSet | Self::Reset => None,
        }
        .serialize(serializer)
    }
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for Setting<T> {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(|x| match x {
            Some(x) => Self::Set(x),
            None => Self::Reset, // Reset is forced by sending a null value
        })
    }
}

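// Tri-state behavior sketch (illustrative, not in the diff): a missing field
// stays `NotSet` via `#[serde(default)]`, an explicit `null` becomes `Reset`,
// and a concrete value becomes `Set`. `Wrapper` is a made-up type; serde_json
// is assumed as a dev-dependency.
#[cfg(test)]
mod setting_tristate_sketch {
    use super::*;

    #[derive(serde::Deserialize)]
    struct Wrapper {
        #[serde(default)]
        distinct: Setting<String>,
    }

    #[test]
    fn missing_null_and_value_are_distinct() {
        let w: Wrapper = serde_json::from_str("{}").unwrap();
        assert!(matches!(w.distinct, Setting::NotSet));

        let w: Wrapper = serde_json::from_str(r#"{"distinct":null}"#).unwrap();
        assert!(matches!(w.distinct, Setting::Reset));

        let w: Wrapper = serde_json::from_str(r#"{"distinct":"sku"}"#).unwrap();
        assert!(matches!(w.distinct, Setting::Set(_)));
    }
}
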
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Criterion {
    /// Sorted by decreasing number of matched query terms.
    /// Query words at the front of an attribute are considered better than those at the back.
    Words,
    /// Sorted by increasing number of typos.
    Typo,
    /// Sorted by increasing distance between matched query terms.
    Proximity,
    /// Documents with query words contained in more important
    /// attributes are considered better.
    Attribute,
    /// Dynamically sort the documents at query time. None, one or multiple Asc/Desc sortable
    /// attributes can be used in place of this criterion at query time.
    Sort,
    /// Sorted by the similarity of the matched words with the query words.
    Exactness,
    /// Sorted by the increasing value of the field specified.
    Asc(String),
    /// Sorted by the decreasing value of the field specified.
    Desc(String),
}

impl Criterion {
    /// Returns the field name parameter of this criterion.
    pub fn field_name(&self) -> Option<&str> {
        match self {
            Criterion::Asc(name) | Criterion::Desc(name) => Some(name),
            _otherwise => None,
        }
    }
}

impl FromStr for Criterion {
    // Since we're not going to show a custom error message, we can use `()` as
    // the error type.
    type Err = ();

    fn from_str(text: &str) -> Result<Criterion, Self::Err> {
        match text {
            "words" => Ok(Criterion::Words),
            "typo" => Ok(Criterion::Typo),
            "proximity" => Ok(Criterion::Proximity),
            "attribute" => Ok(Criterion::Attribute),
            "sort" => Ok(Criterion::Sort),
            "exactness" => Ok(Criterion::Exactness),
            text => match AscDesc::from_str(text) {
                Ok(AscDesc::Asc(field)) => Ok(Criterion::Asc(field)),
                Ok(AscDesc::Desc(field)) => Ok(Criterion::Desc(field)),
                Err(_) => Err(()),
            },
        }
    }
}

#[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
pub enum AscDesc {
    Asc(String),
    Desc(String),
}

impl FromStr for AscDesc {
    type Err = ();

    // Since we don't know whether this comes from the old or the new syntax, we
    // need to check for both syntaxes.
    // WARN: this code doesn't come from the original meilisearch v0.22.0 but was
    // written specifically to be able to import the dumps of both meilisearch
    // v0.21.0 and meilisearch v0.22.0.
    fn from_str(text: &str) -> Result<AscDesc, Self::Err> {
        if let Some((field_name, asc_desc)) = text.rsplit_once(':') {
            match asc_desc {
                "asc" => Ok(AscDesc::Asc(field_name.to_string())),
                "desc" => Ok(AscDesc::Desc(field_name.to_string())),
                _ => Err(()),
            }
        } else if text.starts_with("asc(") && text.ends_with(')') {
            Ok(AscDesc::Asc(
                text.strip_prefix("asc(").unwrap().strip_suffix(')').unwrap().to_string(),
            ))
        } else if text.starts_with("desc(") && text.ends_with(')') {
            Ok(AscDesc::Desc(
                text.strip_prefix("desc(").unwrap().strip_suffix(')').unwrap().to_string(),
            ))
        } else {
            Err(())
        }
    }
}
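
// Both accepted syntaxes side by side (illustrative, not in the diff): v0.21
// dumps wrap the field (`asc(release_date)`) while v0.22 dumps use a colon
// suffix (`release_date:asc`); both parse to the same variant.
#[cfg(test)]
mod asc_desc_sketch {
    use super::*;

    #[test]
    fn old_and_new_syntax_agree() {
        assert_eq!(AscDesc::from_str("asc(release_date)"), Ok(AscDesc::Asc("release_date".to_string())));
        assert_eq!(AscDesc::from_str("release_date:asc"), Ok(AscDesc::Asc("release_date".to_string())));
        assert_eq!(AscDesc::from_str("release_date:desc"), Ok(AscDesc::Desc("release_date".to_string())));
        assert!(AscDesc::from_str("release_date").is_err());
    }
}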

impl fmt::Display for Criterion {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use Criterion::*;

        match self {
            Words => f.write_str("words"),
            Typo => f.write_str("typo"),
            Proximity => f.write_str("proximity"),
            Attribute => f.write_str("attribute"),
            Sort => f.write_str("sort"),
            Exactness => f.write_str("exactness"),
            Asc(attr) => write!(f, "{}:asc", attr),
            Desc(attr) => write!(f, "{}:desc", attr),
        }
    }
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/v2/mod.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/v2/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,37 @@
---
source: dump/src/reader/v2/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/v2/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "asc(release_date)"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/v2/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/v2/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,30 @@
---
source: dump/src/reader/v2/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
230  dump/src/reader/v2/updates.rs  Normal file
@@ -0,0 +1,230 @@
use serde::Deserialize;
use time::OffsetDateTime;
use uuid::Uuid;

use super::{ResponseError, Settings, Unchecked};

#[derive(Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct UpdateEntry {
    pub uuid: Uuid,
    pub update: UpdateStatus,
}

impl UpdateEntry {
    pub fn is_finished(&self) -> bool {
        match self.update {
            UpdateStatus::Processing(_) | UpdateStatus::Enqueued(_) => false,
            UpdateStatus::Processed(_) | UpdateStatus::Aborted(_) | UpdateStatus::Failed(_) => true,
        }
    }

    pub fn get_content_uuid(&self) -> Option<&Uuid> {
        match &self.update {
            UpdateStatus::Enqueued(enqueued) => enqueued.content.as_ref(),
            UpdateStatus::Processing(processing) => processing.from.content.as_ref(),
            UpdateStatus::Processed(processed) => processed.from.from.content.as_ref(),
            UpdateStatus::Aborted(aborted) => aborted.from.content.as_ref(),
            UpdateStatus::Failed(failed) => failed.from.from.content.as_ref(),
        }
    }
}
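
// Why the `.from.from` chains above (explanatory note, not in the diff): each
// status struct below flattens its predecessor, so a `Processed` or `Failed`
// update contains the `Processing` it came from, which in turn contains the
// original `Enqueued` holding the content uuid. `get_content_uuid` simply walks
// the Enqueued -> Processing -> Processed/Failed chain backwards.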

#[derive(Debug, Clone, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub enum UpdateResult {
    DocumentsAddition(DocumentAdditionResult),
    DocumentDeletion { deleted: u64 },
    Other,
}

#[derive(Debug, Deserialize, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct DocumentAdditionResult {
    pub nb_documents: usize,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[non_exhaustive]
pub enum IndexDocumentsMethod {
    /// Replace the previous document with the new one,
    /// removing all the already known attributes.
    ReplaceDocuments,

    /// Merge the previous version of the document with the new version,
    /// replacing old attribute values with the new ones and adding the new attributes.
    UpdateDocuments,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[non_exhaustive]
pub enum UpdateFormat {
    /// The given update is a real **comma separated** CSV with headers on the first line.
    Csv,
    /// The given update is a JSON array with documents inside.
    Json,
    /// The given update is a JSON stream with a document on each line.
    JsonStream,
}

#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(tag = "type")]
pub enum UpdateMeta {
    DocumentsAddition {
        method: IndexDocumentsMethod,
        format: UpdateFormat,
        primary_key: Option<String>,
    },
    ClearDocuments,
    DeleteDocuments {
        ids: Vec<String>,
    },
    Settings(Settings<Unchecked>),
}

#[derive(Debug, Deserialize, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(rename_all = "camelCase")]
pub struct Enqueued {
    pub update_id: u64,
    pub meta: UpdateMeta,
    #[serde(with = "time::serde::rfc3339")]
    pub enqueued_at: OffsetDateTime,
    pub content: Option<Uuid>,
}

impl Enqueued {
    pub fn meta(&self) -> &UpdateMeta {
        &self.meta
    }

    pub fn id(&self) -> u64 {
        self.update_id
    }
}

#[derive(Debug, Deserialize, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(rename_all = "camelCase")]
pub struct Processed {
    pub success: UpdateResult,
    #[serde(with = "time::serde::rfc3339")]
    pub processed_at: OffsetDateTime,
    #[serde(flatten)]
    pub from: Processing,
}

impl Processed {
    pub fn id(&self) -> u64 {
        self.from.id()
    }

    pub fn meta(&self) -> &UpdateMeta {
        self.from.meta()
    }
}

#[derive(Debug, Deserialize, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(rename_all = "camelCase")]
pub struct Processing {
    #[serde(flatten)]
    pub from: Enqueued,
    #[serde(with = "time::serde::rfc3339")]
    pub started_processing_at: OffsetDateTime,
}

impl Processing {
    pub fn id(&self) -> u64 {
        self.from.id()
    }

    pub fn meta(&self) -> &UpdateMeta {
        self.from.meta()
    }
}

#[derive(Debug, Deserialize, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(rename_all = "camelCase")]
pub struct Aborted {
    #[serde(flatten)]
    pub from: Enqueued,
    #[serde(with = "time::serde::rfc3339")]
    pub aborted_at: OffsetDateTime,
}

impl Aborted {
    pub fn id(&self) -> u64 {
        self.from.id()
    }

    pub fn meta(&self) -> &UpdateMeta {
        self.from.meta()
    }
}

#[derive(Debug, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(rename_all = "camelCase")]
pub struct Failed {
    #[serde(flatten)]
    pub from: Processing,
    pub error: ResponseError,
    #[serde(with = "time::serde::rfc3339")]
    pub failed_at: OffsetDateTime,
}

impl Failed {
    pub fn id(&self) -> u64 {
        self.from.id()
    }

    pub fn meta(&self) -> &UpdateMeta {
        self.from.meta()
    }
}

#[derive(Debug, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(tag = "status", rename_all = "camelCase")]
pub enum UpdateStatus {
    Processing(Processing),
    Enqueued(Enqueued),
    Processed(Processed),
    Aborted(Aborted),
    Failed(Failed),
}

impl UpdateStatus {
    pub fn id(&self) -> u64 {
        match self {
            UpdateStatus::Processing(u) => u.id(),
            UpdateStatus::Enqueued(u) => u.id(),
            UpdateStatus::Processed(u) => u.id(),
            UpdateStatus::Aborted(u) => u.id(),
            UpdateStatus::Failed(u) => u.id(),
        }
    }

    pub fn meta(&self) -> &UpdateMeta {
        match self {
            UpdateStatus::Processing(u) => u.meta(),
            UpdateStatus::Enqueued(u) => u.meta(),
            UpdateStatus::Processed(u) => u.meta(),
            UpdateStatus::Aborted(u) => u.meta(),
            UpdateStatus::Failed(u) => u.meta(),
        }
    }

    pub fn processed(&self) -> Option<&Processed> {
        match self {
            UpdateStatus::Processed(p) => Some(p),
            _ => None,
        }
    }
}
51  dump/src/reader/v3/errors.rs  Normal file
@@ -0,0 +1,51 @@
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum Code {
    // index related error
    CreateIndex,
    IndexAlreadyExists,
    IndexNotFound,
    InvalidIndexUid,

    // invalid state error
    InvalidState,
    MissingPrimaryKey,
    PrimaryKeyAlreadyPresent,

    MaxFieldsLimitExceeded,
    MissingDocumentId,
    InvalidDocumentId,

    Filter,
    Sort,

    BadParameter,
    BadRequest,
    DatabaseSizeLimitReached,
    DocumentNotFound,
    Internal,
    InvalidGeoField,
    InvalidRankingRule,
    InvalidStore,
    InvalidToken,
    MissingAuthorizationHeader,
    NoSpaceLeftOnDevice,
    DumpNotFound,
    TaskNotFound,
    PayloadTooLarge,
    RetrieveDocument,
    SearchDocuments,
    UnsupportedMediaType,

    DumpAlreadyInProgress,
    DumpProcessFailed,

    InvalidContentType,
    MissingContentType,
    MalformedPayload,
    MissingPayload,

    MalformedDump,
    UnretrievableErrorCode,
}
18  dump/src/reader/v3/meta.rs  Normal file
@@ -0,0 +1,18 @@
use serde::Deserialize;
use uuid::Uuid;

use super::Settings;

#[derive(Deserialize, Debug, Clone)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct IndexUuid {
    pub uid: String,
    pub uuid: Uuid,
}

#[derive(Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct DumpMeta {
    pub settings: Settings<super::Unchecked>,
    pub primary_key: Option<String>,
}
325  dump/src/reader/v3/mod.rs  Normal file
@@ -0,0 +1,325 @@
//! ```text
//! .
//! ├── indexes
//! │   ├── 01d7dd17-8241-4f1f-a7d1-2d1cb255f5b0
//! │   │   ├── documents.jsonl
//! │   │   └── meta.json
//! │   ├── 78be64a3-cae1-449e-b7ed-13e77c9a8a0c
//! │   │   ├── documents.jsonl
//! │   │   └── meta.json
//! │   ├── ba553439-18fe-4733-ba53-44eed898280c
//! │   │   ├── documents.jsonl
//! │   │   └── meta.json
//! │   └── c408bc22-5859-49d1-8e9f-c88e2fa95cb0
//! │       ├── documents.jsonl
//! │       └── meta.json
//! ├── index_uuids
//! │   └── data.jsonl
//! ├── metadata.json
//! └── updates
//!     ├── data.jsonl
//!     └── updates_files
//!         └── 66d3f12d-fcf3-4b53-88cb-407017373de7
//! ```

use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::Path;

use serde::{Deserialize, Serialize};
use tempfile::TempDir;
use time::OffsetDateTime;

pub mod errors;
pub mod meta;
pub mod settings;
pub mod updates;

use self::meta::{DumpMeta, IndexUuid};
use super::compat::v3_to_v4::CompatV3ToV4;
use super::Document;
use crate::{Error, IndexMetadata, Result, Version};

pub type Settings<T> = settings::Settings<T>;
pub type Checked = settings::Checked;
pub type Unchecked = settings::Unchecked;

pub type Task = updates::UpdateEntry;

// ===== Other types to clarify the code of the compat module
// everything related to the tasks
pub type Status = updates::UpdateStatus;
pub type Kind = updates::Update;

// everything related to the settings
pub type Setting<T> = settings::Setting<T>;

// everything related to the errors
pub type Code = errors::Code;

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    db_version: String,
    index_db_size: usize,
    update_db_size: usize,
    #[serde(with = "time::serde::rfc3339")]
    dump_date: OffsetDateTime,
}

pub struct V3Reader {
    dump: TempDir,
    metadata: Metadata,
    tasks: BufReader<File>,
    index_uuid: Vec<IndexUuid>,
}

impl V3Reader {
    pub fn open(dump: TempDir) -> Result<Self> {
        let meta_file = fs::read(dump.path().join("metadata.json"))?;
        let metadata = serde_json::from_reader(&*meta_file)?;
        let index_uuid = File::open(dump.path().join("index_uuids/data.jsonl"))?;
        let index_uuid = BufReader::new(index_uuid);
        let index_uuid = index_uuid
            .lines()
            .map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) })
            .collect::<Result<Vec<_>>>()?;

        Ok(V3Reader {
            metadata,
            tasks: BufReader::new(File::open(dump.path().join("updates").join("data.jsonl"))?),
            index_uuid,
            dump,
        })
    }

    pub fn index_uuid(&self) -> Vec<IndexUuid> {
        self.index_uuid.clone()
    }

    pub fn to_v4(self) -> CompatV3ToV4 {
        CompatV3ToV4::new(self)
    }

    pub fn version(&self) -> Version {
        Version::V3
    }

    pub fn date(&self) -> Option<OffsetDateTime> {
        Some(self.metadata.dump_date)
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V3IndexReader>> + '_> {
        Ok(self.index_uuid.iter().map(|index| -> Result<_> {
            V3IndexReader::new(
                index.uid.clone(),
                &self.dump.path().join("indexes").join(index.uuid.to_string()),
            )
        }))
    }

    pub fn tasks(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<(Task, Option<Box<super::UpdateFile>>)>> + '_> {
        Box::new((&mut self.tasks).lines().map(|line| -> Result<_> {
            let task: Task = serde_json::from_str(&line?)?;
            if !task.is_finished() {
                if let Some(uuid) = task.get_content_uuid() {
                    let update_file_path = self
                        .dump
                        .path()
                        .join("updates")
                        .join("updates_files")
                        .join(uuid.to_string());
                    Ok((
                        task,
                        Some(
                            Box::new(UpdateFile::new(&update_file_path)?) as Box<super::UpdateFile>
                        ),
                    ))
                } else {
                    Ok((task, None))
                }
            } else {
                Ok((task, None))
            }
        }))
    }
}

pub struct V3IndexReader {
    metadata: IndexMetadata,
    settings: Settings<Checked>,

    documents: BufReader<File>,
}

impl V3IndexReader {
    pub fn new(name: String, path: &Path) -> Result<Self> {
        let meta = File::open(path.join("meta.json"))?;
        let meta: DumpMeta = serde_json::from_reader(meta)?;

        let metadata = IndexMetadata {
            uid: name,
            primary_key: meta.primary_key,
            // FIXME: Iterate over the whole task queue to find the creation and last update date.
            created_at: OffsetDateTime::now_utc(),
            updated_at: OffsetDateTime::now_utc(),
        };

        let ret = V3IndexReader {
            metadata,
            settings: meta.settings.check(),
            documents: BufReader::new(File::open(path.join("documents.jsonl"))?),
        };

        Ok(ret)
    }

    pub fn metadata(&self) -> &IndexMetadata {
        &self.metadata
    }

    pub fn documents(&mut self) -> Result<impl Iterator<Item = Result<Document>> + '_> {
        Ok((&mut self.documents)
            .lines()
            .map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) }))
    }

    pub fn settings(&mut self) -> Result<Settings<Checked>> {
        Ok(self.settings.clone())
    }
}

pub struct UpdateFile {
    reader: BufReader<File>,
}

impl UpdateFile {
    fn new(path: &Path) -> Result<Self> {
        Ok(UpdateFile { reader: BufReader::new(File::open(path)?) })
    }
}

impl Iterator for UpdateFile {
    type Item = Result<Document>;

    fn next(&mut self) -> Option<Self::Item> {
        (&mut self.reader)
            .lines()
            .map(|line| {
                line.map_err(Error::from)
                    .and_then(|line| serde_json::from_str(&line).map_err(Error::from))
            })
            .next()
    }
}

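// Design note (not in the original diff): unlike the v2 `UpdateFile`, which
// deserializes the whole update file into a `Vec<Document>` up front, this v3
// reader pulls one JSON document per line out of the `BufReader` on each
// `next()` call, so large update files are streamed instead of held in memory.
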
#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn read_dump_v3() {
        let dump = File::open("tests/assets/v3.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = V3Reader::open(dir).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2022-10-07 11:39:03.709153554 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, mut update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"63086d59c3f2074e4ab3fff7e8cc36c1");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed

        let update_file = update_files.remove(0).unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(update_file), @"7b8889539b669c7b9ddba448bafa385d");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies2 = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");

        // movies2
        insta::assert_json_snapshot!(movies2.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies_2",
          "primaryKey": null,
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }
}
233  dump/src/reader/v3/settings.rs  Normal file
@@ -0,0 +1,233 @@
use std::collections::{BTreeMap, BTreeSet};
use std::marker::PhantomData;
use std::num::NonZeroUsize;

use serde::{Deserialize, Deserializer};

#[cfg(test)]
fn serialize_with_wildcard<S>(
    field: &Setting<Vec<String>>,
    s: S,
) -> std::result::Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    use serde::Serialize;

    let wildcard = vec!["*".to_string()];
    match field {
        Setting::Set(value) => Some(value),
        Setting::Reset => Some(&wildcard),
        Setting::NotSet => None,
    }
    .serialize(s)
}

#[derive(Clone, Default, Debug)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct Checked;

#[derive(Clone, Default, Debug, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct Unchecked;

/// Holds all the settings for an index. `T` can either be `Checked` if the settings' validity is
/// guaranteed, or `Unchecked` if they still need to be validated. In the latter case, a call to
/// `check` will return a `Settings<Checked>` from a `Settings<Unchecked>`.
#[derive(Debug, Clone, Default, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
#[serde(bound(serialize = "T: serde::Serialize", deserialize = "T: Deserialize<'static>"))]
pub struct Settings<T> {
    #[serde(
        default,
        serialize_with = "serialize_with_wildcard",
        skip_serializing_if = "Setting::is_not_set"
    )]
    pub displayed_attributes: Setting<Vec<String>>,

    #[serde(
        default,
        serialize_with = "serialize_with_wildcard",
        skip_serializing_if = "Setting::is_not_set"
    )]
    pub searchable_attributes: Setting<Vec<String>>,

    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub filterable_attributes: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub sortable_attributes: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub ranking_rules: Setting<Vec<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub stop_words: Setting<BTreeSet<String>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub synonyms: Setting<BTreeMap<String, Vec<String>>>,
    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
    pub distinct_attribute: Setting<String>,

    #[serde(skip)]
    pub _kind: PhantomData<T>,
}

impl Settings<Checked> {
    pub fn cleared() -> Settings<Checked> {
        Settings {
            displayed_attributes: Setting::Reset,
            searchable_attributes: Setting::Reset,
            filterable_attributes: Setting::Reset,
            sortable_attributes: Setting::Reset,
            ranking_rules: Setting::Reset,
            stop_words: Setting::Reset,
            synonyms: Setting::Reset,
            distinct_attribute: Setting::Reset,
            _kind: PhantomData,
        }
    }

    pub fn into_unchecked(self) -> Settings<Unchecked> {
        let Self {
            displayed_attributes,
            searchable_attributes,
            filterable_attributes,
            sortable_attributes,
            ranking_rules,
            stop_words,
            synonyms,
            distinct_attribute,
            ..
        } = self;

        Settings {
            displayed_attributes,
            searchable_attributes,
            filterable_attributes,
            sortable_attributes,
            ranking_rules,
            stop_words,
            synonyms,
            distinct_attribute,
            _kind: PhantomData,
        }
    }
}

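// Typestate sketch (illustrative, not in the diff): `into_unchecked` only swaps
// the `PhantomData` marker, so an already-checked value survives a round trip
// through `Unchecked` and back unchanged.
#[cfg(test)]
mod typestate_sketch {
    use super::*;

    #[test]
    fn cleared_round_trips_through_unchecked() {
        let checked = Settings::<Checked>::cleared().into_unchecked().check();
        assert!(matches!(checked.displayed_attributes, Setting::Reset));
        assert!(matches!(checked.distinct_attribute, Setting::Reset));
    }
}
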
impl Settings<Unchecked> {
    pub fn check(self) -> Settings<Checked> {
        let displayed_attributes = match self.displayed_attributes {
            Setting::Set(fields) => {
                if fields.iter().any(|f| f == "*") {
                    Setting::Reset
                } else {
                    Setting::Set(fields)
                }
            }
            otherwise => otherwise,
        };

        let searchable_attributes = match self.searchable_attributes {
            Setting::Set(fields) => {
                if fields.iter().any(|f| f == "*") {
                    Setting::Reset
                } else {
                    Setting::Set(fields)
                }
            }
            otherwise => otherwise,
        };

        Settings {
            displayed_attributes,
            searchable_attributes,
            filterable_attributes: self.filterable_attributes,
            sortable_attributes: self.sortable_attributes,
            ranking_rules: self.ranking_rules,
            stop_words: self.stop_words,
            synonyms: self.synonyms,
            distinct_attribute: self.distinct_attribute,
            _kind: PhantomData,
        }
    }
}

#[derive(Debug, Clone, Deserialize)]
#[cfg_attr(test, derive(serde::Serialize))]
#[serde(deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
pub struct Facets {
    pub level_group_size: Option<NonZeroUsize>,
    pub min_level_size: Option<NonZeroUsize>,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> Default for Setting<T> {
    fn default() -> Self {
        Self::NotSet
    }
}

impl<T> Setting<T> {
    pub fn map<U, F>(self, f: F) -> Setting<U>
    where
        F: FnOnce(T) -> U,
    {
        match self {
            Setting::Set(t) => Setting::Set(f(t)),
            Setting::Reset => Setting::Reset,
            Setting::NotSet => Setting::NotSet,
        }
    }

    pub fn set(self) -> Option<T> {
        match self {
            Self::Set(value) => Some(value),
            _ => None,
        }
    }

    pub const fn as_ref(&self) -> Setting<&T> {
        match *self {
            Self::Set(ref value) => Setting::Set(value),
            Self::Reset => Setting::Reset,
            Self::NotSet => Setting::NotSet,
        }
    }

    pub const fn is_not_set(&self) -> bool {
        matches!(self, Self::NotSet)
    }
}

#[cfg(test)]
impl<T: serde::Serialize> serde::Serialize for Setting<T> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Self::Set(value) => Some(value),
            // Usually `NotSet` is simply not serialized, thanks to the
            // `skip_serializing_if` field attribute.
            Self::NotSet | Self::Reset => None,
        }
        .serialize(serializer)
    }
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for Setting<T> {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(|x| match x {
            Some(x) => Self::Set(x),
            None => Self::Reset, // Reset is forced by sending a null value
        })
    }
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/v3/mod.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/v3/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/v3/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
Some files were not shown because too many files have changed in this diff.