Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-07-18 04:11:07 +00:00)
Compare commits
372 Commits
stable-arc ... prototype-
Commit SHA1s, newest first:

c2f44f3580 5e12af88e2 231067a1c4 20f8184c06 2f8ebd0501 6be9a828fa 4b7b2d6a90 e269027cdd
544b581b15 924d5d4c11 771a367b97 07603373f3 4549e0a36e cac93f149e 481df7a8b6 8356f109c1
934f2b3cb5 a3f1b8fdb9 d963c2ce55 5beb1aab7d a858531574 bf94f89035 3bcff60d1c 04c4487660
c92948b143 b3c2a4ae27 c7b2e3be87 aa17a54feb 898160587f 32364e9919 ea3b269b77 a4be4c49e8
7d1ebb7295 e664f09045 767cb725a5 13c2cd700d fea41ca788 217504fff3 5672118bfa 57682cbabe
5dd582918d 74747b65b1 c79b6a1ee4 f0e6b9c0c5 56db54486c a9b3f91467 0de9a3ffe7 8fb685f5aa
e3742a38d4 e16b5c615a d2420f5c8f 72e2b220ed b0c33ed6d2 a1e9c44fe5 7df1dda002 3d8ca62c35
e8e7070cc6 f857d9c2df d0988e115f 5dcb920fb4 6f7e0c431a 00f6af6475 3e5b3df487 e89973f1bf
d3c796af38 182eea1f17 a4476c20f8 57da80900d 7322f4e78e 0f727d079b 82bdb54537 b6ec1f1c6d
41a970247e e225608337 56e79fa850 c71a8ea183 b781f9a0f9 07b90dec08 9194508a0f 49ddaaef49
766dd830ae 436ae4e466 a5c4fbbcea 21b8cd53b7 7f80b116bc 341f8478b4 79c7f65c30 2bc60c29fc
680ea39bba a524dfb713 705fcaa3b8 a09b6a341d 387874ea26 5c1a7c3b9a 6d658f4c52 bf573885ea
a68ac3a1dc b252c87197 b0b7ad7caf c91ffec72e 1fc11264e8 2bc2e99ff3 808e184069 7a30d98264
d0a85057a3 b3574de809 59704c000c b117c688f5 5ec85b7dfb d80be0c28d 398c0c32cd d4157c0ce4
98dffbf213 f63fee5e97 f0d408c295 d308684395 e27bb8ab3e ff843881c5 ae08fba76e af6d4b3031
b03ee54fe0 d17efb9ed6 9ab791bedc 96105a5e8d e706628bb1 3c630891bb 97854274b4 0646f63404
ce3e8794a2 50ce0409bc 839b05c43d cc699fae40 aa4b813237 eb08a0fb0b cda529c07b 1f8ddb366c
8a3da0c2a7 c840d55e89 c7a3992510 28408816ef 0eaa8ca255 201bc633d2 ba839852f5 f9aa897ab5
2d74678b51 db7eaf23f4 32f7cfa5cb a402fc4486 502d9e4b24 a85ff1f690 233372abea 13d4ae264a
c766e06003 fcbd47281b b6d80293f7 0e98a71a24 5cb566b165 9d46caba29 c4aa5cc7d0 12c3d432f9
d082ded7ad 947f08793a 17dac72464 b821c72459 7b2575c646 ab655a85e8 6425e06cf2 1692f58b83
9ba4d0f921 4b6ffe0cd1 336c77aa45 776acb5ed3 3e9834abff 3cba476a9f 57e851d8a9 9c45850bd2
3e0e8164a3 0bc4572905 4e6c663a2e e2775c6f49 c07a5932cb 528a944997 13fb5ce974 a43a0712fa
1be4619b91 cf50f85986 61b3a29ff3 66e18eae79 9a39c4e40d df176aaf01 0893b175dc d5978d11e1
2780e365e2 bf2a401a05 9925309492 9e0cce5ca4 336ea57384 c637bfba37 3040172562 52aa34d984
2c86d42a44 8ce3a34ffa 259c04eb28 d8fb506c92 aa03e02fdc 7ef23addb6 b3fce7c366 5099a40484
19ee9a828f 869d331680 913eff5b2f b4a73f2d74 4e175ae882 5a0a0468df 60ebf0ea0b 867279f2a4
ce84a59873 d66bb3a53f 6c0b8edab5 fbbc6eaeca 60c3bac108 9491fe0704 240c73d292 d3eb8d2d5c
660be071b5 89542d7d8b f62e7a3501 a08cc82983 7b2f2a4f9c 5d5615ef45 526793b5b2 271685cceb
1af590d3bc dab2634ca8 8a7f90250c 23c1b223b3 87ae0032bf 7c24fea9f2 27d1bee0bb b1c3174061
fa46dfb7bb 40d9b73aaf 169682d3ec 21b926cb00 34a6f2598b 14824cee86 796e61ec7e 9a3f9577b8
2c8eb92537 1bf5c0edb9 b1ffbe561e 84204b8cd5 346fca5608 4631f4d97f 6f1c30b247 abba54e913
c426fa1478 574be942cd 2262766494 ad2b1467da ee37d5e724 ded2a50d14 58327979f1 50d9fe036e
026cf223b3 af6f7f8462 5023d36ee7 f4dc4c5d8d 717dd36547 538030c2da 1d5294d11a 34c3e5ec5e
1c3a326199 34c0f11c26 be300138e4 2ed6017603 c1337f9e08 9acac28574 cb1d184904 2841b09789
35f3dd68b6 f1de3aa75a e4e4370a3c 24c79b79f9 5db7c4057c 2867d2e91a 1458a12531 cbb8d0f97b
bef81065f9 180511795b 3bef6e6690 9b23885e85 8b46093117 9c89e3dadc b0cf431614 afe520a67e
688911ed34 776af129bf 492fd2829a ffa6d1ed4e 293efb7485 6766712840 980776b646 6bdd37beb8
8b6eba4f0b e510ace179 f056fc118f 5a770ffe47 7b08d700f7 c63748723d 40339919ad 7f3653ec31
a1f5ec1e9e dd6593f7b6 fc905f29e9 f892d122de 7e314664fc a92d67b79f daa0f2e9aa e35db5e59b
b727fe7179 1260c32d18 e03c216952 c7749127fa c8841344e2 b8de369e33 93b59d55e3 b9a8533de1
d44652209d 5d22c7bcce da8044f91e 9e3b1eb7a8 eab1156f8c 8d405fad12 bf96b6df93 9c28632498
38982d13fe 6150aa73b0 14840a24c6 41eb986f65 457a473b72 914f8b118c 6ecd486d1b 7b8641a7af
f509d81ec9 8d73ae80bb 00129e112a 46c275f0e4 f4d4f313ea 5e1fa53354 fe59a6f628 647b7a20e9
e81b349658 a84fad5ce6 0a102d601c 8a14f6f545 079357ee1f 06e7db7a1f 9e189f5041 fe980f9e88
32cd9e4852 8d79f501f3 c05abc2b0d a441fe5ae5 7331da0410 72c4db4553 953b2ec438 09e71fdeb6
ab3056cc66 c7caadb54e 0219ef25fe 3911fd64b5
134  .github/scripts/is-latest-release.sh (vendored)
@@ -1,127 +1,41 @@
#!/bin/sh

# Was used in our CIs to publish the latest docker image. Not used anymore, will be used again when v1 and v2 will be out and we will want to maintain multiple stable versions.
# Returns "true" or "false" (as a string) to be used in the `if` in GHA
# Used in our CIs to publish the latest Docker image.

# Checks if the current tag should be the latest (in terms of semver and not of release date).
# Ex: previous tag -> v2.1.1
#     new tag -> v1.20.3
# The new tag (v1.20.3) should NOT be the latest
# So it returns "false", the `latest` tag should not be updated for the release v1.20.3 and still need to correspond to v2.1.1
# Checks if the current tag ($GITHUB_REF) corresponds to the latest release tag on GitHub
# Returns "true" or "false" (as a string).

# GLOBAL
GREP_SEMVER_REGEXP='v\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)$' # i.e. v[number].[number].[number]
GITHUB_API='https://api.github.com/repos/meilisearch/meilisearch/releases'
PNAME='meilisearch'

# FUNCTIONS

# semverParseInto and semverLT from https://github.com/cloudflare/semver_bash/blob/master/semver.sh

# usage: semverParseInto version major minor patch special
# version: the string version
# major, minor, patch, special: will be assigned by the function
semverParseInto() {
    local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
    #MAJOR
    eval $2=`echo $1 | sed -e "s#$RE#\1#"`
    #MINOR
    eval $3=`echo $1 | sed -e "s#$RE#\2#"`
    #MINOR
    eval $4=`echo $1 | sed -e "s#$RE#\3#"`
    #SPECIAL
    eval $5=`echo $1 | sed -e "s#$RE#\4#"`
}

# usage: semverLT version1 version2
semverLT() {
    local MAJOR_A=0
    local MINOR_A=0
    local PATCH_A=0
    local SPECIAL_A=0

    local MAJOR_B=0
    local MINOR_B=0
    local PATCH_B=0
    local SPECIAL_B=0

    semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
    semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B

    if [ $MAJOR_A -lt $MAJOR_B ]; then
        return 0
    fi
    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -lt $MINOR_B ]; then
        return 0
    fi
    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -le $MINOR_B ] && [ $PATCH_A -lt $PATCH_B ]; then
        return 0
    fi
    if [ "_$SPECIAL_A" == "_" ] && [ "_$SPECIAL_B" == "_" ] ; then
        return 1
    fi
    if [ "_$SPECIAL_A" == "_" ] && [ "_$SPECIAL_B" != "_" ] ; then
        return 1
    fi
    if [ "_$SPECIAL_A" != "_" ] && [ "_$SPECIAL_B" == "_" ] ; then
        return 0
    fi
    if [ "_$SPECIAL_A" < "_$SPECIAL_B" ]; then
        return 0
    fi

    return 1
}

# Returns the tag of the latest stable release (in terms of semver and not of release date)
# Returns the version of the latest stable version of Meilisearch by setting the $latest variable.
get_latest() {
    temp_file='temp_file' # temp_file needed because the grep would start before the download is over
    curl -s 'https://api.github.com/repos/meilisearch/meilisearch/releases' > "$temp_file"
    releases=$(cat "$temp_file" | \
        grep -E "tag_name|draft|prerelease" \
        | tr -d ',"' | cut -d ':' -f2 | tr -d ' ')
    # Returns a list of [tag_name draft_boolean prerelease_boolean ...]
    # Ex: v0.10.1 false false v0.9.1-rc.1 false true v0.9.0 false false...
    # temp_file is needed because the grep would start before the download is over
    temp_file=$(mktemp -q /tmp/$PNAME.XXXXXXXXX)
    latest_release="$GITHUB_API/latest"

    i=0
    latest=""
    current_tag=""
    for release_info in $releases; do
        if [ $i -eq 0 ]; then # Checking tag_name
            if echo "$release_info" | grep -q "$GREP_SEMVER_REGEXP"; then # If it's not an alpha or beta release
                current_tag=$release_info
            else
                current_tag=""
            fi
            i=1
        elif [ $i -eq 1 ]; then # Checking draft boolean
            if [ "$release_info" = "true" ]; then
                current_tag=""
            fi
            i=2
        elif [ $i -eq 2 ]; then # Checking prerelease boolean
            if [ "$release_info" = "true" ]; then
                current_tag=""
            fi
            i=0
            if [ "$current_tag" != "" ]; then # If the current_tag is valid
                if [ "$latest" = "" ]; then # If there is no latest yet
                    latest="$current_tag"
                else
                    semverLT $current_tag $latest # Comparing latest and the current tag
                    if [ $? -eq 1 ]; then
                        latest="$current_tag"
                    fi
                fi
            fi
        fi
    done
    if [ $? -ne 0 ]; then
        echo "$0: Can't create temp file."
        exit 1
    fi

    if [ -z "$GITHUB_PAT" ]; then
        curl -s "$latest_release" > "$temp_file" || return 1
    else
        curl -H "Authorization: token $GITHUB_PAT" -s "$latest_release" > "$temp_file" || return 1
    fi

    latest="$(cat "$temp_file" | grep '"tag_name":' | cut -d ':' -f2 | tr -d '"' | tr -d ',' | tr -d ' ')"

    rm -f "$temp_file"
    echo $latest
    return 0
}

# MAIN
current_tag="$(echo $GITHUB_REF | tr -d 'refs/tags/')"
latest="$(get_latest)"
get_latest

if [ "$current_tag" != "$latest" ]; then
    # The current release tag is not the latest
@@ -130,3 +44,5 @@ else
    # The current release tag is the latest
    echo "true"
fi

exit 0
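To make the semver rule described in the comments above concrete, here is a minimal sketch (assuming the semverParseInto and semverLT helpers defined in the script are in scope; semverLT A B exits 0 when A is semver-lower than B):

    # With previous tag v2.1.1 and new tag v1.20.3, the new tag must not become `latest`:
    if semverLT 'v1.20.3' 'v2.1.1'; then
        echo 'false'   # keep the `latest` tag on v2.1.1
    else
        echo 'true'
    fi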
33  .github/workflows/coverage.yml (vendored)
@@ -1,33 +0,0 @@
---
on:
  workflow_dispatch:

name: Execute code coverage

jobs:
  nightly-coverage:
    runs-on: ubuntu-18.04
    steps:
    - uses: actions/checkout@v3
    - uses: actions-rs/toolchain@v1
      with:
        toolchain: nightly
        override: true
    - uses: actions-rs/cargo@v1
      with:
        command: clean
    - uses: actions-rs/cargo@v1
      with:
        command: test
        args: --all-features --no-fail-fast
      env:
        CARGO_INCREMENTAL: "0"
        RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=unwind -Zpanic_abort_tests"
    - uses: actions-rs/grcov@v0.1
    - name: Upload coverage to Codecov
      uses: codecov/codecov-action@v3
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        file: ${{ steps.coverage.outputs.report }}
        yml: ./codecov.yml
        fail_ci_if_error: true
@@ -15,7 +15,7 @@ jobs:
      github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
      title: Upgrade dependencies
      body: |
        We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the core-team repositories that Meilisearch depends on (milli, charabia, heed...)
        We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the engine-team repositories that Meilisearch depends on (milli, charabia, heed...)

        ⚠️ This issue should only be done at the beginning of the sprint!
      labels: |
17  .github/workflows/flaky.yml (vendored)
@@ -1,14 +1,25 @@
name: Look for flaky tests
on:
  workflow_dispatch:
  schedule:
    - cron: "0 12 * * FRI" # every friday at 12:00PM
    - cron: "0 12 * * FRI" # Every Friday at 12:00PM

jobs:
  flaky:
    runs-on: ubuntu-18.04

    runs-on: ubuntu-latest
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
      image: ubuntu:18.04
    steps:
    - uses: actions/checkout@v3
    - name: Install needed dependencies
      run: |
        apt-get update && apt-get install -y curl
        apt-get install build-essential -y
    - uses: actions-rs/toolchain@v1
      with:
        toolchain: stable
        override: true
    - name: Install cargo-flaky
      run: cargo install cargo-flaky
    - name: Run cargo flaky 100 times
29  .github/workflows/latest-git-tag.yml (vendored, new file)
@@ -0,0 +1,29 @@
# Create or update a latest git tag when releasing a stable version of Meilisearch
name: Update latest git tag
on:
  workflow_dispatch:
  release:
    types: [released]

jobs:
  check-version:
    name: Check the version validity
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - name: Check release validity
      if: github.event_name == 'release'
      run: bash .github/scripts/check-release.sh

  update-latest-tag:
    runs-on: ubuntu-latest
    needs: check-version
    steps:
    - uses: actions/checkout@v3
    - uses: rickstaa/action-create-tag@v1
      with:
        tag: "latest"
        message: "Latest stable release of Meilisearch"
        # Move the tag if `latest` already exists
        force_push_tag: true
        github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
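The rickstaa/action-create-tag step above force-moves the `latest` tag; a minimal sketch of the equivalent plain git commands:

    git tag --force latest -m "Latest stable release of Meilisearch"
    git push --force origin tag latest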
14  .github/workflows/milestone-workflow.yml (vendored)
@@ -3,8 +3,8 @@ name: Milestone's workflow
# /!\ No git flow are handled here

# For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
# - the roadmap issue is created, see https://github.com/meilisearch/core-team/blob/main/issue-templates/roadmap-issue.md
# - the changelog issue is created, see https://github.com/meilisearch/core-team/blob/main/issue-templates/changelog-issue.md
# - the roadmap issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/roadmap-issue.md
# - the changelog issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/changelog-issue.md

# For each Milestone closed
# - the `release_version` label is created
@@ -31,8 +31,6 @@ jobs:
    runs-on: ubuntu-latest
    outputs:
      is-patch: ${{ steps.check-patch.outputs.is-patch }}
    env:
      MILESTONE_VERSION: ${{ github.event.milestone.title }}
    steps:
    - uses: actions/checkout@v3
    - name: Check if this release is a patch release only
@@ -41,10 +39,10 @@ jobs:
        echo version: $MILESTONE_VERSION
        if [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.0$ ]]; then
          echo 'This is NOT a patch release'
          echo ::set-output name=is-patch::false
          echo "is-patch=false" >> $GITHUB_OUTPUT
        elif [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo 'This is a patch release'
          echo ::set-output name=is-patch::true
          echo "is-patch=true" >> $GITHUB_OUTPUT
        else
          echo "Not a valid format of release, check the Milestone's title."
          echo 'Should be vX.Y.Z'
@@ -61,7 +59,7 @@ jobs:
    steps:
    - uses: actions/checkout@v3
    - name: Download the issue template
      run: curl -s https://raw.githubusercontent.com/meilisearch/core-team/main/issue-templates/roadmap-issue.md > $ISSUE_TEMPLATE
      run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/roadmap-issue.md > $ISSUE_TEMPLATE
    - name: Replace all empty occurrences in the templates
      run: |
        # Replace all <<version>> occurrences
@@ -94,7 +92,7 @@ jobs:
    steps:
    - uses: actions/checkout@v3
    - name: Download the issue template
      run: curl -s https://raw.githubusercontent.com/meilisearch/core-team/main/issue-templates/changelog-issue.md > $ISSUE_TEMPLATE
      run: curl -s https://raw.githubusercontent.com/meilisearch/engine-team/main/issue-templates/changelog-issue.md > $ISSUE_TEMPLATE
    - name: Replace all empty occurrences in the templates
      run: |
        # Replace all <<version>> occurrences
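The paired echo lines in this diff reflect the migration away from GitHub Actions' deprecated ::set-output workflow command; a minimal sketch of the two forms side by side:

    # Deprecated workflow command:
    echo "::set-output name=is-patch::true"
    # Replacement: append key=value to the file exposed through $GITHUB_OUTPUT:
    echo "is-patch=true" >> "$GITHUB_OUTPUT"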
85  .github/workflows/publish-binaries.yml (vendored)
@@ -1,4 +1,5 @@
on:
  workflow_dispatch:
  schedule:
    - cron: '0 2 * * *' # Every day at 2:00am
  release:
@@ -17,69 +18,93 @@ jobs:
      # If yes, it means we are publishing an official release.
      # If no, we are releasing a RC, so no need to check the version.
      - name: Check tag format
        if: github.event_name != 'schedule'
        if: github.event_name == 'release'
        id: check-tag-format
        run: |
          escaped_tag=$(printf "%q" ${{ github.ref_name }})

          if [[ $escaped_tag =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo ::set-output name=stable::true
            echo "stable=true" >> $GITHUB_OUTPUT
          else
            echo ::set-output name=stable::false
            echo "stable=false" >> $GITHUB_OUTPUT
          fi
      - name: Check release validity
        if: github.event_name != 'schedule' && steps.check-tag-format.outputs.stable == 'true'
        if: github.event_name == 'release' && steps.check-tag-format.outputs.stable == 'true'
        run: bash .github/scripts/check-release.sh

  publish:
  publish-linux:
    name: Publish binary for Linux
    runs-on: ubuntu-latest
    needs: check-version
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27
      image: ubuntu:18.04
    steps:
    - uses: actions/checkout@v3
    - name: Install needed dependencies
      run: |
        apt-get update && apt-get install -y curl
        apt-get install build-essential -y
    - uses: actions-rs/toolchain@v1
      with:
        toolchain: stable
        override: true
    - name: Build
      run: cargo build --release --locked
    # No need to upload binaries for dry run (cron)
    - name: Upload binaries to release
      if: github.event_name == 'release'
      uses: svenstaro/upload-release-action@2.3.0
      with:
        repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
        file: target/release/meilisearch
        asset_name: meilisearch-linux-amd64
        tag: ${{ github.ref }}

  publish-macos-windows:
    name: Publish binary for ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    needs: check-version
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-18.04, macos-latest, windows-latest]
        os: [macos-12, windows-2022]
        include:
          - os: ubuntu-18.04
            artifact_name: meilisearch
            asset_name: meilisearch-linux-amd64
          - os: macos-latest
          - os: macos-12
            artifact_name: meilisearch
            asset_name: meilisearch-macos-amd64
          - os: windows-latest
          - os: windows-2022
            artifact_name: meilisearch.exe
            asset_name: meilisearch-windows-amd64.exe

    steps:
    - uses: hecrj/setup-rust-action@master
      with:
        rust-version: stable
    - uses: actions/checkout@v3
    - uses: actions-rs/toolchain@v1
      with:
        toolchain: stable
        override: true
    - name: Build
      run: cargo build --release --locked
    # No need to upload binaries for dry run (cron)
    - name: Upload binaries to release
      if: github.event_name != 'schedule'
      uses: svenstaro/upload-release-action@v1-release
      if: github.event_name == 'release'
      uses: svenstaro/upload-release-action@2.3.0
      with:
        repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
        file: target/release/${{ matrix.artifact_name }}
        asset_name: ${{ matrix.asset_name }}
        tag: ${{ github.ref }}


  publish-macos-apple-silicon:
    name: Publish binary for macOS silicon
    runs-on: ${{ matrix.os }}
    needs: check-version
    continue-on-error: false
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: macos-latest
          - os: macos-12
            target: aarch64-apple-darwin
            asset_name: meilisearch-macos-apple-silicon

    steps:
    - name: Checkout repository
      uses: actions/checkout@v3
@@ -97,8 +122,8 @@ jobs:
        args: --release --target ${{ matrix.target }}
    - name: Upload the binary to release
      # No need to upload binaries for dry run (cron)
      if: github.event_name != 'schedule'
      uses: svenstaro/upload-release-action@v1-release
      if: github.event_name == 'release'
      uses: svenstaro/upload-release-action@2.3.0
      with:
        repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
        file: target/${{ matrix.target }}/release/meilisearch
@@ -109,7 +134,6 @@ jobs:
    name: Publish binary for aarch64
    runs-on: ${{ matrix.os }}
    needs: check-version
    continue-on-error: false
    strategy:
      fail-fast: false
      matrix:
@@ -120,11 +144,9 @@ jobs:
            linker: gcc-aarch64-linux-gnu
            use-cross: true
            asset_name: meilisearch-linux-aarch64

    steps:
    - name: Checkout repository
      uses: actions/checkout@v3

    - name: Installing Rust toolchain
      uses: actions-rs/toolchain@v1
      with:
@@ -132,16 +154,13 @@ jobs:
        profile: minimal
        target: ${{ matrix.target }}
        override: true

    - name: APT update
      run: |
        sudo apt update

    - name: Install target specific tools
      if: matrix.use-cross
      run: |
        sudo apt-get install -y ${{ matrix.linker }}

    - name: Configure target aarch64 GNU
      if: matrix.target == 'aarch64-unknown-linux-gnu'
      ## Environment variable is not passed using env:
@@ -153,22 +172,18 @@ jobs:
        echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config
        echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
        echo 'JEMALLOC_SYS_WITH_LG_PAGE=16' >> $GITHUB_ENV
        echo RUSTFLAGS="-Clink-arg=-fuse-ld=gold" >> $GITHUB_ENV

    - name: Cargo build
      uses: actions-rs/cargo@v1
      with:
        command: build
        use-cross: ${{ matrix.use-cross }}
        args: --release --target ${{ matrix.target }}

    - name: List target output files
      run: ls -lR ./target

    - name: Upload the binary to release
      # No need to upload binaries for dry run (cron)
      if: github.event_name != 'schedule'
      uses: svenstaro/upload-release-action@v1-release
      if: github.event_name == 'release'
      uses: svenstaro/upload-release-action@2.3.0
      with:
        repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
        file: target/${{ matrix.target }}/release/meilisearch
24  .github/workflows/publish-deb-brew-pkg.yml (vendored)
@@ -1,4 +1,4 @@
name: Publish deb pkg to GitHub release & APT repository & Homebrew
name: Publish to APT repository & Homebrew

on:
  release:
@@ -15,19 +15,27 @@ jobs:

  debian:
    name: Publish debian packagge
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    needs: check-version
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27
      image: ubuntu:18.04
    steps:
    - uses: hecrj/setup-rust-action@master
    - name: Install needed dependencies
      run: |
        apt-get update && apt-get install -y curl
        apt-get install build-essential -y
    - uses: actions-rs/toolchain@v1
      with:
        rust-version: stable
        toolchain: stable
        override: true
    - name: Install cargo-deb
      run: cargo install cargo-deb
    - uses: actions/checkout@v3
    - name: Build deb package
      run: cargo deb -p meilisearch-http -o target/debian/meilisearch.deb
      run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
    - name: Upload debian pkg to release
      uses: svenstaro/upload-release-action@v1-release
      uses: svenstaro/upload-release-action@2.3.0
      with:
        repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
        file: target/debian/meilisearch.deb
@@ -38,11 +46,11 @@ jobs:

  homebrew:
    name: Bump Homebrew formula
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    needs: check-version
    steps:
    - name: Create PR to Homebrew
      uses: mislav/bump-homebrew-formula-action@v1
      uses: mislav/bump-homebrew-formula-action@v2
      with:
        formula-name: meilisearch
      env:
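A minimal sketch of reproducing the packaging step locally, reflecting the crate rename from meilisearch-http to meilisearch:

    cargo install cargo-deb
    cargo deb -p meilisearch -o target/debian/meilisearch.deb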
66  .github/workflows/publish-docker-images.yml (vendored)
@@ -1,10 +1,16 @@
---
on:
  schedule:
    - cron: '0 4 * * *' # Every day at 4:00am
  push:
    tags:
      - '*'
    # Will run for every tag pushed except `latest`
    # When the `latest` git tag is created with this [CI](../latest-git-tag.yml)
    # we don't need to create a Docker `latest` image again.
    # The `latest` Docker image push is already done in this CI when releasing a stable version of Meilisearch.
    tags-ignore:
      - latest
  # Both `schedule` and `workflow_dispatch` build the nightly tag
  schedule:
    - cron: '0 23 * * *' # Every day at 11:00pm
  workflow_dispatch:

name: Publish tagged images to Docker Hub

@@ -14,27 +20,46 @@ jobs:
    steps:
    - uses: actions/checkout@v3

    # Check if the tag has the v<nmumber>.<number>.<number> format. If yes, it means we are publishing an official release.
    # If we are running a cron or manual job ('schedule' or 'workflow_dispatch' event), it means we are publishing the `nightly` tag, so not considered stable.
    # If we have pushed a tag, and the tag has the v<nmumber>.<number>.<number> format, it means we are publishing an official release, so considered stable.
    # In this situation, we need to set `output.stable` to create/update the following tags (additionally to the `vX.Y.Z` Docker tag):
    # - a `vX.Y` (without patch version) Docker tag
    # - a `latest` Docker tag
    - name: Check tag format
      if: github.event_name != 'schedule'
    # For any other tag pushed, this is not considered stable.
    - name: Define if stable and latest release
      id: check-tag-format
      env:
        # To avoid request limit with the .github/scripts/is-latest-release.sh script
        GITHUB_PATH: ${{ secrets.MEILI_BOT_GH_PAT }}
      run: |
        escaped_tag=$(printf "%q" ${{ github.ref_name }})
        echo "latest=false" >> $GITHUB_OUTPUT

        if [[ $escaped_tag =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo ::set-output name=stable::true
        if [[ ${{ github.event_name }} != 'push' ]]; then
          echo "stable=false" >> $GITHUB_OUTPUT
        elif [[ $escaped_tag =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo "stable=true" >> $GITHUB_OUTPUT
          echo "latest=$(sh .github/scripts/is-latest-release.sh)" >> $GITHUB_OUTPUT
        else
          echo ::set-output name=stable::false
          echo "stable=false" >> $GITHUB_OUTPUT
        fi

    # Check only the validity of the tag for official releases (not for pre-releases or other tags)
    # Check only the validity of the tag for stable releases (not for pre-releases or other tags)
    - name: Check release validity
      if: github.event_name != 'schedule' && steps.check-tag-format.outputs.stable == 'true'
      if: steps.check-tag-format.outputs.stable == 'true'
      run: bash .github/scripts/check-release.sh

    - name: Set build-args for Docker buildx
      id: build-metadata
      run: |
        # Define ownership
        git config --global --add safe.directory /home/meili/actions-runner/_work/meilisearch/meilisearch

        # Extract commit date
        commit_date=$(git show -s --format=%cd --date=iso-strict ${{ github.sha }})

        echo "date=$commit_date" >> $GITHUB_OUTPUT

    - name: Set up QEMU
      uses: docker/setup-qemu-action@v2

@@ -42,7 +67,6 @@ jobs:
      uses: docker/setup-buildx-action@v2

    - name: Login to Docker Hub
      if: github.event_name != 'schedule'
      uses: docker/login-action@v2
      with:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -53,25 +77,29 @@ jobs:
      uses: docker/metadata-action@v4
      with:
        images: getmeili/meilisearch
        # The latest and `vX.Y` tags are only pushed for the official Meilisearch releases
        # See https://github.com/docker/metadata-action#latest-tag
        # Prevent `latest` to be updated for each new tag pushed.
        # We need latest and `vX.Y` tags to only be pushed for the stable Meilisearch releases.
        flavor: latest=false
        tags: |
          type=ref,event=tag
          type=raw,value=nightly,enable=${{ github.event_name != 'push' }}
          type=semver,pattern=v{{major}}.{{minor}},enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
          type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' }}
          type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' && steps.check-tag-format.outputs.latest == 'true' }}

    - name: Build and push
      uses: docker/build-push-action@v3
      with:
        # We do not push tags for the cron jobs, this is only for test purposes
        push: ${{ github.event_name != 'schedule' }}
        push: true
        platforms: linux/amd64,linux/arm64
        tags: ${{ steps.meta.outputs.tags }}
        build-args: |
          COMMIT_SHA=${{ github.sha }}
          COMMIT_DATE=${{ steps.build-metadata.outputs.date }}

    # /!\ Don't touch this without checking with Cloud team
    - name: Send CI information to Cloud team
      if: github.event_name != 'schedule'
      # Do not send if nightly build (i.e. 'schedule' or 'workflow_dispatch' event)
      if: github.event_name == 'push'
      uses: peter-evans/repository-dispatch@v2
      with:
        token: ${{ secrets.MEILI_BOT_GH_PAT }}
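The "Define if stable and latest release" step above reduces to the following decision; a standalone sketch with hypothetical tag and event values (assumes $GITHUB_OUTPUT is set, as under GitHub Actions):

    tag='v1.2.3'; event='push'
    echo "latest=false" >> "$GITHUB_OUTPUT"
    if [ "$event" != 'push' ]; then
        echo "stable=false" >> "$GITHUB_OUTPUT"    # nightly (schedule/workflow_dispatch)
    elif echo "$tag" | grep -Eq '^v[0-9]+\.[0-9]+\.[0-9]+$'; then
        echo "stable=true" >> "$GITHUB_OUTPUT"
        echo "latest=$(sh .github/scripts/is-latest-release.sh)" >> "$GITHUB_OUTPUT"
    else
        echo "stable=false" >> "$GITHUB_OUTPUT"    # release candidate or other tag
    fi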
59  .github/workflows/rust.yml (vendored)
@@ -15,17 +15,46 @@ env:
  RUSTFLAGS: "-D warnings"

jobs:
  tests:
  test-linux:
    name: Tests on ubuntu-18.04
    runs-on: ubuntu-latest
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
      image: ubuntu:18.04
    steps:
    - uses: actions/checkout@v3
    - name: Install needed dependencies
      run: |
        apt-get update && apt-get install -y curl
        apt-get install build-essential -y
    - uses: actions-rs/toolchain@v1
      with:
        toolchain: stable
        override: true
    - name: Cache dependencies
      uses: Swatinem/rust-cache@v2.2.0
    - name: Run cargo check without any default features
      uses: actions-rs/cargo@v1
      with:
        command: build
        args: --locked --release --no-default-features
    - name: Run cargo test
      uses: actions-rs/cargo@v1
      with:
        command: test
        args: --locked --release

  test-others:
    name: Tests on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-18.04, macos-latest, windows-latest]
        os: [macos-12, windows-2022]
    steps:
    - uses: actions/checkout@v3
    - name: Cache dependencies
      uses: Swatinem/rust-cache@v2.0.0
      uses: Swatinem/rust-cache@v2.2.0
    - name: Run cargo check without any default features
      uses: actions-rs/cargo@v1
      with:
@@ -40,16 +69,22 @@ jobs:
  # We run tests in debug also, to make sure that the debug_assertions are hit
  test-debug:
    name: Run tests in debug
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    container:
      # Use ubuntu-18.04 to compile with glibc 2.27, which are the production expectations
      image: ubuntu:18.04
    steps:
    - uses: actions/checkout@v3
    - name: Install needed dependencies
      run: |
        apt-get update && apt-get install -y curl
        apt-get install build-essential -y
    - uses: actions-rs/toolchain@v1
      with:
        profile: minimal
        toolchain: stable
        override: true
    - name: Cache dependencies
      uses: Swatinem/rust-cache@v2.0.0
      uses: Swatinem/rust-cache@v2.2.0
    - name: Run tests in debug
      uses: actions-rs/cargo@v1
      with:
@@ -58,7 +93,7 @@ jobs:

  clippy:
    name: Run Clippy
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - uses: actions-rs/toolchain@v1
@@ -68,25 +103,25 @@ jobs:
        override: true
        components: clippy
    - name: Cache dependencies
      uses: Swatinem/rust-cache@v2.0.0
      uses: Swatinem/rust-cache@v2.2.0
    - name: Run cargo clippy
      uses: actions-rs/cargo@v1
      with:
        command: clippy
        args: --all-targets -- --deny warnings
        args: --all-targets -- --deny warnings --allow clippy::uninlined_format_args

  fmt:
    name: Run Rustfmt
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - uses: actions-rs/toolchain@v1
      with:
        profile: minimal
        toolchain: stable
        toolchain: nightly
        override: true
        components: rustfmt
    - name: Cache dependencies
      uses: Swatinem/rust-cache@v2.0.0
      uses: Swatinem/rust-cache@v2.2.0
    - name: Run cargo fmt
      run: cargo fmt --all -- --check
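The stricter clippy step above can be reproduced locally with the same flags; a minimal sketch:

    cargo clippy --all-targets -- --deny warnings --allow clippy::uninlined_format_args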
@@ -16,7 +16,7 @@ jobs:

  update-version-cargo-toml:
    name: Update version in Cargo.toml files
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - uses: actions-rs/toolchain@v1
@@ -44,4 +44,5 @@ jobs:
          --title "Update version for the next release ($NEW_VERSION) in Cargo.toml files" \
          --body '⚠️ This PR is automatically generated. Check the new version is the expected one before merging.' \
          --label 'skip changelog' \
          --milestone $NEW_VERSION
          --milestone $NEW_VERSION \
          --base $GITHUB_REF_NAME
@@ -10,24 +10,12 @@ If Meilisearch does not offer optimized support for your language, please consid

## Table of Contents

- [Hacktoberfest 2022](#hacktoberfest-2022)
- [Assumptions](#assumptions)
- [How to Contribute](#how-to-contribute)
- [Development Workflow](#development-workflow)
- [Git Guidelines](#git-guidelines)
- [Release Process (for internal team only)](#release-process-for-internal-team-only)

## Hacktoberfest 2022

It's [Hacktoberfest month](https://hacktoberfest.com)! 🥳

Thanks so much for participating with Meilisearch this year!
1. We will follow the quality standards set by the organizers of Hacktoberfest (see detail on their [website](https://hacktoberfest.com/participation/#spam)). Our reviewers will not consider any PR that doesn’t match that standard.
2. PRs reviews will take place from Monday to Thursday, during usual working hours, CEST time. If you submit outside of these hours, there’s no need to panic; we will get around to your contribution.
3. There will be no issue assignment as we don’t want people to ask to be assigned specific issues and never return, discouraging the volunteer contributors from opening a PR to fix this issue. We take the liberty to choose the PR that best fixes the issue, so we encourage you to get to it as soon as possible and do your best!

You can check out the longer, more complete guideline documentation [here](https://github.com/meilisearch/.github/blob/main/Hacktoberfest_2022_contributors_guidelines.md).

## Assumptions

1. **You're familiar with [GitHub](https://github.com) and the [Pull Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests)(PR) workflow.**
@@ -109,7 +97,7 @@ _[Read more about this](https://github.com/meilisearch/integration-guides/blob/m

### How to Publish a new Release

The full Meilisearch release process is described in [this guide](https://github.com/meilisearch/core-team/blob/main/resources/meilisearch-release.md). Please follow it carefully before doing any release.
The full Meilisearch release process is described in [this guide](https://github.com/meilisearch/engine-team/blob/main/resources/meilisearch-release.md). Please follow it carefully before doing any release.

### Release assets
1301  Cargo.lock (generated): file diff suppressed because it is too large
@@ -1,7 +1,7 @@
[workspace]
resolver = "2"
members = [
    "meilisearch-http",
    "meilisearch",
    "meilisearch-types",
    "meilisearch-auth",
    "meili-snap",
@@ -7,7 +7,7 @@ WORKDIR /meilisearch

ARG COMMIT_SHA
ARG COMMIT_DATE
ENV COMMIT_SHA=${COMMIT_SHA} COMMIT_DATE=${COMMIT_DATE}
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE}
ENV RUSTFLAGS="-C target-feature=-crt-static"

COPY . .
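A minimal sketch of a local build passing the same build arguments the CI provides (the image tag here is hypothetical):

    docker build \
        --build-arg COMMIT_SHA="$(git rev-parse HEAD)" \
        --build-arg COMMIT_DATE="$(git show -s --format=%cd --date=iso-strict HEAD)" \
        -t getmeili/meilisearch:local .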
14  README.md
@@ -9,7 +9,7 @@
  <a href="https://blog.meilisearch.com">Blog</a> |
  <a href="https://docs.meilisearch.com">Documentation</a> |
  <a href="https://docs.meilisearch.com/faq/">FAQ</a> |
  <a href="https://slack.meilisearch.com">Slack</a>
  <a href="https://discord.meilisearch.com">Discord</a>
</h4>

<p align="center">
@@ -34,14 +34,6 @@ Meilisearch helps you shape a delightful search experience in a snap, offering f

🔥 [**Try it!**](https://where2watch.meilisearch.com/) 🔥

## 🎃 Hacktoberfest

It’s Hacktoberfest 2022 @Meilisearch

[Hacktoberfest](https://hacktoberfest.com/) is a celebration of the open-source community. This year, and for the third time in a row, Meilisearch is participating in this fantastic event.

You’d like to contribute? Don’t hesitate to check out our [contributing guidelines](./CONTRIBUTING.md).

## ✨ Features

- **Search-as-you-type:** find search results in less than 50 milliseconds
@@ -69,7 +61,7 @@ You may also want to check out [Meilisearch 101](https://docs.meilisearch.com/le

## ☁️ Meilisearch cloud

Join the closed beta for Meilisearch cloud by filling out [this form](https://meilisearch.typeform.com/to/VI2cI2rv).
Let us manage your infrastructure so you can focus on integrating a great search experience. Try [Meilisearch Cloud](https://meilisearch.com/pricing) today.

## 🧰 SDKs & integration tools

@@ -105,7 +97,7 @@ Meilisearch is a search engine created by [Meili](https://www.welcometothejungle

- For feature requests, please visit our [product repository](https://github.com/meilisearch/product/discussions)
- Found a bug? Open an [issue](https://github.com/meilisearch/meilisearch/issues)!
- Want to be part of our Slack community? [Join us!](https://slack.meilisearch.com/)
- Want to be part of our Discord community? [Join us!](https://discord.gg/meilisearch)
- For everything else, please check [this page listing some of the other places where you can find us](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html)

Thank you for your support!
@@ -1,7 +1,7 @@
status = [
    'Tests on ubuntu-18.04',
    'Tests on macos-latest',
    'Tests on windows-latest',
    'Tests on macos-12',
    'Tests on windows-2022',
    'Run Clippy',
    'Run Rustfmt',
    'Run tests in debug',
25  config.toml
@@ -28,17 +28,9 @@ http_payload_size_limit = "100 MB"

log_level = "INFO"
# Defines how much detail should be present in Meilisearch's logs.
# Meilisearch currently supports five log levels, listed in order of increasing verbosity: `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`
# Meilisearch currently supports six log levels, listed in order of increasing verbosity: `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`
# https://docs.meilisearch.com/learn/configuration/instance_options.html#log-level

max_index_size = "100 GiB"
# Sets the maximum size of the index.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-index-size

max_task_db_size = "100 GiB"
# Sets the maximum size of the task database.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-task-db-size

# max_indexing_memory = "2 GiB"
# Sets the maximum amount of RAM Meilisearch can use when indexing.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-indexing-memory
@@ -47,16 +39,11 @@ max_task_db_size = "100 GiB"
# Sets the maximum number of threads Meilisearch can use during indexing.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#max-indexing-threads

disable_auto_batching = false
# Deactivates auto-batching when provided.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#disable-auto-batching


#############
### DUMPS ###
#############

dumps_dir = "dumps/"
dump_dir = "dumps/"
# Sets the directory where Meilisearch will create dump files.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#dumps-destination

@@ -78,17 +65,15 @@ ignore_dump_if_db_exists = false
#################

schedule_snapshot = false
# Activates scheduled snapshots when provided.
# Enables scheduled snapshots when true, disable when false (the default).
# If the value is given as an integer, then enables the scheduled snapshot with the passed value as the interval
# between each snapshot, in seconds.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#schedule-snapshot-creation

snapshot_dir = "snapshots/"
# Sets the directory where Meilisearch will store snapshots.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#snapshot-destination

snapshot_interval_sec = 86400
# Defines the interval between each snapshot. Value must be given in seconds.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#snapshot-interval

# import_snapshot = "./path/to/my/snapshot"
# Launches Meilisearch after importing a previously-generated snapshot at the given filepath.
# https://docs.meilisearch.com/learn/configuration/instance_options.html#import-snapshot
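A minimal sketch of the merged snapshot option described above (interval value hypothetical):

    # In config.toml, one key now covers both the on/off switch and the interval:
    #   schedule_snapshot = 3600   # snapshot every hour; `true` enables the default interval
    # then start Meilisearch against that file:
    meilisearch --config-file-path ./config.toml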
@@ -1,5 +1,8 @@
#!/bin/sh

# This script can optionally use a GitHub token to increase your request limit (for example, if using this script in a CI).
# To use a GitHub token, pass it through the GITHUB_PAT environment variable.

# GLOBALS

# Colors
@@ -10,9 +13,6 @@ DEFAULT='\033[0m'
# Project name
PNAME='meilisearch'

# Version regexp i.e. v[number].[number].[number]
GREP_SEMVER_REGEXP='v\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)$'

# GitHub API address
GITHUB_API='https://api.github.com/repos/meilisearch/meilisearch/releases'
# GitHub Release address
@@ -20,126 +20,26 @@ GITHUB_REL='https://github.com/meilisearch/meilisearch/releases/download/'

# FUNCTIONS

# semverParseInto and semverLT from: https://github.com/cloudflare/semver_bash/blob/master/semver.sh
# usage: semverParseInto version major minor patch special
# version: the string version
# major, minor, patch, special: will be assigned by the function
semverParseInto() {
    local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
    # MAJOR
    eval $2=`echo $1 | sed -e "s#$RE#\1#"`
    # MINOR
    eval $3=`echo $1 | sed -e "s#$RE#\2#"`
    # PATCH
    eval $4=`echo $1 | sed -e "s#$RE#\3#"`
    # SPECIAL
    eval $5=`echo $1 | sed -e "s#$RE#\4#"`
}

# usage: semverLT version1 version2
semverLT() {
    local MAJOR_A=0
    local MINOR_A=0
    local PATCH_A=0
    local SPECIAL_A=0

    local MAJOR_B=0
    local MINOR_B=0
    local PATCH_B=0
    local SPECIAL_B=0

    semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
    semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B

    if [ $MAJOR_A -lt $MAJOR_B ]; then
        return 0
    fi
    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -lt $MINOR_B ]; then
        return 0
    fi
    if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -le $MINOR_B ] && [ $PATCH_A -lt $PATCH_B ]; then
        return 0
    fi
    if [ "_$SPECIAL_A" == '_' ] && [ "_$SPECIAL_B" == '_' ] ; then
        return 1
    fi
    if [ "_$SPECIAL_A" == '_' ] && [ "_$SPECIAL_B" != '_' ] ; then
        return 1
    fi
    if [ "_$SPECIAL_A" != '_' ] && [ "_$SPECIAL_B" == '_' ] ; then
        return 0
    fi
    if [ "_$SPECIAL_A" < "_$SPECIAL_B" ]; then
        return 0
    fi

    return 1
}

# Get a token from: https://github.com/settings/tokens to increase rate limit (from 60 to 5000),
# make sure the token scope is set to 'public_repo'.
# Create GITHUB_PAT environment variable once you acquired the token to start using it.
# Returns the tag of the latest stable release (in terms of semver and not of release date).
# Gets the version of the latest stable version of Meilisearch by setting the $latest variable.
# Returns 0 in case of success, 1 otherwise.
get_latest() {
    # temp_file is needed because the grep would start before the download is over
    temp_file=$(mktemp -q /tmp/$PNAME.XXXXXXXXX)
    latest_release="$GITHUB_API/latest"

    if [ $? -ne 0 ]; then
        echo "$0: Can't create temp file, bye bye.."
        echo "$0: Can't create temp file."
        fetch_release_failure_usage
        exit 1
    fi

    if [ -z "$GITHUB_PAT" ]; then
        curl -s $GITHUB_API > "$temp_file" || return 1
        curl -s "$latest_release" > "$temp_file" || return 1
    else
        curl -H "Authorization: token $GITHUB_PAT" -s $GITHUB_API > "$temp_file" || return 1
        curl -H "Authorization: token $GITHUB_PAT" -s "$latest_release" > "$temp_file" || return 1
    fi

    releases=$(cat "$temp_file" | \
        grep -E '"tag_name":|"draft":|"prerelease":' \
        | tr -d ',"' | cut -d ':' -f2 | tr -d ' ')
    # Returns a list of [tag_name draft_boolean prerelease_boolean ...]
    # Ex: v0.10.1 false false v0.9.1-rc.1 false true v0.9.0 false false...

    i=0
    latest=''
    current_tag=''
    for release_info in $releases; do
        # Checking tag_name
        if [ $i -eq 0 ]; then
            # If it's not an alpha or beta release
            if echo "$release_info" | grep -q "$GREP_SEMVER_REGEXP"; then
                current_tag=$release_info
            else
                current_tag=''
            fi
            i=1
        # Checking draft boolean
        elif [ $i -eq 1 ]; then
            if [ "$release_info" = 'true' ]; then
                current_tag=''
            fi
            i=2
        # Checking prerelease boolean
        elif [ $i -eq 2 ]; then
            if [ "$release_info" = 'true' ]; then
                current_tag=''
            fi
            i=0
            # If the current_tag is valid
            if [ "$current_tag" != '' ]; then
                # If there is no latest yes
                if [ "$latest" = '' ]; then
                    latest="$current_tag"
                else
                    # Comparing latest and the current tag
                    semverLT $current_tag $latest
                    if [ $? -eq 1 ]; then
                        latest="$current_tag"
                    fi
                fi
            fi
        fi
    done
    latest="$(cat "$temp_file" | grep '"tag_name":' | cut -d ':' -f2 | tr -d '"' | tr -d ',' | tr -d ' ')"

    rm -f "$temp_file"
    return 0
@@ -174,9 +74,9 @@ get_archi() {
    archi='amd64'
    ;;
    'arm64')
        # MacOS M1
        # macOS M1/M2
        if [ $os = 'macos' ]; then
            archi='amd64'
            archi='apple-silicon'
        else
            archi='aarch64'
        fi
@@ -210,12 +110,13 @@ fetch_release_failure_usage() {
    echo ''
    printf "$RED%s\n$DEFAULT" 'ERROR: Impossible to get the latest stable version of Meilisearch.'
    echo 'Please let us know about this issue: https://github.com/meilisearch/meilisearch/issues/new/choose'
    echo ''
    echo 'In the meantime, you can manually download the appropriate binary from the GitHub release assets here: https://github.com/meilisearch/meilisearch/releases/latest'
}

fill_release_variables() {
    # Fill $latest variable.
    if ! get_latest; then
        # TO CHANGE.
        fetch_release_failure_usage
        exit 1
    fi
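The simplified get_latest above asks the GitHub API for the latest release directly instead of paging through all releases and semver-sorting them; a standalone sketch of the same fetch:

    latest_release='https://api.github.com/repos/meilisearch/meilisearch/releases/latest'
    if [ -z "$GITHUB_PAT" ]; then
        curl -s "$latest_release"
    else
        curl -H "Authorization: token $GITHUB_PAT" -s "$latest_release"
    fi | grep '"tag_name":' | cut -d ':' -f2 | tr -d '", '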
@@ -1,6 +1,6 @@
[package]
name = "dump"
version = "0.30.0"
version = "1.0.0"
edition = "2021"

[dependencies]
@@ -3,8 +3,6 @@ use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
    #[error("The version 1 of the dumps is not supported anymore. You can re-export your dump from a version between 0.21 and 0.24, or start fresh from a version 0.25 onwards.")]
    DumpV1Unsupported,
    #[error("Bad index name.")]
    BadIndexName,
    #[error("Malformed task.")]
@@ -21,14 +19,14 @@ pub enum Error {
impl ErrorCode for Error {
    fn error_code(&self) -> Code {
        match self {
            // Are these three really Internal errors?
            // TODO look at that later.
            Error::Io(_) => Code::Internal,
            Error::Io(e) => e.error_code(),

            // These errors either happen when creating a dump and don't need any error code,
            // or come from an internal bad deserialization.
            Error::Serde(_) => Code::Internal,
            Error::Uuid(_) => Code::Internal,

            // all these errors should never be raised when creating a dump, thus no error code should be associated.
            Error::DumpV1Unsupported => Code::Internal,
            Error::BadIndexName => Code::Internal,
            Error::MalformedTask => Code::Internal,
        }
@@ -23,7 +23,7 @@ const CURRENT_DUMP_VERSION: Version = Version::V6;

type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    pub dump_version: Version,
@@ -32,7 +32,7 @@ pub struct Metadata {
    pub dump_date: OffsetDateTime,
}

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct IndexMetadata {
    pub uid: String,
@@ -43,7 +43,7 @@ pub struct IndexMetadata {
    pub updated_at: OffsetDateTime,
}

#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
pub enum Version {
    V1,
    V2,
@@ -87,7 +87,7 @@ pub struct TaskDump {
    pub finished_at: Option<OffsetDateTime>,
}

// A `Kind` specific version made for the dump. If modified you may break the dump.
// A `Kind` specific version made for the dump. If modified you may break the dump.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum KindDump {
@@ -125,7 +125,6 @@ pub enum KindDump {
        tasks: RoaringBitmap,
    },
    DumpCreation {
        dump_uid: String,
        keys: Vec<Key>,
        instance_uid: Option<InstanceUid>,
    },
@@ -188,8 +187,8 @@ impl From<KindWithContent> for KindDump {
            KindWithContent::TaskDeletion { query, tasks } => {
                KindDump::TasksDeletion { query, tasks }
            }
            KindWithContent::DumpCreation { dump_uid, keys, instance_uid } => {
                KindDump::DumpCreation { dump_uid, keys, instance_uid }
            KindWithContent::DumpCreation { keys, instance_uid } => {
                KindDump::DumpCreation { keys, instance_uid }
            }
            KindWithContent::SnapshotCreation => KindDump::SnapshotCreation,
        }
@@ -199,7 +198,7 @@ impl From<KindWithContent> for KindDump {
#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::{Seek, SeekFrom};
    use std::io::Seek;
    use std::str::FromStr;

    use big_s::S;
@@ -411,13 +410,12 @@ pub(crate) mod test {
        // create the dump
        let mut file = tempfile::tempfile().unwrap();
        dump.persist_to(&mut file).unwrap();
        file.seek(SeekFrom::Start(0)).unwrap();
        file.rewind().unwrap();

        file
    }

    #[test]
    #[ignore]
    fn test_creating_and_read_dump() {
        let mut file = create_test_dump();
        let mut dump = DumpReader::open(&mut file).unwrap();
@@ -1,3 +1,4 @@
pub mod v1_to_v2;
pub mod v2_to_v3;
pub mod v3_to_v4;
pub mod v4_to_v5;
@@ -0,0 +1,38 @@
---
source: dump/src/reader/compat/v1_to_v2.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
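This file and the ones that follow are `insta` snapshot files: running a test that calls `insta::assert_json_snapshot!(value)` without an inline snapshot writes the serialized value to a `.snap` file carrying the `source` and `expression` header shown here, and later runs diff against it. A minimal sketch of the producing side (assumes the `insta` and `serde_json` crates):

#[cfg(test)]
mod tests {
    #[test]
    fn products_settings_snapshot() {
        let settings = serde_json::json!({
            "displayedAttributes": ["*"],
            "stopWords": [],
        });
        // The first run records the snapshot on disk; later runs compare
        // against it and fail on any difference until the new snapshot is
        // reviewed and accepted.
        insta::assert_json_snapshot!(settings);
    }
}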
@@ -0,0 +1,31 @@
---
source: dump/src/reader/compat/v1_to_v2.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "genres",
    "id"
  ],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/compat/v1_to_v2.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,37 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/compat/v2_to_v3.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,31 @@
---
source: dump/src/reader/compat/v3_to_v4.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,56 @@
---
source: dump/src/reader/compat/v4_to_v5.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": "Reset",
  "searchableAttributes": "Reset",
  "filterableAttributes": {
    "Set": []
  },
  "sortableAttributes": {
    "Set": []
  },
  "rankingRules": {
    "Set": [
      "words",
      "typo",
      "proximity",
      "attribute",
      "sort",
      "exactness"
    ]
  },
  "stopWords": {
    "Set": []
  },
  "synonyms": {
    "Set": {}
  },
  "distinctAttribute": "Reset",
  "typoTolerance": {
    "Set": {
      "enabled": {
        "Set": true
      },
      "minWordSizeForTypos": {
        "Set": {
          "oneTypo": {
            "Set": 5
          },
          "twoTypos": {
            "Set": 9
          }
        }
      },
      "disableOnWords": {
        "Set": []
      },
      "disableOnAttributes": {
        "Set": []
      }
    }
  },
  "faceting": "NotSet",
  "pagination": "NotSet"
}
@@ -0,0 +1,70 @@
---
source: dump/src/reader/compat/v4_to_v5.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": "Reset",
  "searchableAttributes": "Reset",
  "filterableAttributes": {
    "Set": []
  },
  "sortableAttributes": {
    "Set": []
  },
  "rankingRules": {
    "Set": [
      "words",
      "typo",
      "proximity",
      "attribute",
      "sort",
      "exactness"
    ]
  },
  "stopWords": {
    "Set": []
  },
  "synonyms": {
    "Set": {
      "android": [
        "phone",
        "smartphone"
      ],
      "iphone": [
        "phone",
        "smartphone"
      ],
      "phone": [
        "android",
        "iphone",
        "smartphone"
      ]
    }
  },
  "distinctAttribute": "Reset",
  "typoTolerance": {
    "Set": {
      "enabled": {
        "Set": true
      },
      "minWordSizeForTypos": {
        "Set": {
          "oneTypo": {
            "Set": 5
          },
          "twoTypos": {
            "Set": 9
          }
        }
      },
      "disableOnWords": {
        "Set": []
      },
      "disableOnAttributes": {
        "Set": []
      }
    }
  },
  "faceting": "NotSet",
  "pagination": "NotSet"
}
@@ -0,0 +1,62 @@
---
source: dump/src/reader/compat/v4_to_v5.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": "Reset",
  "searchableAttributes": "Reset",
  "filterableAttributes": {
    "Set": [
      "genres",
      "id"
    ]
  },
  "sortableAttributes": {
    "Set": [
      "release_date"
    ]
  },
  "rankingRules": {
    "Set": [
      "words",
      "typo",
      "proximity",
      "attribute",
      "sort",
      "exactness",
      "release_date:asc"
    ]
  },
  "stopWords": {
    "Set": []
  },
  "synonyms": {
    "Set": {}
  },
  "distinctAttribute": "Reset",
  "typoTolerance": {
    "Set": {
      "enabled": {
        "Set": true
      },
      "minWordSizeForTypos": {
        "Set": {
          "oneTypo": {
            "Set": 5
          },
          "twoTypos": {
            "Set": 9
          }
        }
      },
      "disableOnWords": {
        "Set": []
      },
      "disableOnAttributes": {
        "Set": []
      }
    }
  },
  "faceting": "NotSet",
  "pagination": "NotSet"
}
@@ -0,0 +1,40 @@
---
source: dump/src/reader/compat/v5_to_v6.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,54 @@
---
source: dump/src/reader/compat/v5_to_v6.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,46 @@
---
source: dump/src/reader/compat/v5_to_v6.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
dump/src/reader/compat/v1_to_v2.rs (new file, 410 lines)
@@ -0,0 +1,410 @@
use std::str::FromStr;

use super::v2_to_v3::CompatV2ToV3;
use crate::reader::{v1, v2, Document};
use crate::Result;

pub struct CompatV1ToV2 {
    pub from: v1::V1Reader,
}

impl CompatV1ToV2 {
    pub fn new(v1: v1::V1Reader) -> Self {
        Self { from: v1 }
    }

    pub fn to_v3(self) -> CompatV2ToV3 {
        CompatV2ToV3::Compat(self)
    }

    pub fn version(&self) -> crate::Version {
        self.from.version()
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        self.from.date()
    }

    pub fn index_uuid(&self) -> Vec<v2::meta::IndexUuid> {
        self.from
            .index_uuid()
            .into_iter()
            .enumerate()
            // we use the index's position 😬 as its UUID, so that we can link each v2::Task to its index
            .map(|(index, index_uuid)| v2::meta::IndexUuid {
                uid: index_uuid.uid,
                uuid: uuid::Uuid::from_u128(index as u128),
            })
            .collect()
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<CompatIndexV1ToV2>> + '_> {
        Ok(self.from.indexes()?.map(|index_reader| Ok(CompatIndexV1ToV2 { from: index_reader? })))
    }

    pub fn tasks(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<(v2::Task, Option<v2::UpdateFile>)>> + '_> {
        // Convert an error here into an iterator yielding that error
        let indexes = match self.from.indexes() {
            Ok(indexes) => indexes,
            Err(err) => return Box::new(std::iter::once(Err(err))),
        };
        let it = indexes.enumerate().flat_map(
            move |(index, index_reader)| -> Box<dyn Iterator<Item = _>> {
                let index_reader = match index_reader {
                    Ok(index_reader) => index_reader,
                    Err(err) => return Box::new(std::iter::once(Err(err))),
                };
                Box::new(
                    index_reader
                        .tasks()
                        // Filter out the UpdateStatus::Customs variant that is not supported in v2,
                        // and enqueued tasks, which don't contain the necessary update file in v1
                        .filter_map(move |task| -> Option<_> {
                            let task = match task {
                                Ok(task) => task,
                                Err(err) => return Some(Err(err)),
                            };
                            Some(Ok((
                                v2::Task {
                                    uuid: uuid::Uuid::from_u128(index as u128),
                                    update: Option::from(task)?,
                                },
                                None,
                            )))
                        }),
                )
            },
        );
        Box::new(it)
    }
}

pub struct CompatIndexV1ToV2 {
    pub from: v1::V1IndexReader,
}

impl CompatIndexV1ToV2 {
    pub fn metadata(&self) -> &crate::IndexMetadata {
        self.from.metadata()
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<Document>> + '_>> {
        self.from.documents().map(|it| Box::new(it) as Box<dyn Iterator<Item = _>>)
    }

    pub fn settings(&mut self) -> Result<v2::settings::Settings<v2::settings::Checked>> {
        Ok(v2::settings::Settings::<v2::settings::Unchecked>::from(self.from.settings()?).check())
    }
}

impl From<v1::settings::Settings> for v2::Settings<v2::Unchecked> {
    fn from(source: v1::settings::Settings) -> Self {
        Self {
            displayed_attributes: option_to_setting(source.displayed_attributes)
                .map(|displayed| displayed.into_iter().collect()),
            searchable_attributes: option_to_setting(source.searchable_attributes),
            filterable_attributes: option_to_setting(source.attributes_for_faceting.clone())
                .map(|filterable| filterable.into_iter().collect()),
            sortable_attributes: option_to_setting(source.attributes_for_faceting)
                .map(|sortable| sortable.into_iter().collect()),
            ranking_rules: option_to_setting(source.ranking_rules).map(|ranking_rules| {
                ranking_rules
                    .into_iter()
                    .filter_map(|ranking_rule| {
                        match v1::settings::RankingRule::from_str(&ranking_rule) {
                            Ok(ranking_rule) => {
                                let criterion: Option<v2::settings::Criterion> =
                                    ranking_rule.into();
                                criterion.as_ref().map(ToString::to_string)
                            }
                            Err(()) => {
                                log::warn!(
                                    "Could not import the following ranking rule: `{}`.",
                                    ranking_rule
                                );
                                None
                            }
                        }
                    })
                    .collect()
            }),
            stop_words: option_to_setting(source.stop_words),
            synonyms: option_to_setting(source.synonyms),
            distinct_attribute: option_to_setting(source.distinct_attribute),
            _kind: std::marker::PhantomData,
        }
    }
}

fn option_to_setting<T>(opt: Option<Option<T>>) -> v2::Setting<T> {
    match opt {
        Some(Some(t)) => v2::Setting::Set(t),
        None => v2::Setting::NotSet,
        Some(None) => v2::Setting::Reset,
    }
}

impl From<v1::update::UpdateStatus> for Option<v2::updates::UpdateStatus> {
    fn from(source: v1::update::UpdateStatus) -> Self {
        use v1::update::UpdateStatus as UpdateStatusV1;
        use v2::updates::UpdateStatus as UpdateStatusV2;
        Some(match source {
            UpdateStatusV1::Enqueued { content } => {
                log::warn!(
                    "Cannot import task {} (importing enqueued tasks from v1 dumps is unsupported)",
                    content.update_id
                );
                log::warn!("Task will be skipped in the queue of imported tasks.");

                return None;
            }
            UpdateStatusV1::Failed { content } => UpdateStatusV2::Failed(v2::updates::Failed {
                from: v2::updates::Processing {
                    from: v2::updates::Enqueued {
                        update_id: content.update_id,
                        meta: Option::from(content.update_type)?,
                        enqueued_at: content.enqueued_at,
                        content: None,
                    },
                    started_processing_at: content.processed_at
                        - std::time::Duration::from_secs_f64(content.duration),
                },
                error: v2::ResponseError {
                    // the error code is ignored by serialization, and so is always the default in deserialized v2 dumps;
                    // that's a good thing, because we don't have it in v1 dumps 😅
                    code: http::StatusCode::default(),
                    message: content.error.unwrap_or_default(),
                    // error codes are unchanged between v1 and v2
                    error_code: content.error_code.unwrap_or_default(),
                    // error types are unchanged between v1 and v2
                    error_type: content.error_type.unwrap_or_default(),
                    // error links are unchanged between v1 and v2
                    error_link: content.error_link.unwrap_or_default(),
                },
                failed_at: content.processed_at,
            }),
            UpdateStatusV1::Processed { content } => {
                UpdateStatusV2::Processed(v2::updates::Processed {
                    success: match &content.update_type {
                        v1::update::UpdateType::ClearAll => {
                            v2::updates::UpdateResult::DocumentDeletion { deleted: u64::MAX }
                        }
                        v1::update::UpdateType::Customs => v2::updates::UpdateResult::Other,
                        v1::update::UpdateType::DocumentsAddition { number } => {
                            v2::updates::UpdateResult::DocumentsAddition(
                                v2::updates::DocumentAdditionResult { nb_documents: *number },
                            )
                        }
                        v1::update::UpdateType::DocumentsPartial { number } => {
                            v2::updates::UpdateResult::DocumentsAddition(
                                v2::updates::DocumentAdditionResult { nb_documents: *number },
                            )
                        }
                        v1::update::UpdateType::DocumentsDeletion { number } => {
                            v2::updates::UpdateResult::DocumentDeletion { deleted: *number as u64 }
                        }
                        v1::update::UpdateType::Settings { .. } => v2::updates::UpdateResult::Other,
                    },
                    processed_at: content.processed_at,
                    from: v2::updates::Processing {
                        from: v2::updates::Enqueued {
                            update_id: content.update_id,
                            meta: Option::from(content.update_type)?,
                            enqueued_at: content.enqueued_at,
                            content: None,
                        },
                        started_processing_at: content.processed_at
                            - std::time::Duration::from_secs_f64(content.duration),
                    },
                })
            }
        })
    }
}

impl From<v1::update::UpdateType> for Option<v2::updates::UpdateMeta> {
    fn from(source: v1::update::UpdateType) -> Self {
        Some(match source {
            v1::update::UpdateType::ClearAll => v2::updates::UpdateMeta::ClearDocuments,
            v1::update::UpdateType::Customs => {
                log::warn!("Ignoring task with type 'Customs' that is no longer supported");
                return None;
            }
            v1::update::UpdateType::DocumentsAddition { .. } => {
                v2::updates::UpdateMeta::DocumentsAddition {
                    method: v2::updates::IndexDocumentsMethod::ReplaceDocuments,
                    format: v2::updates::UpdateFormat::Json,
                    primary_key: None,
                }
            }
            v1::update::UpdateType::DocumentsPartial { .. } => {
                v2::updates::UpdateMeta::DocumentsAddition {
                    method: v2::updates::IndexDocumentsMethod::UpdateDocuments,
                    format: v2::updates::UpdateFormat::Json,
                    primary_key: None,
                }
            }
            v1::update::UpdateType::DocumentsDeletion { .. } => {
                v2::updates::UpdateMeta::DeleteDocuments { ids: vec![] }
            }
            v1::update::UpdateType::Settings { settings } => {
                v2::updates::UpdateMeta::Settings((*settings).into())
            }
        })
    }
}

impl From<v1::settings::SettingsUpdate> for v2::Settings<v2::Unchecked> {
    fn from(source: v1::settings::SettingsUpdate) -> Self {
        let ranking_rules = v2::Setting::from(source.ranking_rules);

        // go from the concrete types of v1 (RankingRule) to the concrete type of v2 (Criterion),
        // and then back to strings, as this is what the settings manipulate
        let ranking_rules = ranking_rules.map(|ranking_rules| {
            ranking_rules
                .into_iter()
                // filter out the WordsPosition ranking rule that exists in v1 but not in v2
                .filter_map(Option::<v2::settings::Criterion>::from)
                .map(|criterion| criterion.to_string())
                .collect()
        });

        Self {
            displayed_attributes: v2::Setting::from(source.displayed_attributes)
                .map(|displayed_attributes| displayed_attributes.into_iter().collect()),
            searchable_attributes: source.searchable_attributes.into(),
            filterable_attributes: v2::Setting::from(source.attributes_for_faceting.clone())
                .map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect()),
            sortable_attributes: v2::Setting::from(source.attributes_for_faceting)
                .map(|attributes_for_faceting| attributes_for_faceting.into_iter().collect()),
            ranking_rules,
            stop_words: source.stop_words.into(),
            synonyms: source.synonyms.into(),
            distinct_attribute: source.distinct_attribute.into(),
            _kind: std::marker::PhantomData,
        }
    }
}

impl From<v1::settings::RankingRule> for Option<v2::settings::Criterion> {
    fn from(source: v1::settings::RankingRule) -> Self {
        match source {
            v1::settings::RankingRule::Typo => Some(v2::settings::Criterion::Typo),
            v1::settings::RankingRule::Words => Some(v2::settings::Criterion::Words),
            v1::settings::RankingRule::Proximity => Some(v2::settings::Criterion::Proximity),
            v1::settings::RankingRule::Attribute => Some(v2::settings::Criterion::Attribute),
            v1::settings::RankingRule::WordsPosition => {
                log::warn!("Removing the 'WordsPosition' ranking rule that is no longer supported, please check the resulting ranking rules of your indexes");
                None
            }
            v1::settings::RankingRule::Exactness => Some(v2::settings::Criterion::Exactness),
            v1::settings::RankingRule::Asc(field_name) => {
                Some(v2::settings::Criterion::Asc(field_name))
            }
            v1::settings::RankingRule::Desc(field_name) => {
                Some(v2::settings::Criterion::Desc(field_name))
            }
        }
    }
}

impl<T> From<v1::settings::UpdateState<T>> for v2::Setting<T> {
    fn from(source: v1::settings::UpdateState<T>) -> Self {
        match source {
            v1::settings::UpdateState::Update(new_value) => v2::Setting::Set(new_value),
            v1::settings::UpdateState::Clear => v2::Setting::Reset,
            v1::settings::UpdateState::Nothing => v2::Setting::NotSet,
        }
    }
}
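The `option_to_setting` helper and the `UpdateState` conversion above both encode the same three-way convention: a missing key means "not set", an explicit null means "reset", and a value means "set". A minimal self-contained model of that mapping (the `Setting` type here is a local stand-in, not the real `v2` type):

#[derive(Debug, PartialEq)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

// `None` = key absent = NotSet; `Some(None)` = key present but null = Reset.
fn option_to_setting<T>(opt: Option<Option<T>>) -> Setting<T> {
    match opt {
        Some(Some(t)) => Setting::Set(t),
        Some(None) => Setting::Reset,
        None => Setting::NotSet,
    }
}

fn main() {
    assert_eq!(option_to_setting(Some(Some(5))), Setting::Set(5));
    assert_eq!(option_to_setting::<u32>(Some(None)), Setting::Reset);
    assert_eq!(option_to_setting::<u32>(None), Setting::NotSet);
}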
#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn compat_v1_v2() {
        let dump = File::open("tests/assets/v1.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = v1::V1Reader::open(dir).unwrap().to_v2();

        // top level infos
        assert_eq!(dump.date(), None);

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2298010973ee98cf4670787314176a3a");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[..].iter().all(|u| u.is_none())); // no update files in v1 dumps

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-02T13:23:39.976870431Z",
          "updatedAt": "2022-10-02T13:27:54.353262482Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-02T13:15:29.477512777Z",
          "updatedAt": "2022-10-02T13:21:12.671204856Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b63dbed5bbc059f3e32bc471ae699bf5");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-02T13:38:26.358882984Z",
          "updatedAt": "2022-10-02T13:38:26.385609433Z"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"aa24c0cfc733d66c396237ad44263bed");
    }
}
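Since v1 dumps carry no index UUIDs, the compat layer above manufactures stable ones from each index's position in the list, which is enough to link tasks to indexes within a single dump. A minimal sketch of that trick (assumes the `uuid` crate):

fn uuid_for_position(position: usize) -> uuid::Uuid {
    // Big-endian u128 conversion: position 1 becomes ...0001, and the same
    // position always yields the same UUID, so task-to-index links stay stable.
    uuid::Uuid::from_u128(position as u128)
}

fn main() {
    assert_eq!(
        uuid_for_position(1).to_string(),
        "00000000-0000-0000-0000-000000000001"
    );
    assert_eq!(uuid_for_position(2), uuid_for_position(2));
}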
@@ -4,22 +4,28 @@ use std::str::FromStr;
use time::OffsetDateTime;
use uuid::Uuid;

use super::v1_to_v2::{CompatIndexV1ToV2, CompatV1ToV2};
use super::v3_to_v4::CompatV3ToV4;
use crate::reader::{v2, v3, Document};
use crate::Result;

pub struct CompatV2ToV3 {
    pub from: v2::V2Reader,
pub enum CompatV2ToV3 {
    V2(v2::V2Reader),
    Compat(CompatV1ToV2),
}

impl CompatV2ToV3 {
    pub fn new(v2: v2::V2Reader) -> CompatV2ToV3 {
        CompatV2ToV3 { from: v2 }
        CompatV2ToV3::V2(v2)
    }

    pub fn index_uuid(&self) -> Vec<v3::meta::IndexUuid> {
        self.from
            .index_uuid()
        let v2_uuids = match self {
            CompatV2ToV3::V2(from) => from.index_uuid(),
            CompatV2ToV3::Compat(compat) => compat.index_uuid(),
        };
        v2_uuids
            .into_iter()
            .map(|index| v3::meta::IndexUuid { uid: index.uid, uuid: index.uuid })
            .collect()
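Turning `CompatV2ToV3` from a struct into an enum is what lets the v2-to-v3 layer accept either a native v2 reader or the new v1-to-v2 compat layer; every method then dispatches on the variant, as the hunks below show. A reduced model of the pattern (the two reader types here are stand-ins, not the real readers):

struct V2Reader;
struct CompatV1ToV2;

impl V2Reader {
    fn version(&self) -> &'static str { "v2" }
}
impl CompatV1ToV2 {
    fn version(&self) -> &'static str { "v1 (via compat)" }
}

enum CompatV2ToV3 {
    V2(V2Reader),
    Compat(CompatV1ToV2),
}

impl CompatV2ToV3 {
    // Every method forwards to whichever source the enum holds.
    fn version(&self) -> &'static str {
        match self {
            CompatV2ToV3::V2(from) => from.version(),
            CompatV2ToV3::Compat(compat) => compat.version(),
        }
    }
}

fn main() {
    assert_eq!(CompatV2ToV3::V2(V2Reader).version(), "v2");
    assert_eq!(CompatV2ToV3::Compat(CompatV1ToV2).version(), "v1 (via compat)");
}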
@@ -30,11 +36,17 @@ impl CompatV2ToV3 {
    }

    pub fn version(&self) -> crate::Version {
        self.from.version()
        match self {
            CompatV2ToV3::V2(from) => from.version(),
            CompatV2ToV3::Compat(compat) => compat.version(),
        }
    }

    pub fn date(&self) -> Option<time::OffsetDateTime> {
        self.from.date()
        match self {
            CompatV2ToV3::V2(from) => from.date(),
            CompatV2ToV3::Compat(compat) => compat.date(),
        }
    }

    pub fn instance_uid(&self) -> Result<Option<uuid::Uuid>> {
@@ -42,10 +54,18 @@ impl CompatV2ToV3 {
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<CompatIndexV2ToV3>> + '_> {
        Ok(self.from.indexes()?.map(|index_reader| -> Result<_> {
            let compat = CompatIndexV2ToV3::new(index_reader?);
            Ok(compat)
        }))
        Ok(match self {
            CompatV2ToV3::V2(from) => Box::new(from.indexes()?.map(|index_reader| -> Result<_> {
                let compat = CompatIndexV2ToV3::new(index_reader?);
                Ok(compat)
            }))
                as Box<dyn Iterator<Item = Result<CompatIndexV2ToV3>> + '_>,
            CompatV2ToV3::Compat(compat) => Box::new(compat.indexes()?.map(|index_reader| {
                let compat = CompatIndexV2ToV3::Compat(Box::new(index_reader?));
                Ok(compat)
            }))
                as Box<dyn Iterator<Item = Result<CompatIndexV2ToV3>> + '_>,
        })
    }

    pub fn tasks(
@@ -54,11 +74,13 @@ impl CompatV2ToV3 {
        dyn Iterator<Item = Result<(v3::Task, Option<Box<dyn Iterator<Item = Result<Document>>>>)>>
            + '_,
    > {
        let _indexes = self.from.index_uuid.clone();
        let tasks = match self {
            CompatV2ToV3::V2(from) => from.tasks(),
            CompatV2ToV3::Compat(compat) => compat.tasks(),
        };

        Box::new(
            self.from
                .tasks()
            tasks
                .map(move |task| {
                    task.map(|(task, content_file)| {
                        let task = v3::Task { uuid: task.uuid, update: task.update.into() };
@@ -76,27 +98,38 @@ impl CompatV2ToV3 {
    }
}

pub struct CompatIndexV2ToV3 {
    from: v2::V2IndexReader,
pub enum CompatIndexV2ToV3 {
    V2(v2::V2IndexReader),
    Compat(Box<CompatIndexV1ToV2>),
}

impl CompatIndexV2ToV3 {
    pub fn new(v2: v2::V2IndexReader) -> CompatIndexV2ToV3 {
        CompatIndexV2ToV3 { from: v2 }
        CompatIndexV2ToV3::V2(v2)
    }

    pub fn metadata(&self) -> &crate::IndexMetadata {
        self.from.metadata()
        match self {
            CompatIndexV2ToV3::V2(from) => from.metadata(),
            CompatIndexV2ToV3::Compat(compat) => compat.metadata(),
        }
    }

    pub fn documents(&mut self) -> Result<Box<dyn Iterator<Item = Result<Document>> + '_>> {
        self.from
            .documents()
            .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>)
        match self {
            CompatIndexV2ToV3::V2(from) => from
                .documents()
                .map(|iter| Box::new(iter) as Box<dyn Iterator<Item = Result<Document>> + '_>),
            CompatIndexV2ToV3::Compat(compat) => compat.documents(),
        }
    }

    pub fn settings(&mut self) -> Result<v3::Settings<v3::Checked>> {
        Ok(v3::Settings::<v3::Unchecked>::from(self.from.settings()?).check())
        let settings = match self {
            CompatIndexV2ToV3::V2(from) => from.settings()?,
            CompatIndexV2ToV3::Compat(compat) => compat.settings()?,
        };
        Ok(v3::Settings::<v3::Unchecked>::from(settings).check())
    }
}

@@ -328,28 +361,29 @@ impl From<String> for v3::Code {
    }
}

fn option_to_setting<T>(opt: Option<Option<T>>) -> v3::Setting<T> {
    match opt {
        Some(Some(t)) => v3::Setting::Set(t),
        None => v3::Setting::NotSet,
        Some(None) => v3::Setting::Reset,
impl<A> From<v2::Setting<A>> for v3::Setting<A> {
    fn from(setting: v2::Setting<A>) -> Self {
        match setting {
            v2::settings::Setting::Set(a) => v3::settings::Setting::Set(a),
            v2::settings::Setting::Reset => v3::settings::Setting::Reset,
            v2::settings::Setting::NotSet => v3::settings::Setting::NotSet,
        }
    }
}

impl<T> From<v2::Settings<T>> for v3::Settings<v3::Unchecked> {
    fn from(settings: v2::Settings<T>) -> Self {
        v3::Settings {
            displayed_attributes: option_to_setting(settings.displayed_attributes),
            searchable_attributes: option_to_setting(settings.searchable_attributes),
            filterable_attributes: option_to_setting(settings.filterable_attributes)
                .map(|f| f.into_iter().collect()),
            sortable_attributes: v3::Setting::NotSet,
            ranking_rules: option_to_setting(settings.ranking_rules).map(|criteria| {
            displayed_attributes: settings.displayed_attributes.into(),
            searchable_attributes: settings.searchable_attributes.into(),
            filterable_attributes: settings.filterable_attributes.into(),
            sortable_attributes: settings.sortable_attributes.into(),
            ranking_rules: v3::Setting::from(settings.ranking_rules).map(|criteria| {
                criteria.into_iter().map(|criterion| patch_ranking_rules(&criterion)).collect()
            }),
            stop_words: option_to_setting(settings.stop_words),
            synonyms: option_to_setting(settings.synonyms),
            distinct_attribute: option_to_setting(settings.distinct_attribute),
            stop_words: settings.stop_words.into(),
            synonyms: settings.synonyms.into(),
            distinct_attribute: settings.distinct_attribute.into(),
            _kind: std::marker::PhantomData,
        }
    }
@@ -361,6 +395,7 @@ fn patch_ranking_rules(ranking_rule: &str) -> String {
        Ok(v2::settings::Criterion::Typo) => String::from("typo"),
        Ok(v2::settings::Criterion::Proximity) => String::from("proximity"),
        Ok(v2::settings::Criterion::Attribute) => String::from("attribute"),
        Ok(v2::settings::Criterion::Sort) => String::from("sort"),
        Ok(v2::settings::Criterion::Exactness) => String::from("exactness"),
        Ok(v2::settings::Criterion::Asc(name)) => format!("{name}:asc"),
        Ok(v2::settings::Criterion::Desc(name)) => format!("{name}:desc"),
@@ -381,7 +416,6 @@ pub(crate) mod test {
    use super::*;

    #[test]
    #[ignore]
    fn compat_v2_v3() {
        let dump = File::open("tests/assets/v2.dump").unwrap();
        let dir = TempDir::new().unwrap();
@@ -427,7 +461,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"54b3d7a0d96de35427d867fa17164a99");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
@@ -442,7 +476,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"ae7c5ade2243a553152dab2f354e9095");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");
@@ -457,7 +491,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies2.settings()), @"1be82b894556d23953af557b6a328a58");
        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");
@@ -472,7 +506,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"1be82b894556d23953af557b6a328a58");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
@@ -347,7 +347,6 @@ pub(crate) mod test {
    use super::*;

    #[test]
    #[ignore]
    fn compat_v3_v4() {
        let dump = File::open("tests/assets/v3.dump").unwrap();
        let dir = TempDir::new().unwrap();
@@ -397,7 +396,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"d3402aff19b90acea9e9a07c466690aa");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
@@ -412,7 +411,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"687aaab250f01b55d57bc69aa313b581");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");
@@ -427,7 +426,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies2.settings()), @"cd9fedbd7e3492831a94da62c90013ea");
        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");
@@ -442,7 +441,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"cd9fedbd7e3492831a94da62c90013ea");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
@@ -383,7 +383,6 @@ pub(crate) mod test {
    use super::*;

    #[test]
    #[ignore]
    fn compat_v4_v5() {
        let dump = File::open("tests/assets/v4.dump").unwrap();
        let dir = TempDir::new().unwrap();
@@ -430,7 +429,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"26947283836ee4cdf0974f82efcc5332");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");
@@ -445,7 +444,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"156871410d17e23803d0c90ddc6a66cb");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"786022a66ecb992c8a2a60fee070a5ab");
@@ -460,7 +459,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"69c9916142612cf4a2da9b9ed9455e9e");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
@@ -1,3 +1,5 @@
use std::str::FromStr;

use super::v4_to_v5::{CompatIndexV4ToV5, CompatV4ToV5};
use crate::reader::{v5, v6, Document, UpdateFile};
use crate::Result;
@@ -119,11 +121,10 @@ impl CompatV5ToV6 {
                    allow_index_creation,
                    settings: Box::new(settings.into()),
                },
                v5::tasks::TaskContent::Dump { uid } => v6::Kind::DumpCreation {
                    dump_uid: uid,
                    keys: keys.clone(),
                    instance_uid,
                },
                v5::tasks::TaskContent::Dump { uid: _ } => {
                    // in v6 we compute the dump_uid from the started_at processing time
                    v6::Kind::DumpCreation { keys: keys.clone(), instance_uid }
                }
            },
            canceled_by: None,
            details: task_view.details.map(|details| match details {
@@ -143,13 +144,15 @@ impl CompatV5ToV6 {
                    received_document_ids,
                    deleted_documents,
                } => v6::Details::DocumentDeletion {
                    matched_documents: received_document_ids,
                    provided_ids: received_document_ids,
                    deleted_documents,
                },
                v5::Details::ClearAll { deleted_documents } => {
                    v6::Details::ClearAll { deleted_documents }
                }
                v5::Details::Dump { dump_uid } => v6::Details::Dump { dump_uid },
                v5::Details::Dump { dump_uid } => {
                    v6::Details::Dump { dump_uid: Some(dump_uid) }
                }
            }),
            error: task_view.error.map(|e| e.into()),
            enqueued_at: task_view.enqueued_at,
@@ -253,51 +256,50 @@ impl<T> From<v5::Setting<T>> for v6::Setting<T> {
impl From<v5::ResponseError> for v6::ResponseError {
    fn from(error: v5::ResponseError) -> Self {
        let code = match error.error_code.as_ref() {
            "index_creation_failed" => v6::Code::CreateIndex,
            "index_creation_failed" => v6::Code::IndexCreationFailed,
            "index_already_exists" => v6::Code::IndexAlreadyExists,
            "index_not_found" => v6::Code::IndexNotFound,
            "invalid_index_uid" => v6::Code::InvalidIndexUid,
            "invalid_min_word_length_for_typo" => v6::Code::InvalidMinWordLengthForTypo,
            "invalid_min_word_length_for_typo" => v6::Code::InvalidSettingsTypoTolerance,
            "invalid_state" => v6::Code::InvalidState,
            "primary_key_inference_failed" => v6::Code::MissingPrimaryKey,
            "index_primary_key_already_exists" => v6::Code::PrimaryKeyAlreadyPresent,
            "primary_key_inference_failed" => v6::Code::IndexPrimaryKeyNoCandidateFound,
            "index_primary_key_already_exists" => v6::Code::IndexPrimaryKeyAlreadyExists,
            "max_fields_limit_exceeded" => v6::Code::MaxFieldsLimitExceeded,
            "missing_document_id" => v6::Code::MissingDocumentId,
            "invalid_document_id" => v6::Code::InvalidDocumentId,
            "invalid_filter" => v6::Code::Filter,
            "invalid_sort" => v6::Code::Sort,
            "invalid_filter" => v6::Code::InvalidSettingsFilterableAttributes,
            "invalid_sort" => v6::Code::InvalidSettingsSortableAttributes,
            "bad_parameter" => v6::Code::BadParameter,
            "bad_request" => v6::Code::BadRequest,
            "database_size_limit_reached" => v6::Code::DatabaseSizeLimitReached,
            "document_not_found" => v6::Code::DocumentNotFound,
            "internal" => v6::Code::Internal,
            "invalid_geo_field" => v6::Code::InvalidGeoField,
            "invalid_ranking_rule" => v6::Code::InvalidRankingRule,
            "invalid_store_file" => v6::Code::InvalidStore,
            "invalid_api_key" => v6::Code::InvalidToken,
            "invalid_geo_field" => v6::Code::InvalidDocumentGeoField,
            "invalid_ranking_rule" => v6::Code::InvalidSettingsRankingRules,
            "invalid_store_file" => v6::Code::InvalidStoreFile,
            "invalid_api_key" => v6::Code::InvalidApiKey,
            "missing_authorization_header" => v6::Code::MissingAuthorizationHeader,
            "no_space_left_on_device" => v6::Code::NoSpaceLeftOnDevice,
            "dump_not_found" => v6::Code::DumpNotFound,
            "task_not_found" => v6::Code::TaskNotFound,
            "payload_too_large" => v6::Code::PayloadTooLarge,
            "unretrievable_document" => v6::Code::RetrieveDocument,
            "search_error" => v6::Code::SearchDocuments,
            "unretrievable_document" => v6::Code::UnretrievableDocument,
            "unsupported_media_type" => v6::Code::UnsupportedMediaType,
            "dump_already_processing" => v6::Code::DumpAlreadyInProgress,
            "dump_already_processing" => v6::Code::DumpAlreadyProcessing,
            "dump_process_failed" => v6::Code::DumpProcessFailed,
            "invalid_content_type" => v6::Code::InvalidContentType,
            "missing_content_type" => v6::Code::MissingContentType,
            "malformed_payload" => v6::Code::MalformedPayload,
            "missing_payload" => v6::Code::MissingPayload,
            "api_key_not_found" => v6::Code::ApiKeyNotFound,
            "missing_parameter" => v6::Code::MissingParameter,
            "missing_parameter" => v6::Code::BadRequest,
            "invalid_api_key_actions" => v6::Code::InvalidApiKeyActions,
            "invalid_api_key_indexes" => v6::Code::InvalidApiKeyIndexes,
            "invalid_api_key_expires_at" => v6::Code::InvalidApiKeyExpiresAt,
            "invalid_api_key_description" => v6::Code::InvalidApiKeyDescription,
            "invalid_api_key_name" => v6::Code::InvalidApiKeyName,
            "invalid_api_key_uid" => v6::Code::InvalidApiKeyUid,
            "immutable_field" => v6::Code::ImmutableField,
            "immutable_field" => v6::Code::BadRequest,
            "api_key_already_exists" => v6::Code::ApiKeyAlreadyExists,
            other => {
                log::warn!("Unknown error code {}", other);
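The remapping above is a plain string-to-enum table with a logged fallback for codes whose dedicated variant no longer exists. A reduced sketch of the shape (assumes the `log` crate; `Code` here is a stand-in with only a few variants, not the real v6 enum, and the fallback variant is illustrative):

#[derive(Debug, PartialEq)]
enum Code {
    IndexCreationFailed,
    BadRequest,
    Internal,
}

fn code_from_str(error_code: &str) -> Code {
    match error_code {
        "index_creation_failed" => Code::IndexCreationFailed,
        // codes whose dedicated variant was removed degrade to a generic one
        "missing_parameter" | "immutable_field" => Code::BadRequest,
        other => {
            log::warn!("Unknown error code {}", other);
            Code::Internal
        }
    }
}

fn main() {
    assert_eq!(code_from_str("index_creation_failed"), Code::IndexCreationFailed);
    assert_eq!(code_from_str("does_not_exist"), Code::Internal);
}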
@@ -315,7 +317,26 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
            searchable_attributes: settings.searchable_attributes.into(),
            filterable_attributes: settings.filterable_attributes.into(),
            sortable_attributes: settings.sortable_attributes.into(),
            ranking_rules: settings.ranking_rules.into(),
            ranking_rules: {
                match settings.ranking_rules {
                    v5::settings::Setting::Set(ranking_rules) => {
                        let mut new_ranking_rules = vec![];
                        for rule in ranking_rules {
                            match v6::RankingRuleView::from_str(&rule) {
                                Ok(new_rule) => {
                                    new_ranking_rules.push(new_rule);
                                }
                                Err(_) => {
                                    log::warn!("Error while importing settings. The ranking rule `{rule}` does not exist anymore.")
                                }
                            }
                        }
                        v6::Setting::Set(new_ranking_rules)
                    }
                    v5::settings::Setting::Reset => v6::Setting::Reset,
                    v5::settings::Setting::NotSet => v6::Setting::NotSet,
                }
            },
            stop_words: settings.stop_words.into(),
            synonyms: settings.synonyms.into(),
            distinct_attribute: settings.distinct_attribute.into(),
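The block above migrates ranking rules by parsing each v5 string into the v6 representation and dropping, with a warning, any rule that no longer parses. The same logic can be written as a `filter_map`; a minimal self-contained sketch (assumes the `log` crate; `RankingRuleView` here is a stand-in for the real v6 type):

use std::str::FromStr;

#[derive(Debug, PartialEq)]
enum RankingRuleView { Words, Typo }

impl FromStr for RankingRuleView {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, ()> {
        match s {
            "words" => Ok(RankingRuleView::Words),
            "typo" => Ok(RankingRuleView::Typo),
            _ => Err(()),
        }
    }
}

fn migrate(rules: Vec<String>) -> Vec<RankingRuleView> {
    rules
        .into_iter()
        .filter_map(|rule| match RankingRuleView::from_str(&rule) {
            Ok(new_rule) => Some(new_rule),
            Err(_) => {
                // unknown rules are dropped, not carried over as garbage
                log::warn!("The ranking rule `{rule}` does not exist anymore.");
                None
            }
        })
        .collect()
}

fn main() {
    let rules = vec!["words".into(), "wordsPosition".into(), "typo".into()];
    assert_eq!(migrate(rules), vec![RankingRuleView::Words, RankingRuleView::Typo]);
}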
@@ -401,7 +422,6 @@ pub(crate) mod test {
    use super::*;

    #[test]
    #[ignore]
    fn compat_v5_v6() {
        let dump = File::open("tests/assets/v5.dump").unwrap();
        let dir = TempDir::new().unwrap();
@@ -419,7 +439,7 @@ pub(crate) mod test {
        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"42d4200cf6d92a6449989ca48cd8e28a");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
        assert_eq!(update_files.len(), 22);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_some()); // the enqueued document addition
@@ -449,7 +469,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"8e5cadabf74aebe1160bf51c3d489efe");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");
@@ -464,7 +484,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"4894ac1e74b9e1069ed5ee262b7a1aca");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 200);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"e962baafd2fbae4cdd14e876053b0c5a");
@@ -479,7 +499,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"054dbf08a79e08bb9becba6f5d090f13");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
@@ -1,42 +0,0 @@
use meilisearch_auth::error::AuthControllerError;
use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::internal_error;

use crate::{index_resolver::error::IndexResolverError, tasks::error::TaskError};

pub type Result<T> = std::result::Result<T, DumpError>;

#[derive(thiserror::Error, Debug)]
pub enum DumpError {
    #[error("An internal error has occurred. `{0}`.")]
    Internal(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("{0}")]
    IndexResolver(Box<IndexResolverError>),
}

internal_error!(
    DumpError: milli::heed::Error,
    std::io::Error,
    tokio::task::JoinError,
    tokio::sync::oneshot::error::RecvError,
    serde_json::error::Error,
    tempfile::PersistError,
    fs_extra::error::Error,
    AuthControllerError,
    TaskError
);

impl From<IndexResolverError> for DumpError {
    fn from(e: IndexResolverError) -> Self {
        Self::IndexResolver(Box::new(e))
    }
}

impl ErrorCode for DumpError {
    fn error_code(&self) -> Code {
        match self {
            DumpError::Internal(_) => Code::Internal,
            DumpError::IndexResolver(e) => e.error_code(),
        }
    }
}
@@ -9,11 +9,11 @@ use self::compat::v4_to_v5::CompatV4ToV5;
use self::compat::v5_to_v6::{CompatIndexV5ToV6, CompatV5ToV6};
use self::v5::V5Reader;
use self::v6::{V6IndexReader, V6Reader};
use crate::{Error, Result, Version};
use crate::{Result, Version};

mod compat;

// pub(self) mod v1;
pub(self) mod v1;
pub(self) mod v2;
pub(self) mod v3;
pub(self) mod v4;
@@ -45,8 +45,9 @@ impl DumpReader {
        let MetadataVersion { dump_version } = serde_json::from_reader(&mut meta_file)?;

        match dump_version {
            // Version::V1 => Ok(Box::new(v1::Reader::open(path)?)),
            Version::V1 => Err(Error::DumpV1Unsupported),
            Version::V1 => {
                Ok(v1::V1Reader::open(path)?.to_v2().to_v3().to_v4().to_v5().to_v6().into())
            }
            Version::V2 => Ok(v2::V2Reader::open(path)?.to_v3().to_v4().to_v5().to_v6().into()),
            Version::V3 => Ok(v3::V3Reader::open(path)?.to_v4().to_v5().to_v6().into()),
            Version::V4 => Ok(v4::V4Reader::open(path)?.to_v5().to_v6().into()),
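Each compat layer only knows how to lift a dump one version forward, so `DumpReader::open` composes them: the older the dump, the longer the `to_vN()` chain. A reduced model of that composition (toy types, not the real readers):

struct V1Reader;
struct V2Reader;
struct V3Reader;

impl V1Reader {
    fn to_v2(self) -> V2Reader { V2Reader }
}
impl V2Reader {
    fn to_v3(self) -> V3Reader { V3Reader }
}

fn open(dump_version: u32) -> V3Reader {
    match dump_version {
        // a v1 dump is lifted through every intermediate layer in turn
        1 => V1Reader.to_v2().to_v3(),
        2 => V2Reader.to_v3(),
        _ => V3Reader,
    }
}

fn main() {
    let _reader = open(1);
}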
@@ -189,7 +190,6 @@ pub(crate) mod test {
    use super::*;

    #[test]
    #[ignore]
    fn import_dump_v5() {
        let dump = File::open("tests/assets/v5.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();
@@ -201,7 +201,7 @@ pub(crate) mod test {
        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"42d4200cf6d92a6449989ca48cd8e28a");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"41f91d3a94911b2735ec41b07540df5c");
        assert_eq!(update_files.len(), 22);
        assert!(update_files[0].is_none()); // the dump creation
        assert!(update_files[1].is_some()); // the enqueued document addition
@@ -222,53 +222,52 @@ pub(crate) mod test {
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-04T15:51:35.939396731Z",
          "updatedAt": "2022-10-04T15:55:01.897325373Z"
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"8e5cadabf74aebe1160bf51c3d489efe");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-04T15:51:35.291992167Z",
          "updatedAt": "2022-10-04T15:55:10.33561842Z"
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"4894ac1e74b9e1069ed5ee262b7a1aca");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 200);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"e962baafd2fbae4cdd14e876053b0c5a");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-04T15:51:37.381094632Z",
          "updatedAt": "2022-10-04T15:55:02.394503431Z"
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"054dbf08a79e08bb9becba6f5d090f13");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    #[ignore]
    fn import_dump_v4() {
        let dump = File::open("tests/assets/v4.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();
@@ -280,7 +279,7 @@ pub(crate) mod test {
        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"491e244a80a19fe2a900b809d310c24a");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"c2445ddd1785528b80f2ba534d3bd00c");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
@@ -300,53 +299,52 @@ pub(crate) mod test {
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-06T12:53:39.360187055Z",
          "updatedAt": "2022-10-06T12:53:40.603035979Z"
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"1f9da51a4518166fb440def5437eafdb");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-06T12:53:38.710611568Z",
          "updatedAt": "2022-10-06T12:53:49.785862546Z"
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"488816aba82c1bd65f1609630055c611");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"786022a66ecb992c8a2a60fee070a5ab");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
          "createdAt": "2022-10-06T12:53:40.831649057Z",
          "updatedAt": "2022-10-06T12:53:41.116036186Z"
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"7b4f66dad597dc651650f35fe34be27f");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    #[ignore]
    fn import_dump_v3() {
        let dump = File::open("tests/assets/v3.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();
@@ -358,7 +356,7 @@ pub(crate) mod test {
        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"7cacce2e21702be696b866808c726946");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"cd12efd308fe3ed226356a727ab42ed3");
        assert_eq!(update_files.len(), 10);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
@@ -388,7 +386,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"855f3165dec609b919171ff83f82b364");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
@@ -403,7 +401,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"43e0bf1746c3ea1d64c1e10ea544c190");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");
@@ -418,7 +416,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies2.settings()), @"5fd06a5038f49311600379d43412b655");
        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");
@@ -433,14 +431,13 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"5fd06a5038f49311600379d43412b655");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    #[ignore]
    fn import_dump_v2() {
        let dump = File::open("tests/assets/v2.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();
@@ -452,7 +449,7 @@ pub(crate) mod test {
        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"6cabec4e252b74c8f3a2c8517622e85f");
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"bc616290adfe7d09a624cf6065ca9069");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[0].is_some()); // the enqueued document addition
        assert!(update_files[1..].iter().all(|u| u.is_none())); // everything already processed
@@ -482,7 +479,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"b15b71f56dd082d8e8ec5182e688bf36");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
@@ -497,7 +494,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"5389153ddf5527fa79c54b6a6e9c21f6");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");
@@ -512,7 +509,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies2.settings()), @"8aebab01301d266acf3e18dd449c008f");
        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");
@@ -527,9 +524,162 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"8aebab01301d266acf3e18dd449c008f");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn import_dump_v2_from_meilisearch_v0_22_0_issue_3435() {
        let dump = File::open("tests/assets/v2-v0.22.0.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");
        assert_eq!(dump.instance_uid().unwrap(), None);

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"2db37756d8af1fb7623436b76e8956a6");
        assert_eq!(update_files.len(), 8);
        assert!(update_files[0..].iter().all(|u| u.is_none())); // everything already processed

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn import_dump_v1() {
        let dump = File::open("tests/assets/v1.dump").unwrap();
        let mut dump = DumpReader::open(dump).unwrap();

        // top level infos
        assert_eq!(dump.date(), None);
        assert_eq!(dump.instance_uid().unwrap(), None);

        // tasks
        let tasks = dump.tasks().unwrap().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"8df6eab075a44b3c1af6b726f9fd9a43");
        assert_eq!(update_files.len(), 9);
        assert!(update_files[..].iter().all(|u| u.is_none())); // no update file in dump v1

        // keys
        let keys = dump.keys().unwrap().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot!(meili_snap::json_string!(keys), @"[]");
        meili_snap::snapshot_hash!(meili_snap::json_string!(keys), @"d751713988987e9331980363e24189ce");

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-02T13:23:39.976870431Z",
          "updatedAt": "2022-10-02T13:27:54.353262482Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-02T13:15:29.477512777Z",
          "updatedAt": "2022-10-02T13:21:12.671204856Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b63dbed5bbc059f3e32bc471ae699bf5");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-02T13:38:26.358882984Z",
          "updatedAt": "2022-10-02T13:38:26.385609433Z"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"aa24c0cfc733d66c396237ad44263bed");
    }
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,38 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,31 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "genres",
    "id"
  ],
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/mod.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,23 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,37 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,24 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,30 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/mod.rs
expression: movies2.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,25 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,39 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null
}
@@ -0,0 +1,31 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null
}
@@ -0,0 +1,34 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  }
}
@@ -0,0 +1,48 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  }
}
@@ -0,0 +1,40 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  }
}
@@ -0,0 +1,40 @@
---
source: dump/src/reader/mod.rs
expression: spells.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,54 @@
---
source: dump/src/reader/mod.rs
expression: products.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [],
  "sortableAttributes": [],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -0,0 +1,46 @@
---
source: dump/src/reader/mod.rs
expression: movies.settings().unwrap()
---
{
  "displayedAttributes": [
    "*"
  ],
  "searchableAttributes": [
    "*"
  ],
  "filterableAttributes": [
    "genres",
    "id"
  ],
  "sortableAttributes": [
    "release_date"
  ],
  "rankingRules": [
    "words",
    "typo",
    "proximity",
    "attribute",
    "sort",
    "exactness",
    "release_date:asc"
  ],
  "stopWords": [],
  "synonyms": {},
  "distinctAttribute": null,
  "typoTolerance": {
    "enabled": true,
    "minWordSizeForTypos": {
      "oneTypo": 5,
      "twoTypos": 9
    },
    "disableOnWords": [],
    "disableOnAttributes": []
  },
  "faceting": {
    "maxValuesPerFacet": 100
  },
  "pagination": {
    "maxTotalHits": 1000
  }
}
@@ -1,173 +1,262 @@
use std::{
    convert::Infallible,
    fs::{self, File},
    io::{BufRead, BufReader},
    path::Path,
};
use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};

use serde::Deserialize;
use tempfile::TempDir;
use time::OffsetDateTime;

use self::update::UpdateStatus;

use super::{DumpReader, IndexReader};
use crate::{Error, Result, Version};
use super::compat::v1_to_v2::CompatV1ToV2;
use super::Document;
use crate::{IndexMetadata, Result, Version};

pub mod settings;
pub mod update;
pub mod v1;

pub struct V1Reader {
    dump: TempDir,
    metadata: v1::Metadata,
    indexes: Vec<V1IndexReader>,
    pub dump: TempDir,
    pub db_version: String,
    pub dump_version: crate::Version,
    indexes: Vec<V1Index>,
}

struct V1IndexReader {
    name: String,
pub struct IndexUuid {
    pub name: String,
    pub uid: String,
}
pub type Task = self::update::UpdateStatus;

struct V1Index {
    metadata: IndexMetadataV1,
    path: PathBuf,
}

impl V1Index {
    pub fn new(path: PathBuf, metadata: Index) -> Self {
        Self { metadata: metadata.into(), path }
    }

    pub fn open(&self) -> Result<V1IndexReader> {
        V1IndexReader::new(&self.path, self.metadata.clone())
    }

    pub fn metadata(&self) -> &IndexMetadata {
        &self.metadata.metadata
    }
}

pub struct V1IndexReader {
    metadata: IndexMetadataV1,
    documents: BufReader<File>,
    settings: BufReader<File>,
    updates: BufReader<File>,

    current_update: Option<UpdateStatus>,
}

impl V1IndexReader {
    pub fn new(name: String, path: &Path) -> Result<Self> {
        let mut ret = V1IndexReader {
            name,
    pub fn new(path: &Path, metadata: IndexMetadataV1) -> Result<Self> {
        Ok(V1IndexReader {
            metadata,
            documents: BufReader::new(File::open(path.join("documents.jsonl"))?),
            settings: BufReader::new(File::open(path.join("settings.json"))?),
            updates: BufReader::new(File::open(path.join("updates.jsonl"))?),
            current_update: None,
        };
        ret.next_update();

        Ok(ret)
        })
    }

    pub fn next_update(&mut self) -> Result<Option<UpdateStatus>> {
        let current_update = if let Some(line) = self.updates.lines().next() {
            Some(serde_json::from_str(&line?)?)
        } else {
            None
        };
    pub fn metadata(&self) -> &IndexMetadata {
        &self.metadata.metadata
    }

        Ok(std::mem::replace(&mut self.current_update, current_update))
    pub fn documents(&mut self) -> Result<impl Iterator<Item = Result<Document>> + '_> {
        Ok((&mut self.documents)
            .lines()
            .map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) }))
    }

    pub fn settings(&mut self) -> Result<self::settings::Settings> {
        Ok(serde_json::from_reader(&mut self.settings)?)
    }

    pub fn tasks(self) -> impl Iterator<Item = Result<Task>> {
        self.updates.lines().map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) })
    }
}

impl V1Reader {
    pub fn open(dump: TempDir) -> Result<Self> {
        let mut meta_file = fs::read(dump.path().join("metadata.json"))?;
        let metadata = serde_json::from_reader(&*meta_file)?;
        let meta_file = fs::read(dump.path().join("metadata.json"))?;
        let metadata: Metadata = serde_json::from_reader(&*meta_file)?;

        let mut indexes = Vec::new();

        let entries = fs::read_dir(dump.path())?;
        for entry in entries {
            let entry = entry?;
            if entry.file_type()?.is_dir() {
                indexes.push(V1IndexReader::new(
                    entry
                        .file_name()
                        .to_str()
                        .ok_or(Error::BadIndexName)?
                        .to_string(),
                    &entry.path(),
                )?);
            }
        for index in metadata.indexes.into_iter() {
            let index_path = dump.path().join(&index.uid);
            indexes.push(V1Index::new(index_path, index));
        }

        Ok(V1Reader {
            dump,
            metadata,
            indexes,
            db_version: metadata.db_version,
            dump_version: metadata.dump_version,
        })
    }

    fn next_update(&mut self) -> Result<Option<UpdateStatus>> {
        if let Some((idx, _)) = self
            .indexes
    pub fn to_v2(self) -> CompatV1ToV2 {
        CompatV1ToV2 { from: self }
    }

    pub fn index_uuid(&self) -> Vec<IndexUuid> {
        self.indexes
            .iter()
            .map(|index| index.current_update)
            .enumerate()
            .filter_map(|(idx, update)| update.map(|u| (idx, u)))
            .min_by_key(|(_, update)| update.enqueued_at())
        {
            self.indexes[idx].next_update()
        } else {
            Ok(None)
            .map(|index| IndexUuid {
                name: index.metadata.name.to_owned(),
                uid: index.metadata().uid.to_owned(),
            })
            .collect()
    }

    pub fn version(&self) -> Version {
        Version::V1
    }

    pub fn date(&self) -> Option<OffsetDateTime> {
        None
    }

    pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V1IndexReader>> + '_> {
        Ok(self.indexes.iter().map(|index| index.open()))
    }
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Index {
    pub name: String,
    pub uid: String,
    #[serde(with = "time::serde::rfc3339")]
    created_at: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    updated_at: OffsetDateTime,
    pub primary_key: Option<String>,
}

#[derive(Clone)]
pub struct IndexMetadataV1 {
    pub name: String,
    pub metadata: crate::IndexMetadata,
}

impl From<Index> for IndexMetadataV1 {
    fn from(index: Index) -> Self {
        IndexMetadataV1 {
            name: index.name,
            metadata: crate::IndexMetadata {
                uid: index.uid,
                primary_key: index.primary_key,
                created_at: index.created_at,
                updated_at: index.updated_at,
            },
        }
    }
}

impl IndexReader for &V1IndexReader {
    type Document = serde_json::Map<String, serde_json::Value>;
    type Settings = settings::Settings;

    fn name(&self) -> &str {
        todo!()
    }

    fn documents(&self) -> Result<Box<dyn Iterator<Item = Result<Self::Document>>>> {
        todo!()
    }

    fn settings(&self) -> Result<Self::Settings> {
        todo!()
    }
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    pub indexes: Vec<Index>,
    pub db_version: String,
    pub dump_version: crate::Version,
}

impl DumpReader for V1Reader {
    type Document = serde_json::Map<String, serde_json::Value>;
    type Settings = settings::Settings;
#[cfg(test)]
pub(crate) mod test {
    use std::fs::File;
    use std::io::BufReader;

    type Task = update::UpdateStatus;
    type UpdateFile = Infallible;
    use flate2::bufread::GzDecoder;
    use meili_snap::insta;
    use tempfile::TempDir;

    type Key = Infallible;
    use super::*;

    fn date(&self) -> Option<OffsetDateTime> {
        None
    }
    #[test]
    fn read_dump_v1() {
        let dump = File::open("tests/assets/v1.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

    fn version(&self) -> Version {
        Version::V1
    }
        let dump = V1Reader::open(dir).unwrap();

    fn indexes(
        &self,
    ) -> Result<
        Box<
            dyn Iterator<
                Item = Result<
                    Box<
                        dyn super::IndexReader<
                            Document = Self::Document,
                            Settings = Self::Settings,
                        >,
                    >,
                >,
            >,
        >,
    > {
        Ok(Box::new(self.indexes.iter().map(|index| {
            let index = Box::new(index)
                as Box<dyn IndexReader<Document = Self::Document, Settings = Self::Settings>>;
            Ok(index)
        })))
    }
        // top level infos
        assert_eq!(dump.date(), None);

    fn tasks(&self) -> Box<dyn Iterator<Item = Result<(Self::Task, Option<Self::UpdateFile>)>>> {
        Box::new(std::iter::from_fn(|| {
            self.next_update()
                .transpose()
                .map(|result| result.map(|task| (task, None)))
        }))
    }
        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();

    fn keys(&self) -> Box<dyn Iterator<Item = Result<Self::Key>>> {
        Box::new(std::iter::empty())
        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut dnd_spells = indexes.pop().unwrap();

        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "2022-10-02T13:23:39.976870431Z",
          "updatedAt": "2022-10-02T13:27:54.353262482Z"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

        // products tasks
        let tasks = products.tasks().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"91de507f206ad21964584021932ba7a7");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "2022-10-02T13:15:29.477512777Z",
          "updatedAt": "2022-10-02T13:21:12.671204856Z"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b63dbed5bbc059f3e32bc471ae699bf5");

        // movies tasks
        let tasks = movies.tasks().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"55eef4de2bef7e84c5ce0bee47488f56");

        // spells
        insta::assert_json_snapshot!(dnd_spells.metadata(), @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "2022-10-02T13:38:26.358882984Z",
          "updatedAt": "2022-10-02T13:38:26.385609433Z"
        }
        "###);

        insta::assert_json_snapshot!(dnd_spells.settings().unwrap());
        let documents = dnd_spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"aa24c0cfc733d66c396237ad44263bed");

        // spells tasks
        let tasks = dnd_spells.tasks().collect::<Result<Vec<_>>>().unwrap();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"836dd7d64d5ad20ad901c44b1b161a4c");
    }
}
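// Editor's sketch (not part of the diff): how the reworked v1 reader above is
// meant to be driven. `V1Reader::open` takes the already-unpacked dump
// directory, `index_uuid()` exposes the name/uid pairs parsed from
// `metadata.json`, and `to_v2()` hands the reader to the v1-to-v2 compat
// layer. The helper's name and the `crate::Result` path are assumptions.
fn inspect_v1_dump(dump_dir: tempfile::TempDir) -> crate::Result<()> {
    let reader = V1Reader::open(dump_dir)?;
    println!("dump produced by meilisearch {}", reader.db_version);
    for index in reader.index_uuid() {
        println!("index {} has uid {}", index.name, index.uid);
    }
    // Continue the migration chain the same way DumpReader::open would for V1.
    let _compat = reader.to_v2();
    Ok(())
}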
@@ -1,6 +1,9 @@
use std::collections::{BTreeMap, BTreeSet};
use std::result::Result as StdResult;
use std::str::FromStr;

use once_cell::sync::Lazy;
use regex::Regex;
use serde::{Deserialize, Deserializer, Serialize};

#[derive(Default, Clone, Serialize, Deserialize, Debug)]
@@ -53,6 +56,34 @@ pub enum RankingRule {
    Desc(String),
}

static ASC_DESC_REGEX: Lazy<Regex> =
    Lazy::new(|| Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap());

impl FromStr for RankingRule {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "typo" => Self::Typo,
            "words" => Self::Words,
            "proximity" => Self::Proximity,
            "attribute" => Self::Attribute,
            "wordsPosition" => Self::WordsPosition,
            "exactness" => Self::Exactness,
            text => {
                let caps = ASC_DESC_REGEX.captures(text).ok_or(())?;
                let order = caps.get(1).unwrap().as_str();
                let field_name = caps.get(2).unwrap().as_str();
                match order {
                    "asc" => Self::Asc(field_name.to_string()),
                    "desc" => Self::Desc(field_name.to_string()),
                    _ => return Err(()),
                }
            }
        })
    }
}

// Any value that is present is considered Some value, including null.
fn deserialize_some<'de, T, D>(deserializer: D) -> StdResult<Option<T>, D::Error>
where
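// Editor's sketch (not part of the diff): expected behaviour of the new
// `FromStr` impl above. The module and import are assumptions made so the
// example is self-contained next to the impl; the assertions mirror the
// keyword arms and the `asc(..)`/`desc(..)` regex shown in the hunk.
#[cfg(test)]
mod ranking_rule_sketch {
    use std::str::FromStr;

    use super::RankingRule;

    #[test]
    fn parses_legacy_ranking_rules() {
        // Fixed keywords map straight onto their variants.
        assert!(matches!(RankingRule::from_str("words"), Ok(RankingRule::Words)));
        // `asc(field)` / `desc(field)` are captured by ASC_DESC_REGEX.
        assert!(matches!(
            RankingRule::from_str("asc(release_date)"),
            Ok(RankingRule::Asc(field)) if field == "release_date"
        ));
        // Anything else is rejected with `Err(())`.
        assert!(RankingRule::from_str("bogus").is_err());
    }
}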
@@ -0,0 +1,24 @@
---
source: dump/src/reader/v1/mod.rs
expression: dnd_spells.settings().unwrap()
---
{
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "wordsPosition",
    "exactness"
  ],
  "distinctAttribute": null,
  "searchableAttributes": [
    "*"
  ],
  "displayedAttributes": [
    "*"
  ],
  "stopWords": [],
  "synonyms": {},
  "attributesForFaceting": []
}
@@ -0,0 +1,38 @@
---
source: dump/src/reader/v1/mod.rs
expression: products.settings().unwrap()
---
{
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "wordsPosition",
    "exactness"
  ],
  "distinctAttribute": null,
  "searchableAttributes": [
    "*"
  ],
  "displayedAttributes": [
    "*"
  ],
  "stopWords": [],
  "synonyms": {
    "android": [
      "phone",
      "smartphone"
    ],
    "iphone": [
      "phone",
      "smartphone"
    ],
    "phone": [
      "android",
      "iphone",
      "smartphone"
    ]
  },
  "attributesForFaceting": []
}
@@ -0,0 +1,28 @@
---
source: dump/src/reader/v1/mod.rs
expression: movies.settings().unwrap()
---
{
  "rankingRules": [
    "typo",
    "words",
    "proximity",
    "attribute",
    "wordsPosition",
    "exactness",
    "asc(release_date)"
  ],
  "distinctAttribute": null,
  "searchableAttributes": [
    "*"
  ],
  "displayedAttributes": [
    "*"
  ],
  "stopWords": [],
  "synonyms": {},
  "attributesForFaceting": [
    "id",
    "genres"
  ]
}
@@ -1,54 +1,8 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use time::OffsetDateTime;

use super::settings::SettingsUpdate;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Update {
    data: UpdateData,
    #[serde(with = "time::serde::rfc3339")]
    enqueued_at: OffsetDateTime,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UpdateData {
    ClearAll,
    Customs(Vec<u8>),
    // (primary key, documents)
    DocumentsAddition {
        primary_key: Option<String>,
        documents: Vec<serde_json::Map<String, Value>>,
    },
    DocumentsPartial {
        primary_key: Option<String>,
        documents: Vec<serde_json::Map<String, Value>>,
    },
    DocumentsDeletion(Vec<String>),
    Settings(Box<SettingsUpdate>),
}

impl UpdateData {
    pub fn update_type(&self) -> UpdateType {
        match self {
            UpdateData::ClearAll => UpdateType::ClearAll,
            UpdateData::Customs(_) => UpdateType::Customs,
            UpdateData::DocumentsAddition { documents, .. } => UpdateType::DocumentsAddition {
                number: documents.len(),
            },
            UpdateData::DocumentsPartial { documents, .. } => UpdateType::DocumentsPartial {
                number: documents.len(),
            },
            UpdateData::DocumentsDeletion(deletion) => UpdateType::DocumentsDeletion {
                number: deletion.len(),
            },
            UpdateData::Settings(update) => UpdateType::Settings {
                settings: update.clone(),
            },
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "name")]
pub enum UpdateType {

@@ -1,22 +0,0 @@
use serde::Deserialize;
use time::OffsetDateTime;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Index {
    pub name: String,
    pub uid: String,
    #[serde(with = "time::serde::rfc3339")]
    created_at: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    updated_at: OffsetDateTime,
    pub primary_key: Option<String>,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    indexes: Vec<Index>,
    db_version: String,
    dump_version: crate::Version,
}
@@ -41,6 +41,7 @@ use super::Document;
use crate::{IndexMetadata, Result, Version};

pub type Settings<T> = settings::Settings<T>;
pub type Setting<T> = settings::Setting<T>;
pub type Checked = settings::Checked;
pub type Unchecked = settings::Unchecked;

@@ -211,7 +212,6 @@ pub(crate) mod test {
    use super::*;

    #[test]
    #[ignore]
    fn read_dump_v2() {
        let dump = File::open("tests/assets/v2.dump").unwrap();
        let dir = TempDir::new().unwrap();
@@ -257,7 +257,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"c41bf7315d404da46c99b9e3a2a3cc1e");
        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
@@ -272,7 +272,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"3d1d96c85b6bab46e957bc8d2532a910");
        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 110);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");
@@ -287,7 +287,7 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", movies2.settings()), @"4f04afc086828d8da0da57a7d598ddba");
        insta::assert_json_snapshot!(movies2.settings().unwrap());
        let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 0);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");
@@ -302,7 +302,84 @@ pub(crate) mod test {
        }
        "###);

        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"4f04afc086828d8da0da57a7d598ddba");
        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
    }

    #[test]
    fn read_dump_v2_from_meilisearch_v0_22_0_issue_3435() {
        let dump = File::open("tests/assets/v2-v0.22.0.dump").unwrap();
        let dir = TempDir::new().unwrap();
        let mut dump = BufReader::new(dump);
        let gz = GzDecoder::new(&mut dump);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(dir.path()).unwrap();

        let mut dump = V2Reader::open(dir).unwrap();

        // top level infos
        insta::assert_display_snapshot!(dump.date().unwrap(), @"2023-01-30 16:26:09.247261 +00:00:00");

        // tasks
        let tasks = dump.tasks().collect::<Result<Vec<_>>>().unwrap();
        let (tasks, update_files): (Vec<_>, Vec<_>) = tasks.into_iter().unzip();
        meili_snap::snapshot_hash!(meili_snap::json_string!(tasks), @"aca8ba13046272664eb3ea2da3031633");
        assert_eq!(update_files.len(), 8);
        assert!(update_files[0..].iter().all(|u| u.is_none())); // everything has already been processed

        // indexes
        let mut indexes = dump.indexes().unwrap().collect::<Result<Vec<_>>>().unwrap();
        // the indexes are not ordered in any way by default
        indexes.sort_by_key(|index| index.metadata().uid.to_string());

        let mut products = indexes.pop().unwrap();
        let mut movies = indexes.pop().unwrap();
        let mut spells = indexes.pop().unwrap();
        assert!(indexes.is_empty());

        // products
        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "products",
          "primaryKey": "sku",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(products.settings().unwrap());
        let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");

        // movies
        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "movies",
          "primaryKey": "id",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(movies.settings().unwrap());
        let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"0227598af846e574139ee0b80e03a720");

        // spells
        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
        {
          "uid": "dnd_spells",
          "primaryKey": "index",
          "createdAt": "[now]",
          "updatedAt": "[now]"
        }
        "###);

        insta::assert_json_snapshot!(spells.settings().unwrap());
        let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
        assert_eq!(documents.len(), 10);
        meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
@@ -1,34 +1,33 @@
use std::collections::{BTreeMap, BTreeSet};
use std::fmt;
use std::marker::PhantomData;
use std::str::FromStr;

use once_cell::sync::Lazy;
use regex::Regex;
use serde::{Deserialize, Deserializer};

#[cfg(test)]
fn serialize_with_wildcard<S>(
    field: &Option<Option<Vec<String>>>,
    field: &Setting<Vec<String>>,
    s: S,
) -> std::result::Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    let wildcard = vec!["*".to_string()];
    s.serialize_some(&field.as_ref().map(|o| o.as_ref().unwrap_or(&wildcard)))
}
use serde::Serialize;

fn deserialize_some<'de, T, D>(deserializer: D) -> std::result::Result<Option<T>, D::Error>
where
    T: Deserialize<'de>,
    D: Deserializer<'de>,
{
    Deserialize::deserialize(deserializer).map(Some)
    let wildcard = vec!["*".to_string()];
    match field {
        Setting::Set(value) => Some(value),
        Setting::Reset => Some(&wildcard),
        Setting::NotSet => None,
    }
    .serialize(s)
}

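// Editor's sketch (not part of the diff): what the rewritten
// `serialize_with_wildcard` above produces for each `Setting` state:
// `Set(v)` keeps the value, `Reset` re-expands to the `["*"]` wildcard, and
// `NotSet` is dropped by `skip_serializing_if`. The `Probe` wrapper is a
// hypothetical stand-in for the real `Settings` struct.
#[cfg(test)]
mod wildcard_serialization_sketch {
    use serde::Serialize;

    use super::{serialize_with_wildcard, Setting};

    #[derive(Serialize)]
    struct Probe {
        #[serde(
            serialize_with = "serialize_with_wildcard",
            skip_serializing_if = "Setting::is_not_set"
        )]
        displayed_attributes: Setting<Vec<String>>,
    }

    #[test]
    fn wildcard_states() {
        let set = Probe { displayed_attributes: Setting::Set(vec!["title".to_string()]) };
        assert_eq!(serde_json::to_string(&set).unwrap(), r#"{"displayed_attributes":["title"]}"#);

        let reset = Probe { displayed_attributes: Setting::Reset };
        assert_eq!(serde_json::to_string(&reset).unwrap(), r#"{"displayed_attributes":["*"]}"#);

        let not_set = Probe { displayed_attributes: Setting::NotSet };
        assert_eq!(serde_json::to_string(&not_set).unwrap(), "{}");
    }
}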
#[derive(Clone, Default, Debug)]
|
||||
#[cfg_attr(test, derive(serde::Serialize))]
|
||||
pub struct Checked;
|
||||
|
||||
#[derive(Clone, Default, Debug, Deserialize)]
|
||||
#[cfg_attr(test, derive(serde::Serialize))]
|
||||
pub struct Unchecked;
|
||||
@@ -41,75 +40,54 @@ pub struct Unchecked;
 pub struct Settings<T> {
     #[serde(
         default,
-        deserialize_with = "deserialize_some",
         serialize_with = "serialize_with_wildcard",
-        skip_serializing_if = "Option::is_none"
+        skip_serializing_if = "Setting::is_not_set"
     )]
-    pub displayed_attributes: Option<Option<Vec<String>>>,
+    pub displayed_attributes: Setting<Vec<String>>,

     #[serde(
         default,
-        deserialize_with = "deserialize_some",
         serialize_with = "serialize_with_wildcard",
-        skip_serializing_if = "Option::is_none"
+        skip_serializing_if = "Setting::is_not_set"
     )]
-    pub searchable_attributes: Option<Option<Vec<String>>>,
+    pub searchable_attributes: Setting<Vec<String>>,

-    #[serde(
-        default,
-        deserialize_with = "deserialize_some",
-        skip_serializing_if = "Option::is_none"
-    )]
-    pub filterable_attributes: Option<Option<BTreeSet<String>>>,
-
-    #[serde(
-        default,
-        deserialize_with = "deserialize_some",
-        skip_serializing_if = "Option::is_none"
-    )]
-    pub ranking_rules: Option<Option<Vec<String>>>,
-    #[serde(
-        default,
-        deserialize_with = "deserialize_some",
-        skip_serializing_if = "Option::is_none"
-    )]
-    pub stop_words: Option<Option<BTreeSet<String>>>,
-    #[serde(
-        default,
-        deserialize_with = "deserialize_some",
-        skip_serializing_if = "Option::is_none"
-    )]
-    pub synonyms: Option<Option<BTreeMap<String, Vec<String>>>>,
-    #[serde(
-        default,
-        deserialize_with = "deserialize_some",
-        skip_serializing_if = "Option::is_none"
-    )]
-    pub distinct_attribute: Option<Option<String>>,
+    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
+    pub filterable_attributes: Setting<BTreeSet<String>>,
+    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
+    pub sortable_attributes: Setting<BTreeSet<String>>,
+    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
+    pub ranking_rules: Setting<Vec<String>>,
+    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
+    pub stop_words: Setting<BTreeSet<String>>,
+    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
+    pub synonyms: Setting<BTreeMap<String, Vec<String>>>,
+    #[serde(default, skip_serializing_if = "Setting::is_not_set")]
+    pub distinct_attribute: Setting<String>,

     #[serde(skip)]
     pub _kind: PhantomData<T>,
 }

 impl Settings<Unchecked> {
-    pub fn check(mut self) -> Settings<Checked> {
-        let displayed_attributes = match self.displayed_attributes.take() {
-            Some(Some(fields)) => {
+    pub fn check(self) -> Settings<Checked> {
+        let displayed_attributes = match self.displayed_attributes {
+            Setting::Set(fields) => {
                 if fields.iter().any(|f| f == "*") {
-                    Some(None)
+                    Setting::Reset
                 } else {
-                    Some(Some(fields))
+                    Setting::Set(fields)
                 }
             }
             otherwise => otherwise,
         };

-        let searchable_attributes = match self.searchable_attributes.take() {
-            Some(Some(fields)) => {
+        let searchable_attributes = match self.searchable_attributes {
+            Setting::Set(fields) => {
                 if fields.iter().any(|f| f == "*") {
-                    Some(None)
+                    Setting::Reset
                 } else {
-                    Some(Some(fields))
+                    Setting::Set(fields)
                 }
             }
             otherwise => otherwise,
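The `check` method above normalizes an explicit wildcard into a reset: asking to display `["*"]` and clearing the setting are the same request. A minimal sketch of that rule in isolation, using a pared-down copy of the `Setting` enum introduced later in this diff:

#[derive(Debug, PartialEq)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

// Mirrors the match arms in `check`: a `Set` containing "*" collapses to
// `Reset`, everything else passes through unchanged.
fn normalize_wildcard(field: Setting<Vec<String>>) -> Setting<Vec<String>> {
    match field {
        Setting::Set(fields) if fields.iter().any(|f| f == "*") => Setting::Reset,
        otherwise => otherwise,
    }
}

fn main() {
    let wildcard = Setting::Set(vec!["*".to_string()]);
    assert_eq!(normalize_wildcard(wildcard), Setting::Reset);

    let named = Setting::Set(vec!["title".to_string()]);
    assert_eq!(normalize_wildcard(named), Setting::Set(vec!["title".to_string()]));

    assert_eq!(normalize_wildcard(Setting::NotSet), Setting::NotSet);
}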
@@ -119,6 +97,7 @@ impl Settings<Unchecked> {
             displayed_attributes,
             searchable_attributes,
             filterable_attributes: self.filterable_attributes,
+            sortable_attributes: self.sortable_attributes,
             ranking_rules: self.ranking_rules,
             stop_words: self.stop_words,
             synonyms: self.synonyms,
@@ -128,10 +107,61 @@ impl Settings<Unchecked> {
     }
 }

-static ASC_DESC_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap());
+#[derive(Debug, Clone, PartialEq)]
+pub enum Setting<T> {
+    Set(T),
+    Reset,
+    NotSet,
+}

-#[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
+impl<T> Default for Setting<T> {
+    fn default() -> Self {
+        Self::NotSet
+    }
+}
+
+impl<T> Setting<T> {
+    pub const fn is_not_set(&self) -> bool {
+        matches!(self, Self::NotSet)
+    }
+
+    pub fn map<A>(self, f: fn(T) -> A) -> Setting<A> {
+        match self {
+            Setting::Set(a) => Setting::Set(f(a)),
+            Setting::Reset => Setting::Reset,
+            Setting::NotSet => Setting::NotSet,
+        }
+    }
+}
+
+#[cfg(test)]
+impl<T: serde::Serialize> serde::Serialize for Setting<T> {
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match self {
+            Self::Set(value) => Some(value),
+            // Usually not_set isn't serialized by setting skip_serializing_if field attribute
+            Self::NotSet | Self::Reset => None,
+        }
+        .serialize(serializer)
+    }
+}
+
+impl<'de, T: Deserialize<'de>> Deserialize<'de> for Setting<T> {
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(|x| match x {
+            Some(x) => Self::Set(x),
+            None => Self::Reset, // Reset is forced by sending null value
+        })
+    }
+}
+
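Taken together, the `#[serde(default)]` on the fields and the hand-written impls above encode a three-way contract: a missing key stays `NotSet`, an explicit `null` means `Reset`, and a value means `Set`. A self-contained sketch of that round trip (assuming `serde` and `serde_json` as dependencies; the field name is illustrative):

use serde::{Deserialize, Deserializer};

#[derive(Debug, PartialEq)]
pub enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> Default for Setting<T> {
    fn default() -> Self {
        Self::NotSet
    }
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for Setting<T> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // `null` deserializes to `None`, hence `Reset`; a missing field never
        // reaches this impl and falls back to `Default` (`NotSet`) instead.
        Deserialize::deserialize(deserializer).map(|opt: Option<T>| match opt {
            Some(value) => Self::Set(value),
            None => Self::Reset,
        })
    }
}

#[derive(Debug, Deserialize)]
struct Payload {
    #[serde(default)]
    displayed_attributes: Setting<Vec<String>>,
}

fn main() {
    let set: Payload = serde_json::from_str(r#"{"displayed_attributes": ["title"]}"#).unwrap();
    assert_eq!(set.displayed_attributes, Setting::Set(vec!["title".to_string()]));

    let reset: Payload = serde_json::from_str(r#"{"displayed_attributes": null}"#).unwrap();
    assert_eq!(reset.displayed_attributes, Setting::Reset);

    let not_set: Payload = serde_json::from_str("{}").unwrap();
    assert_eq!(not_set.displayed_attributes, Setting::NotSet);
}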
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Criterion {
     /// Sorted by decreasing number of matched query terms.
     /// Query words at the front of an attribute is considered better than if it was at the back.
@@ -141,8 +171,11 @@ pub enum Criterion {
     /// Sorted by increasing distance between matched query terms.
     Proximity,
     /// Documents with quey words contained in more important
-    /// attributes are considred better.
+    /// attributes are considered better.
     Attribute,
+    /// Dynamically sort at query time the documents. None, one or multiple Asc/Desc sortable
+    /// attributes can be used in place of this criterion at query time.
+    Sort,
     /// Sorted by the similarity of the matched words with the query words.
     Exactness,
     /// Sorted by the increasing value of the field specified.
@@ -151,26 +184,86 @@ pub enum Criterion {
     Desc(String),
 }

 impl Criterion {
     /// Returns the field name parameter of this criterion.
     pub fn field_name(&self) -> Option<&str> {
         match self {
             Criterion::Asc(name) | Criterion::Desc(name) => Some(name),
             _otherwise => None,
         }
     }
 }

 impl FromStr for Criterion {
     // since we're not going to show the custom error message we can override the
     // error type.
     type Err = ();

-    fn from_str(txt: &str) -> Result<Criterion, Self::Err> {
-        match txt {
+    fn from_str(text: &str) -> Result<Criterion, Self::Err> {
+        match text {
             "words" => Ok(Criterion::Words),
             "typo" => Ok(Criterion::Typo),
             "proximity" => Ok(Criterion::Proximity),
             "attribute" => Ok(Criterion::Attribute),
+            "sort" => Ok(Criterion::Sort),
             "exactness" => Ok(Criterion::Exactness),
-            text => {
-                let caps = ASC_DESC_REGEX.captures(text).ok_or(())?;
-                let order = caps.get(1).unwrap().as_str();
-                let field_name = caps.get(2).unwrap().as_str();
-                match order {
-                    "asc" => Ok(Criterion::Asc(field_name.to_string())),
-                    "desc" => Ok(Criterion::Desc(field_name.to_string())),
-                    _text => Err(()),
-                }
-            }
+            text => match AscDesc::from_str(text) {
+                Ok(AscDesc::Asc(field)) => Ok(Criterion::Asc(field)),
+                Ok(AscDesc::Desc(field)) => Ok(Criterion::Desc(field)),
+                Err(_) => Err(()),
+            },
         }
     }
 }

+#[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
+pub enum AscDesc {
+    Asc(String),
+    Desc(String),
+}
+
+impl FromStr for AscDesc {
+    type Err = ();
+
+    // since we don't know if this comes from the old or new syntax we need to check
+    // for both syntax.
+    // WARN: this code doesn't come from the original meilisearch v0.22.0 but was
+    // written specifically to be able to import the dump of meilisearch v0.21.0 AND
+    // meilisearch v0.22.0.
+    fn from_str(text: &str) -> Result<AscDesc, Self::Err> {
+        if let Some((field_name, asc_desc)) = text.rsplit_once(':') {
+            match asc_desc {
+                "asc" => Ok(AscDesc::Asc(field_name.to_string())),
+                "desc" => Ok(AscDesc::Desc(field_name.to_string())),
+                _ => Err(()),
+            }
+        } else if text.starts_with("asc(") && text.ends_with(')') {
+            Ok(AscDesc::Asc(
+                text.strip_prefix("asc(").unwrap().strip_suffix(')').unwrap().to_string(),
+            ))
+        } else if text.starts_with("desc(") && text.ends_with(')') {
+            Ok(AscDesc::Desc(
+                text.strip_prefix("desc(").unwrap().strip_suffix(')').unwrap().to_string(),
+            ))
+        } else {
+            Err(())
+        }
+    }
+}

 impl fmt::Display for Criterion {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         use Criterion::*;

         match self {
             Words => f.write_str("words"),
             Typo => f.write_str("typo"),
             Proximity => f.write_str("proximity"),
             Attribute => f.write_str("attribute"),
+            Sort => f.write_str("sort"),
             Exactness => f.write_str("exactness"),
             Asc(attr) => write!(f, "{}:asc", attr),
             Desc(attr) => write!(f, "{}:desc", attr),
         }
     }
 }
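A quick check of the dual syntax `AscDesc::from_str` accepts above, written as a test that assumes the `AscDesc` type from this diff is in scope:

#[test]
fn parses_both_sort_syntaxes() {
    use std::str::FromStr;

    // v0.22.0 syntax: `field:asc`
    assert_eq!(AscDesc::from_str("release_date:asc"), Ok(AscDesc::Asc("release_date".to_string())));
    // v0.21.0 syntax: `asc(field)`
    assert_eq!(AscDesc::from_str("asc(release_date)"), Ok(AscDesc::Asc("release_date".to_string())));
    // anything else is rejected
    assert_eq!(AscDesc::from_str("release_date"), Err(()));
}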
@@ -0,0 +1,23 @@
+---
+source: dump/src/reader/v2/mod.rs
+expression: movies2.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -0,0 +1,23 @@
+---
+source: dump/src/reader/v2/mod.rs
+expression: spells.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -0,0 +1,37 @@
+---
+source: dump/src/reader/v2/mod.rs
+expression: products.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {
+    "android": [
+      "phone",
+      "smartphone"
+    ],
+    "iphone": [
+      "phone",
+      "smartphone"
+    ],
+    "phone": [
+      "android",
+      "iphone",
+      "smartphone"
+    ]
+  },
+  "distinctAttribute": null
+}
@@ -0,0 +1,24 @@
+---
+source: dump/src/reader/v2/mod.rs
+expression: movies.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "exactness",
+    "asc(release_date)"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -0,0 +1,25 @@
+---
+source: dump/src/reader/v2/mod.rs
+expression: spells.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "sortableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -0,0 +1,39 @@
+---
+source: dump/src/reader/v2/mod.rs
+expression: products.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "sortableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {
+    "android": [
+      "phone",
+      "smartphone"
+    ],
+    "iphone": [
+      "phone",
+      "smartphone"
+    ],
+    "phone": [
+      "android",
+      "iphone",
+      "smartphone"
+    ]
+  },
+  "distinctAttribute": null
+}
@@ -0,0 +1,30 @@
+---
+source: dump/src/reader/v2/mod.rs
+expression: movies.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [
+    "genres",
+    "id"
+  ],
+  "sortableAttributes": [
+    "release_date"
+  ],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "exactness",
+    "release_date:asc"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -227,7 +227,6 @@ pub(crate) mod test {
     use super::*;

     #[test]
-    #[ignore]
     fn read_dump_v3() {
         let dump = File::open("tests/assets/v3.dump").unwrap();
         let dir = TempDir::new().unwrap();
@@ -273,7 +272,7 @@ pub(crate) mod test {
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"f309b009608cc0b770b2f74516f92647");
+        insta::assert_json_snapshot!(products.settings().unwrap());
         let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 10);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"548284a84de510f71e88e6cdea495cf5");
@@ -288,7 +287,7 @@ pub(crate) mod test {
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"95dff22ba3a7019616c12df9daa35e1e");
+        insta::assert_json_snapshot!(movies.settings().unwrap());
         let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 110);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d153b5a81d8b3cdcbe1dec270b574022");
@@ -303,7 +302,7 @@ pub(crate) mod test {
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", movies2.settings()), @"1dafc4b123e3a8e14a889719cc01f6e5");
+        insta::assert_json_snapshot!(movies2.settings().unwrap());
         let documents = movies2.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 0);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"d751713988987e9331980363e24189ce");
@@ -318,7 +317,7 @@ pub(crate) mod test {
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"1dafc4b123e3a8e14a889719cc01f6e5");
+        insta::assert_json_snapshot!(spells.settings().unwrap());
         let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 10);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
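Replacing the inline `@"<hash>"` literals with bare `insta::assert_json_snapshot!` calls moves the expected values out of the test source and into the external `.snap` files added below, each of which records its `source` file and the `expression` it captures ahead of the JSON body. On a first run the snapshots are written out and can be accepted with `cargo insta review`; afterwards any drift in the settings fails with a readable JSON diff rather than a hash mismatch:

    // No inline value, so insta pairs this assertion with an external
    // snapshot file keyed on the expression below.
    insta::assert_json_snapshot!(movies2.settings().unwrap());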
@@ -0,0 +1,25 @@
+---
+source: dump/src/reader/v3/mod.rs
+expression: movies2.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "sortableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -0,0 +1,25 @@
+---
+source: dump/src/reader/v3/mod.rs
+expression: spells.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "sortableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -0,0 +1,39 @@
+---
+source: dump/src/reader/v3/mod.rs
+expression: products.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "sortableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {
+    "android": [
+      "phone",
+      "smartphone"
+    ],
+    "iphone": [
+      "phone",
+      "smartphone"
+    ],
+    "phone": [
+      "android",
+      "iphone",
+      "smartphone"
+    ]
+  },
+  "distinctAttribute": null
+}
@@ -0,0 +1,31 @@
+---
+source: dump/src/reader/v3/mod.rs
+expression: movies.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [
+    "genres",
+    "id"
+  ],
+  "sortableAttributes": [
+    "release_date"
+  ],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness",
+    "release_date:asc"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null
+}
@@ -5,10 +5,8 @@ use serde::{Deserialize, Serialize};

 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
-#[cfg_attr(feature = "test-traits", derive(proptest_derive::Arbitrary))]
 pub struct ResponseError {
     #[serde(skip)]
-    #[cfg_attr(feature = "test-traits", proptest(strategy = "strategy::status_code_strategy()"))]
     pub code: StatusCode,
     pub message: String,
     #[serde(rename = "code")]
@@ -13,7 +13,7 @@ pub mod meta;
 pub mod settings;
 pub mod tasks;

-use self::meta::{DumpMeta, IndexUuid};
+use self::meta::{DumpMeta, IndexMeta, IndexUuid};
 use super::compat::v4_to_v5::CompatV4ToV5;
 use crate::{Error, IndexMetadata, Result, Version};

@@ -100,6 +100,10 @@ impl V4Reader {
             V4IndexReader::new(
                 index.uid.clone(),
                 &self.dump.path().join("indexes").join(index.index_meta.uuid.to_string()),
+                &index.index_meta,
+                BufReader::new(
+                    File::open(self.dump.path().join("updates").join("data.jsonl")).unwrap(),
+                ),
             )
         }))
     }
@@ -147,16 +151,44 @@ pub struct V4IndexReader {
 }

 impl V4IndexReader {
-    pub fn new(name: String, path: &Path) -> Result<Self> {
+    pub fn new(
+        name: String,
+        path: &Path,
+        index_metadata: &IndexMeta,
+        tasks: BufReader<File>,
+    ) -> Result<Self> {
         let meta = File::open(path.join("meta.json"))?;
         let meta: DumpMeta = serde_json::from_reader(meta)?;

+        let mut created_at = None;
+        let mut updated_at = None;
+
+        for line in tasks.lines() {
+            let task: Task = serde_json::from_str(&line?)?;
+
+            if task.index_uid.to_string() == name {
+                // The first task to match our index_uid that succeeded (ie. processed_at returns Some)
+                // is our `last_updated_at`.
+                if updated_at.is_none() {
+                    updated_at = task.processed_at()
+                }
+
+                // Once we reach the `creation_task_id` we can stop iterating on the task queue and
+                // this task represents our `created_at`.
+                if task.id as usize == index_metadata.creation_task_id {
+                    created_at = task.created_at();
+                    break;
+                }
+            }
+        }
+
+        let current_time = OffsetDateTime::now_utc();
+
         let metadata = IndexMetadata {
             uid: name,
             primary_key: meta.primary_key,
-            // FIXME: Iterate over the whole task queue to find the creation and last update date.
-            created_at: OffsetDateTime::now_utc(),
-            updated_at: OffsetDateTime::now_utc(),
+            created_at: created_at.unwrap_or(current_time),
+            updated_at: updated_at.unwrap_or(current_time),
         };

         let ret = V4IndexReader {
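The new constructor above derives the index dates from a single pass over the task queue: the first matching task that has succeeded supplies `updated_at`, and the task whose id equals `creation_task_id` supplies `created_at` and ends the scan. A pared-down sketch of that loop (hypothetical, simplified `Task`; the real v4 task carries many more fields and real timestamps):

struct Task {
    id: usize,
    index_uid: String,
    created: Option<i64>,   // stand-in for the Created event timestamp
    processed: Option<i64>, // stand-in for the Succeded event timestamp
}

// Returns (created_at, updated_at) for the index `name`, mirroring the loop above.
fn dates_for_index(tasks: &[Task], name: &str, creation_task_id: usize) -> (Option<i64>, Option<i64>) {
    let mut created_at = None;
    let mut updated_at = None;
    for task in tasks {
        if task.index_uid == name {
            // The first matching task that succeeded wins, exactly like the
            // `updated_at.is_none()` guard above.
            if updated_at.is_none() {
                updated_at = task.processed;
            }
            // Reaching the creation task both sets `created_at` and stops the scan.
            if task.id == creation_task_id {
                created_at = task.created;
                break;
            }
        }
    }
    (created_at, updated_at)
}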
@@ -219,7 +251,6 @@ pub(crate) mod test {
     use super::*;

     #[test]
-    #[ignore]
     fn read_dump_v4() {
         let dump = File::open("tests/assets/v4.dump").unwrap();
         let dir = TempDir::new().unwrap();
@@ -260,46 +291,46 @@ pub(crate) mod test {
         assert!(indexes.is_empty());

         // products
-        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(products.metadata(), @r###"
         {
           "uid": "products",
           "primaryKey": "sku",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-06T12:53:39.360187055Z",
+          "updatedAt": "2022-10-06T12:53:40.603035979Z"
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"65b139c6b9fc251e187073c8557803e2");
+        insta::assert_json_snapshot!(products.settings().unwrap());
         let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 10);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

         // movies
-        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(movies.metadata(), @r###"
         {
           "uid": "movies",
           "primaryKey": "id",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-06T12:53:38.710611568Z",
+          "updatedAt": "2022-10-06T12:53:49.785862546Z"
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"06aa1988493485d9b2cda7c751e6bb15");
+        insta::assert_json_snapshot!(movies.settings().unwrap());
         let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 110);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"786022a66ecb992c8a2a60fee070a5ab");

         // spells
-        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(spells.metadata(), @r###"
         {
           "uid": "dnd_spells",
           "primaryKey": "index",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-06T12:53:40.831649057Z",
+          "updatedAt": "2022-10-06T12:53:41.116036186Z"
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"7d722fc2629eaa45032ed3deb0c9b4ce");
+        insta::assert_json_snapshot!(spells.settings().unwrap());
         let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 10);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
@@ -0,0 +1,40 @@
+---
+source: dump/src/reader/v4/mod.rs
+expression: movies.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [
+    "genres",
+    "id"
+  ],
+  "sortableAttributes": [
+    "release_date"
+  ],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness",
+    "release_date:asc"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null,
+  "typoTolerance": {
+    "enabled": true,
+    "minWordSizeForTypos": {
+      "oneTypo": 5,
+      "twoTypos": 9
+    },
+    "disableOnWords": [],
+    "disableOnAttributes": []
+  }
+}
@@ -0,0 +1,34 @@
+---
+source: dump/src/reader/v4/mod.rs
+expression: spells.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "sortableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {},
+  "distinctAttribute": null,
+  "typoTolerance": {
+    "enabled": true,
+    "minWordSizeForTypos": {
+      "oneTypo": 5,
+      "twoTypos": 9
+    },
+    "disableOnWords": [],
+    "disableOnAttributes": []
+  }
+}
@@ -0,0 +1,48 @@
+---
+source: dump/src/reader/v4/mod.rs
+expression: products.settings().unwrap()
+---
+{
+  "displayedAttributes": [
+    "*"
+  ],
+  "searchableAttributes": [
+    "*"
+  ],
+  "filterableAttributes": [],
+  "sortableAttributes": [],
+  "rankingRules": [
+    "words",
+    "typo",
+    "proximity",
+    "attribute",
+    "sort",
+    "exactness"
+  ],
+  "stopWords": [],
+  "synonyms": {
+    "android": [
+      "phone",
+      "smartphone"
+    ],
+    "iphone": [
+      "phone",
+      "smartphone"
+    ],
+    "phone": [
+      "android",
+      "iphone",
+      "smartphone"
+    ]
+  },
+  "distinctAttribute": null,
+  "typoTolerance": {
+    "enabled": true,
+    "minWordSizeForTypos": {
+      "oneTypo": 5,
+      "twoTypos": 9
+    },
+    "disableOnWords": [],
+    "disableOnAttributes": []
+  }
+}
@@ -104,6 +104,41 @@ impl Task {
         })
     }

+    pub fn processed_at(&self) -> Option<OffsetDateTime> {
+        match self.events.last() {
+            Some(TaskEvent::Succeded { result: _, timestamp }) => Some(*timestamp),
+            _ => None,
+        }
+    }
+
+    pub fn created_at(&self) -> Option<OffsetDateTime> {
+        match &self.content {
+            TaskContent::IndexCreation { primary_key: _ } => match self.events.first() {
+                Some(TaskEvent::Created(ts)) => Some(*ts),
+                _ => None,
+            },
+            TaskContent::DocumentAddition {
+                content_uuid: _,
+                merge_strategy: _,
+                primary_key: _,
+                documents_count: _,
+                allow_index_creation: _,
+            } => match self.events.first() {
+                Some(TaskEvent::Created(ts)) => Some(*ts),
+                _ => None,
+            },
+            TaskContent::SettingsUpdate {
+                settings: _,
+                is_deletion: _,
+                allow_index_creation: _,
+            } => match self.events.first() {
+                Some(TaskEvent::Created(ts)) => Some(*ts),
+                _ => None,
+            },
+            _ => None,
+        }
+    }
+
     /// Return the content_uuid of the `Task` if there is one.
     pub fn get_content_uuid(&self) -> Option<Uuid> {
         match self {
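Two details worth noting in the helpers above: the variant really is spelled `Succeded` in the v4 task format (the v5 reader further down matches the corrected `Succeeded`), and both helpers lean on `events` being stored in chronological order, so `events.first()` is the creation event and `events.last()` is the terminal state. A hypothetical event trace for one task:

    // [Created(t0), ..., Succeded { timestamp: t2, .. }]
    //   ^ created_at() = Some(t0)     ^ processed_at() = Some(t2)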
@@ -5,7 +5,6 @@ use serde::Deserialize;

 #[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
-#[cfg_attr(feature = "test-traits", derive(proptest_derive::Arbitrary))]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub struct ResponseError {
     #[serde(skip)]
@@ -33,7 +33,7 @@
 //!

 use std::fs::{self, File};
-use std::io::{BufRead, BufReader, ErrorKind, Seek, SeekFrom};
+use std::io::{BufRead, BufReader, ErrorKind, Seek};
 use std::path::Path;

 use serde::{Deserialize, Serialize};
@@ -141,6 +141,10 @@ impl V5Reader {
             V5IndexReader::new(
                 index.uid.clone(),
                 &self.dump.path().join("indexes").join(index.index_meta.uuid.to_string()),
+                &index.index_meta,
+                BufReader::new(
+                    File::open(self.dump.path().join("updates").join("data.jsonl")).unwrap(),
+                ),
             )
         }))
     }
@@ -174,7 +178,7 @@ impl V5Reader {
     }

     pub fn keys(&mut self) -> Result<Box<dyn Iterator<Item = Result<Key>> + '_>> {
-        self.keys.seek(SeekFrom::Start(0))?;
+        self.keys.rewind()?;
         Ok(Box::new(
             (&mut self.keys).lines().map(|line| -> Result<_> { Ok(serde_json::from_str(&line?)?) }),
         ))
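`Seek::rewind` (stable since Rust 1.55) is shorthand for seeking back to the start, which is what lets the `SeekFrom` import disappear in the earlier hunk of this file:

    use std::io::{Seek, SeekFrom};

    fn rewind_by_hand<R: Seek>(reader: &mut R) -> std::io::Result<()> {
        reader.seek(SeekFrom::Start(0))?; // equivalent to `reader.rewind()?`
        Ok(())
    }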
@@ -189,16 +193,39 @@ pub struct V5IndexReader {
 }

 impl V5IndexReader {
-    pub fn new(name: String, path: &Path) -> Result<Self> {
+    pub fn new(
+        name: String,
+        path: &Path,
+        index_metadata: &meta::IndexMeta,
+        tasks: BufReader<File>,
+    ) -> Result<Self> {
         let meta = File::open(path.join("meta.json"))?;
         let meta: meta::DumpMeta = serde_json::from_reader(meta)?;

+        let mut created_at = None;
+        let mut updated_at = None;
+
+        for line in tasks.lines() {
+            let task: Task = serde_json::from_str(&line?)?;
+
+            if *task.index_uid().unwrap_or_default().to_string() == name {
+                if updated_at.is_none() {
+                    updated_at = task.processed_at()
+                }
+
+                if task.id as usize == index_metadata.creation_task_id {
+                    created_at = task.created_at();
+
+                    break;
+                }
+            }
+        }
+
         let metadata = IndexMetadata {
             uid: name,
             primary_key: meta.primary_key,
-            // FIXME: Iterate over the whole task queue to find the creation and last update date.
-            created_at: OffsetDateTime::now_utc(),
-            updated_at: OffsetDateTime::now_utc(),
+            created_at: created_at.unwrap_or_else(OffsetDateTime::now_utc),
+            updated_at: updated_at.unwrap_or_else(OffsetDateTime::now_utc),
         };

         let ret = V5IndexReader {
@@ -261,7 +288,6 @@ pub(crate) mod test {
     use super::*;

     #[test]
-    #[ignore]
     fn read_dump_v5() {
         let dump = File::open("tests/assets/v5.dump").unwrap();
         let dir = TempDir::new().unwrap();
@@ -303,46 +329,46 @@ pub(crate) mod test {
         assert!(indexes.is_empty());

         // products
-        insta::assert_json_snapshot!(products.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(products.metadata(), @r###"
         {
           "uid": "products",
           "primaryKey": "sku",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-04T15:51:35.939396731Z",
+          "updatedAt": "2022-10-04T15:55:01.897325373Z"
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", products.settings()), @"b392b928dab63468318b2bdaad844c5a");
+        insta::assert_json_snapshot!(products.settings().unwrap());
         let documents = products.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 10);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"b01c8371aea4c7171af0d4d846a2bdca");

         // movies
-        insta::assert_json_snapshot!(movies.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(movies.metadata(), @r###"
         {
           "uid": "movies",
           "primaryKey": "id",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-04T15:51:35.291992167Z",
+          "updatedAt": "2022-10-04T15:55:10.33561842Z"
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", movies.settings()), @"2f881248b7c3623e2ba2885dbf0b2c18");
+        insta::assert_json_snapshot!(movies.settings().unwrap());
         let documents = movies.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 200);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"e962baafd2fbae4cdd14e876053b0c5a");

         // spells
-        insta::assert_json_snapshot!(spells.metadata(), { ".createdAt" => "[now]", ".updatedAt" => "[now]" }, @r###"
+        insta::assert_json_snapshot!(spells.metadata(), @r###"
         {
           "uid": "dnd_spells",
           "primaryKey": "index",
-          "createdAt": "[now]",
-          "updatedAt": "[now]"
+          "createdAt": "2022-10-04T15:51:37.381094632Z",
+          "updatedAt": "2022-10-04T15:55:02.394503431Z"
         }
         "###);

-        meili_snap::snapshot_hash!(format!("{:#?}", spells.settings()), @"ade154e63ab713de67919892917d3d9d");
+        insta::assert_json_snapshot!(spells.settings().unwrap());
         let documents = spells.documents().unwrap().collect::<Result<Vec<_>>>().unwrap();
         assert_eq!(documents.len(), 10);
         meili_snap::snapshot_hash!(format!("{:#?}", documents), @"235016433dd04262c7f2da01d1e808ce");
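The v5 snapshots below look different from the v2/v3/v4 ones: every `Setting` is rendered in serde's default externally tagged enum encoding, `"Reset"` for the unit variants and `{"Set": ...}` for the newtype variant, which is what a derived `Serialize` produces (presumably a test-only derive here, in contrast to the hand-written test impl in the v2 reader above). A sketch of that encoding:

#[allow(dead_code)]
#[derive(serde::Serialize)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

fn main() {
    // Newtype variant: externally tagged object.
    let set = serde_json::to_string(&Setting::Set(vec!["genres"])).unwrap();
    assert_eq!(set, r#"{"Set":["genres"]}"#);

    // Unit variant: bare string.
    let reset = serde_json::to_string(&Setting::<()>::Reset).unwrap();
    assert_eq!(reset, r#""Reset""#);
}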
@@ -0,0 +1,74 @@
+---
+source: dump/src/reader/v5/mod.rs
+expression: movies.settings().unwrap()
+---
+{
+  "displayedAttributes": "Reset",
+  "searchableAttributes": "Reset",
+  "filterableAttributes": {
+    "Set": [
+      "genres",
+      "id"
+    ]
+  },
+  "sortableAttributes": {
+    "Set": [
+      "release_date"
+    ]
+  },
+  "rankingRules": {
+    "Set": [
+      "words",
+      "typo",
+      "proximity",
+      "attribute",
+      "sort",
+      "exactness",
+      "release_date:asc"
+    ]
+  },
+  "stopWords": {
+    "Set": []
+  },
+  "synonyms": {
+    "Set": {}
+  },
+  "distinctAttribute": "Reset",
+  "typoTolerance": {
+    "Set": {
+      "enabled": {
+        "Set": true
+      },
+      "minWordSizeForTypos": {
+        "Set": {
+          "oneTypo": {
+            "Set": 5
+          },
+          "twoTypos": {
+            "Set": 9
+          }
+        }
+      },
+      "disableOnWords": {
+        "Set": []
+      },
+      "disableOnAttributes": {
+        "Set": []
+      }
+    }
+  },
+  "faceting": {
+    "Set": {
+      "maxValuesPerFacet": {
+        "Set": 100
+      }
+    }
+  },
+  "pagination": {
+    "Set": {
+      "maxTotalHits": {
+        "Set": 1000
+      }
+    }
+  }
+}
@@ -0,0 +1,68 @@
+---
+source: dump/src/reader/v5/mod.rs
+expression: spells.settings().unwrap()
+---
+{
+  "displayedAttributes": "Reset",
+  "searchableAttributes": "Reset",
+  "filterableAttributes": {
+    "Set": []
+  },
+  "sortableAttributes": {
+    "Set": []
+  },
+  "rankingRules": {
+    "Set": [
+      "words",
+      "typo",
+      "proximity",
+      "attribute",
+      "sort",
+      "exactness"
+    ]
+  },
+  "stopWords": {
+    "Set": []
+  },
+  "synonyms": {
+    "Set": {}
+  },
+  "distinctAttribute": "Reset",
+  "typoTolerance": {
+    "Set": {
+      "enabled": {
+        "Set": true
+      },
+      "minWordSizeForTypos": {
+        "Set": {
+          "oneTypo": {
+            "Set": 5
+          },
+          "twoTypos": {
+            "Set": 9
+          }
+        }
+      },
+      "disableOnWords": {
+        "Set": []
+      },
+      "disableOnAttributes": {
+        "Set": []
+      }
+    }
+  },
+  "faceting": {
+    "Set": {
+      "maxValuesPerFacet": {
+        "Set": 100
+      }
+    }
+  },
+  "pagination": {
+    "Set": {
+      "maxTotalHits": {
+        "Set": 1000
+      }
+    }
+  }
+}
@@ -0,0 +1,82 @@
+---
+source: dump/src/reader/v5/mod.rs
+expression: products.settings().unwrap()
+---
+{
+  "displayedAttributes": "Reset",
+  "searchableAttributes": "Reset",
+  "filterableAttributes": {
+    "Set": []
+  },
+  "sortableAttributes": {
+    "Set": []
+  },
+  "rankingRules": {
+    "Set": [
+      "words",
+      "typo",
+      "proximity",
+      "attribute",
+      "sort",
+      "exactness"
+    ]
+  },
+  "stopWords": {
+    "Set": []
+  },
+  "synonyms": {
+    "Set": {
+      "android": [
+        "phone",
+        "smartphone"
+      ],
+      "iphone": [
+        "phone",
+        "smartphone"
+      ],
+      "phone": [
+        "android",
+        "iphone",
+        "smartphone"
+      ]
+    }
+  },
+  "distinctAttribute": "Reset",
+  "typoTolerance": {
+    "Set": {
+      "enabled": {
+        "Set": true
+      },
+      "minWordSizeForTypos": {
+        "Set": {
+          "oneTypo": {
+            "Set": 5
+          },
+          "twoTypos": {
+            "Set": 9
+          }
+        }
+      },
+      "disableOnWords": {
+        "Set": []
+      },
+      "disableOnAttributes": {
+        "Set": []
+      }
+    }
+  },
+  "faceting": {
+    "Set": {
+      "maxValuesPerFacet": {
+        "Set": 100
+      }
+    }
+  },
+  "pagination": {
+    "Set": {
+      "maxTotalHits": {
+        "Set": 1000
+      }
+    }
+  }
+}
@@ -140,6 +140,45 @@ impl Task {
             TaskContent::Dump { .. } => None,
         }
     }

+    pub fn processed_at(&self) -> Option<OffsetDateTime> {
+        match self.events.last() {
+            Some(TaskEvent::Succeeded { result: _, timestamp }) => Some(*timestamp),
+            _ => None,
+        }
+    }
+
+    pub fn created_at(&self) -> Option<OffsetDateTime> {
+        match &self.content {
+            TaskContent::IndexCreation { index_uid: _, primary_key: _ } => {
+                match self.events.first() {
+                    Some(TaskEvent::Created(ts)) => Some(*ts),
+                    _ => None,
+                }
+            }
+            TaskContent::DocumentAddition {
+                index_uid: _,
+                content_uuid: _,
+                merge_strategy: _,
+                primary_key: _,
+                documents_count: _,
+                allow_index_creation: _,
+            } => match self.events.first() {
+                Some(TaskEvent::Created(ts)) => Some(*ts),
+                _ => None,
+            },
+            TaskContent::SettingsUpdate {
+                index_uid: _,
+                settings: _,
+                is_deletion: _,
+                allow_index_creation: _,
+            } => match self.events.first() {
+                Some(TaskEvent::Created(ts)) => Some(*ts),
+                _ => None,
+            },
+            _ => None,
+        }
+    }
 }

 impl IndexUid {