Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-07-21 13:51:05 +00:00)

Compare commits: optimize-l...release-v0 (162 commits)
SHA1
----
ed6c2fb22c
ab17c0acd5
05af8f0e46
dc93853946
a7d2c9572e
53c7c1b07f
62b06e6088
8ad5c3043c
8fa8eb9767
069374c23a
4ccfc837c1
ce75bb5ef8
f7f34fb714
fcedce8578
a93eec1d34
135f656e8f
88e69f4302
459829631f
20589a41b5
61a518a384
7905dae7ad
05f93541d8
2827ff7957
d166a97d67
248d727e61
56d72d4493
740926e747
f4b81fa0a1
0fdbf128e9
d3b984d862
d406fe901b
4dfae44478
935f18efcf
5b57114771
c2ab7a7939
fa315352da
268d59ccb1
5901d4e407
aabd67a9fa
a690ace36e
579fa3f1ad
3fd6af25f9
7f267ec4be
441492f1c8
528a4721c1
5a4f1508d0
d1a30df23d
92b0c51bfe
135499f398
b3ffcb2d97
5cbd047989
ef3fa92536
6520d3c474
403226a029
07f45251e9
3a62e97c9f
37dc6537c3
55aa83d75a
0c962f6a76
2e0221cfc6
2d34b239ab
55853815a5
b897ce8dfa
5f5b787483
d0aa8042e2
9cb1e4af5c
cc09aa8868
2eca723a91
ae14567f97
d0f1054f5c
3878c289df
d1b3642923
f45ddff312
8f3b590436
4e37427de8
50434d35d0
60792eebcf
4fbc8c705f
80b1f3e830
e315547ffc
833ade80a6
f117c90c46
3efd06168f
07bb32b622
1131400694
9a789daa58
0826aa35e2
6aa3ad6b6c
b774adfbf7
47a1aa69f0
b3d89da74d
0798170a64
446dfccc8c
43175cfb78
a1bb49c351
bebd76064a
f0b2ac6efb
08d86e33ca
2c2efc7ab6
381df43be4
f87ebfe477
c445334070
651a22b1ed
ff59ae56f4
b2577aac52
c9bb111ef3
e2af8dccb8
5e206ee84b
aff4b64265
0a2ef0037f
40ae26478a
6fe3f285ce
72f8adaa70
e659c08ac4
ea365126b4
a277cc9a18
a37c7ba1bb
ef1d6b1694
099abefc6d
a05101af4d
109540011a
2f92169e48
a58b00d8f1
2b8f3c26ec
0b6ca73790
1f1482e97c
25fecf9360
4bee0565e8
d5da063666
43bb5176a9
a0734c991c
cb29d7d124
e32d5ef2b3
ee69ede1ce
9b2036ac05
5c543f9d94
0c03ed3c1e
54a0b47c2b
947fb5c956
cd18459484
225d9936ed
93daa4c464
d08c77706c
de58ccd4ba
62240b7e19
22874ce300
b5f91b91c3
b6e6a08f7d
8198bb9da2
68a7d6bc61
83e20027fd
f21a4d61da
12538d5a44
ae174c2cca
e6b806e0cf
cf955a77db
3a48de136e
e6f03f82df
58d2aad309
e3426d5b7a
73d4869e5e
fe32097964
.github/ISSUE_TEMPLATE/config.yml (vendored, 8 changes)

@@ -1,13 +1,13 @@
 contact_links:
-  - name: Support questions & other
-    url: https://github.com/meilisearch/meilisearch/discussions/new
-    about: For any other question, open a discussion in this repository
   - name: Language support request & feedback
     url: https://github.com/meilisearch/product/discussions/categories/feedback-feature-proposal?discussions_q=label%3Aproduct%3Acore%3Atokenizer+category%3A%22Feedback+%26+Feature+Proposal%22
     about: The requests and feedback regarding Language support are not managed in this repository. Please upvote the related discussion in our dedicated product repository or open a new one if it doesn't exist.
-  - name: Feature request & feedback
+  - name: Any other feature request & feedback
     url: https://github.com/meilisearch/product/discussions/categories/feedback-feature-proposal
     about: The feature requests and feedback regarding the already existing features are not managed in this repository. Please open a discussion in our dedicated product repository
   - name: Documentation issue
     url: https://github.com/meilisearch/documentation/issues/new
     about: For documentation issues, open an issue or a PR in the documentation repository
+  - name: Support questions & other
+    url: https://github.com/meilisearch/meilisearch/discussions/new
+    about: For any other question, open a discussion in this repository
.github/scripts/is-latest-release.sh (vendored, 2 changes)

@@ -85,7 +85,7 @@ get_latest() {
     latest=""
     current_tag=""
     for release_info in $releases; do
-        if [ $i -eq 0 ]; then # Cheking tag_name
+        if [ $i -eq 0 ]; then # Checking tag_name
            if echo "$release_info" | grep -q "$GREP_SEMVER_REGEXP"; then # If it's not an alpha or beta release
                current_tag=$release_info
            else
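The hunk above only fixes a comment's spelling; the interesting part is the `grep -q "$GREP_SEMVER_REGEXP"` test, which keeps only plain stable tags. The regexp itself is defined earlier in the script and is not part of this hunk, so the value below is an assumption used to make the sketch self-contained:

```bash
#!/bin/sh
# Minimal sketch of the stable-tag filter used in get_latest().
# GREP_SEMVER_REGEXP is an assumed value; the real one lives earlier in the script.
GREP_SEMVER_REGEXP='v\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)$'

for tag in v0.28.1 v0.29.0rc1 v0.29.0 latest; do
    if echo "$tag" | grep -q "$GREP_SEMVER_REGEXP"; then
        echo "$tag: candidate for latest stable release"
    else
        echo "$tag: skipped (alpha, beta, or RC)"
    fi
done
```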
@@ -3,7 +3,7 @@ on:
   schedule:
     - cron: '0 0 1 */3 *'
   workflow_dispatch:

 jobs:
   create-issue:
     runs-on: ubuntu-latest

@@ -12,12 +12,12 @@ jobs:
       - name: Create an issue
         uses: actions-ecosystem/action-create-issue@v1
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           title: Upgrade dependencies
           body: |
             We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the core-team repositories that Meilisearch depends on (milli, charabia, heed...).

-            ⚠️ This issue should only be done at the beginning of the sprint!
+            ⚠️ This issue should only be done at the beginning of the sprint!
           labels: |
             dependencies
             maintenance
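The cron expression `'0 0 1 */3 *'` fires at midnight on the first day of every third month, i.e. once per quarter. As a rough equivalent of what the `action-create-issue` step does, expressed with the `gh` CLI (a sketch, not what the workflow actually runs):

```bash
# Hedged equivalent of the action-create-issue step above, using the gh CLI.
gh issue create \
  --title "Upgrade dependencies" \
  --body "We need to update the dependencies of the Meilisearch repository, and, if possible, the dependencies of all the core-team repositories that Meilisearch depends on (milli, charabia, heed...)." \
  --label dependencies \
  --label maintenance
```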
.github/workflows/milestone-workflow.yml (vendored, new file, 156 lines)

@@ -0,0 +1,156 @@
name: Milestone's workflow

# /!\ No git flow are handled here

# For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
# - the roadmap issue is created, see https://github.com/meilisearch/core-team/blob/main/issue-templates/roadmap-issue.md
# - the changelog issue is created, see https://github.com/meilisearch/core-team/blob/main/issue-templates/changelog-issue.md

# For each Milestone closed
# - the `release_version` label is created
# - this label is applied to all issues/PRs in the Milestone

on:
  milestone:
    types: [created, closed]

env:
  MILESTONE_VERSION: ${{ github.event.milestone.title }}
  MILESTONE_URL: ${{ github.event.milestone.html_url }}
  MILESTONE_DUE_ON: ${{ github.event.milestone.due_on }}
  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

jobs:

  # -----------------
  # MILESTONE CREATED
  # -----------------

  get-release-version:
    if: github.event.action == 'created'
    runs-on: ubuntu-latest
    outputs:
      is-patch: ${{ steps.check-patch.outputs.is-patch }}
    env:
      MILESTONE_VERSION: ${{ github.event.milestone.title }}
    steps:
      - uses: actions/checkout@v3
      - name: Check if this release is a patch release only
        id: check-patch
        run: |
          echo version: $MILESTONE_VERSION
          if [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.0$ ]]; then
            echo 'This is NOT a patch release'
            echo ::set-output name=is-patch::false
          elif [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo 'This is a patch release'
            echo ::set-output name=is-patch::true
          else
            echo "Not a valid format of release, check the Milestone's title."
            echo 'Should be vX.Y.Z'
            exit 1
          fi

  create-roadmap-issue:
    needs: get-release-version
    # Create the roadmap issue if the release is not only a patch release
    if: github.event.action == 'created' && needs.get-release-version.outputs.is-patch == 'false'
    runs-on: ubuntu-latest
    env:
      ISSUE_TEMPLATE: issue-template.md
    steps:
      - uses: actions/checkout@v3
      - name: Download the issue template
        run: curl -s https://raw.githubusercontent.com/meilisearch/core-team/main/issue-templates/roadmap-issue.md > $ISSUE_TEMPLATE
      - name: Replace all empty occurrences in the templates
        run: |
          # Replace all <<version>> occurrences
          sed -i "s/<<version>>/$MILESTONE_VERSION/g" $ISSUE_TEMPLATE

          # Replace all <<milestone_id>> occurrences
          milestone_id=$(echo $MILESTONE_URL | cut -d '/' -f 7)
          sed -i "s/<<milestone_id>>/$milestone_id/g" $ISSUE_TEMPLATE

          # Replace release date if exists
          if [[ ! -z $MILESTONE_DUE_ON ]]; then
            date=$(echo $MILESTONE_DUE_ON | cut -d 'T' -f 1)
            sed -i "s/Release date\: 20XX-XX-XX/Release date\: $date/g" $ISSUE_TEMPLATE
          fi
      - name: Create the issue
        run: |
          gh issue create \
            --title "$MILESTONE_VERSION ROADMAP" \
            --label 'epic,impacts docs,impacts integrations,impacts cloud' \
            --body-file $ISSUE_TEMPLATE \
            --milestone $MILESTONE_VERSION

  create-changelog-issue:
    needs: get-release-version
    # Create the changelog issue if the release is not only a patch release
    if: github.event.action == 'created' && needs.get-release-version.outputs.is-patch == 'false'
    runs-on: ubuntu-latest
    env:
      ISSUE_TEMPLATE: issue-template.md
    steps:
      - uses: actions/checkout@v3
      - name: Download the issue template
        run: curl -s https://raw.githubusercontent.com/meilisearch/core-team/main/issue-templates/changelog-issue.md > $ISSUE_TEMPLATE
      - name: Replace all empty occurrences in the templates
        run: |
          # Replace all <<version>> occurrences
          sed -i "s/<<version>>/$MILESTONE_VERSION/g" $ISSUE_TEMPLATE

          # Replace all <<milestone_id>> occurrences
          milestone_id=$(echo $MILESTONE_URL | cut -d '/' -f 7)
          sed -i "s/<<milestone_id>>/$milestone_id/g" $ISSUE_TEMPLATE
      - name: Create the issue
        run: |
          gh issue create \
            --title "Create release changelogs for $MILESTONE_VERSION" \
            --label 'impacts docs,documentation' \
            --body-file $ISSUE_TEMPLATE \
            --milestone $MILESTONE_VERSION \
            --assignee curquiza

  # ----------------
  # MILESTONE CLOSED
  # ----------------

  create-release-label:
    if: github.event.action == 'closed'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Create the ${{ env.MILESTONE_VERSION }} label
        run: |
          label_description="PRs/issues solved in $MILESTONE_VERSION"
          if [[ ! -z $MILESTONE_DUE_ON ]]; then
            date=$(echo $MILESTONE_DUE_ON | cut -d 'T' -f 1)
            label_description="$label_description released on $date"
          fi

          gh api repos/curquiza/meilisearch/labels \
            --method POST \
            -H "Accept: application/vnd.github+json" \
            -f name="$MILESTONE_VERSION" \
            -f description="$label_description" \
            -f color='ff5ba3'

  labelize-all-milestone-content:
    if: github.event.action == 'closed'
    needs: create-release-label
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Add label ${{ env.MILESTONE_VERSION }} to all PRs in the Milestone
        run: |
          prs=$(gh pr list --search milestone:"$MILESTONE_VERSION" --limit 1000 --state all --json number --template '{{range .}}{{tablerow (printf "%v" .number)}}{{end}}')
          for pr in $prs; do
            gh pr edit $pr --add-label $MILESTONE_VERSION
          done
      - name: Add label ${{ env.MILESTONE_VERSION }} to all issues in the Milestone
        run: |
          issues=$(gh issue list --search milestone:"$MILESTONE_VERSION" --limit 1000 --state all --json number --template '{{range .}}{{tablerow (printf "%v" .number)}}{{end}}')
          for issue in $issues; do
            gh issue edit $issue --add-label $MILESTONE_VERSION
          done
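The version check in `get-release-version` hinges on two regexes taken straight from the workflow: a milestone title ending in `.0` is a minor or major release, any other `vX.Y.Z` is a patch. A standalone sketch of that decision, runnable outside CI (the sample milestone titles are made up):

```bash
# Patch-release detection, extracted from the check-patch step above.
for MILESTONE_VERSION in v0.29.0 v0.29.1 v1.0.0 0.29.1 v0.29; do
  if [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.0$ ]]; then
    echo "$MILESTONE_VERSION: not a patch release -> roadmap + changelog issues created"
  elif [[ $MILESTONE_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$MILESTONE_VERSION: patch release -> issue creation skipped"
  else
    echo "$MILESTONE_VERSION: invalid milestone title, should be vX.Y.Z"
  fi
done
```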
.github/workflows/publish-binaries.yml (vendored, 15 changes)

@@ -1,4 +1,6 @@
 on:
+  schedule:
+    - cron: '0 2 * * *' # Every day at 2:00am
   release:
     types: [published]

@@ -8,8 +10,9 @@ jobs:
   check-version:
     name: Check the version validity
     runs-on: ubuntu-latest
+    # No need to check the version for dry run (cron)
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       # Check if the tag has the v<number>.<number>.<number> format.
       # If yes, it means we are publishing an official release.
       # If no, we are releasing a RC, so no need to check the version.

@@ -25,7 +28,7 @@
             echo ::set-output name=stable::false
           fi
       - name: Check release validity
-        if: steps.check-tag-format.outputs.stable == 'true'
+        if: github.event_name != 'schedule' && steps.check-tag-format.outputs.stable == 'true'
         run: bash .github/scripts/check-release.sh

   publish:

@@ -54,10 +57,12 @@
       - uses: actions/checkout@v3
       - name: Build
         run: cargo build --release --locked
+      # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
+        if: github.event_name != 'schedule'
         uses: svenstaro/upload-release-action@v1-release
         with:
-          repo_token: ${{ secrets.PUBLISH_TOKEN }}
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/${{ matrix.artifact_name }}
           asset_name: ${{ matrix.asset_name }}
           tag: ${{ github.ref }}

@@ -123,9 +128,11 @@
         run: ls -lR ./target

       - name: Upload the binary to release
+        # No need to upload binaries for dry run (cron)
+        if: github.event_name != 'schedule'
         uses: svenstaro/upload-release-action@v1-release
         with:
-          repo_token: ${{ secrets.PUBLISH_TOKEN }}
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/${{ matrix.target }}/release/meilisearch
          asset_name: ${{ matrix.asset_name }}
          tag: ${{ github.ref }}
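The `check-tag-format` step that produces the `stable` output is not shown in this diff; based on the comments above (a `v<number>.<number>.<number>` tag means an official release, anything else is an RC), it presumably looks something like this sketch, with an assumed regex:

```bash
# Assumed shape of the check-tag-format step: decide whether the pushed tag
# is an official stable release or a release candidate.
TAG=${GITHUB_REF#refs/tags/}              # e.g. v0.29.0 or v0.29.0rc1
if [[ $TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  echo "::set-output name=stable::true"   # official release: check-release.sh runs
else
  echo "::set-output name=stable::false"  # RC: the version check is skipped
fi
```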
.github/workflows/publish-deb-brew-pkg.yml (vendored, 4 changes)

@@ -9,7 +9,7 @@ jobs:
     name: Check the version validity
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Check release validity
         run: bash .github/scripts/check-release.sh

@@ -29,7 +29,7 @@ jobs:
       - name: Upload debian pkg to release
         uses: svenstaro/upload-release-action@v1-release
         with:
-          repo_token: ${{ secrets.GITHUB_TOKEN }}
+          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb
           asset_name: meilisearch.deb
           tag: ${{ github.ref }}
.github/workflows/publish-docker-images.yml (vendored, 15 changes)

@@ -12,7 +12,7 @@ jobs:
   docker:
     runs-on: docker
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       # Check if the tag has the v<number>.<number>.<number> format. If yes, it means we are publishing an official release.
       # In this situation, we need to set `output.stable` to create/update the following tags (additionally to the `vX.Y.Z` Docker tag):

@@ -53,7 +53,7 @@
         uses: docker/metadata-action@v4
         with:
           images: getmeili/meilisearch
-          # The lastest and `vX.Y` tags are only pushed for the official Meilisearch releases
+          # The latest and `vX.Y` tags are only pushed for the official Meilisearch releases
           # See https://github.com/docker/metadata-action#latest-tag
           flavor: latest=false
           tags: |

@@ -62,10 +62,19 @@
             type=raw,value=latest,enable=${{ steps.check-tag-format.outputs.stable == 'true' }}

       - name: Build and push
         id: docker_build
         uses: docker/build-push-action@v3
         with:
           # We do not push tags for the cron jobs, this is only for test purposes
           push: ${{ github.event_name != 'schedule' }}
           platforms: linux/amd64,linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
+
+      # /!\ Don't touch this without checking with Cloud team
+      - name: Send CI information to Cloud team
+        if: github.event_name != 'schedule'
+        uses: peter-evans/repository-dispatch@v2
+        with:
+          token: ${{ secrets.MEILI_BOT_GH_PAT }}
+          repository: meilisearch/meilisearch-cloud
+          event-type: cloud-docker-build
+          client-payload: '{ "meilisearch_version": "${{ steps.meta.outputs.tags }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
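Putting the tagging rules together: a stable `vX.Y.Z` git tag produces three Docker tags (`vX.Y.Z`, `vX.Y`, and `latest`), while an RC only gets its own tag. A small sketch of that derivation (the tag value is hypothetical):

```bash
GIT_TAG=v0.29.0                                   # hypothetical release tag
MINOR_TAG=$(echo "$GIT_TAG" | cut -d '.' -f 1-2)  # -> v0.29

echo "getmeili/meilisearch:$GIT_TAG"
if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then  # stable releases only
  echo "getmeili/meilisearch:$MINOR_TAG"
  echo "getmeili/meilisearch:latest"
fi
```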
.github/workflows/rust.yml (vendored, 20 changes)

@@ -24,9 +24,8 @@
         os: [ubuntu-18.04, macos-latest, windows-latest]
     steps:
       - uses: actions/checkout@v3
+      - uses: rui314/setup-mold@v1 # Optimize link time
-      - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+      # - name: Cache dependencies
+      #   uses: Swatinem/rust-cache@v2.0.0
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:

@@ -44,14 +43,13 @@
     runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v3
+      - uses: rui314/setup-mold@v1 # Optimize link time
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
           toolchain: stable
           override: true
-      - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+      # - name: Cache dependencies
+      #   uses: Swatinem/rust-cache@v2.0.0
       - name: Run tests in debug
         uses: actions-rs/cargo@v1
         with:

@@ -63,15 +61,14 @@
     runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v3
+      - uses: rui314/setup-mold@v1 # Optimize link time
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
           toolchain: stable
           override: true
           components: clippy
-      - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+      # - name: Cache dependencies
+      #   uses: Swatinem/rust-cache@v2.0.0
       - name: Run cargo clippy
         uses: actions-rs/cargo@v1
         with:

@@ -83,14 +80,13 @@
     runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v3
+      - uses: rui314/setup-mold@v1 # Optimize link time
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
           toolchain: stable
           override: true
           components: rustfmt
-      - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.0.0
+      # - name: Cache dependencies
+      #   uses: Swatinem/rust-cache@v2.0.0
       - name: Run cargo fmt
         run: cargo fmt --all -- --check
.github/workflows/update-cargo-toml-version.yml (vendored, new file, 47 lines)

@@ -0,0 +1,47 @@
name: Update Meilisearch version in all Cargo.toml files

on:
  workflow_dispatch:
    inputs:
      new_version:
        description: 'The new version (vX.Y.Z)'
        required: true

env:
  NEW_VERSION: ${{ github.event.inputs.new_version }}
  NEW_BRANCH: update-version-${{ github.event.inputs.new_version }}
  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

jobs:

  update-version-cargo-toml:
    name: Update version in Cargo.toml files
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - name: Install sd
        run: cargo install sd
      - name: Update Cargo.toml files
        run: |
          raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2)
          new_string="version = \"$raw_new_version\""
          sd '^version = "\d+.\d+.\w+"$' "$new_string" */Cargo.toml
      - name: Build Meilisearch to update Cargo.lock
        run: cargo build
      - name: Commit and push the changes to the ${{ env.NEW_BRANCH }} branch
        uses: EndBug/add-and-commit@v9
        with:
          message: "Update version for the next release (${{ env.NEW_VERSION }}) in Cargo.toml files"
          new_branch: ${{ env.NEW_BRANCH }}
      - name: Create the PR pointing to ${{ github.ref_name }}
        run: |
          gh pr create \
            --title "Update version for the next release ($NEW_VERSION) in Cargo.toml files" \
            --body '⚠️ This PR is automatically generated. Check the new version is the expected one before merging.' \
            --label 'skip changelog' \
            --milestone $NEW_VERSION
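The heart of this workflow is the `sd` substitution. Run locally, the same version bump looks like this (with a hypothetical `NEW_VERSION`):

```bash
NEW_VERSION=v0.29.1                                    # hypothetical input
raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2) # strip the leading "v" -> 0.29.1
new_string="version = \"$raw_new_version\""
# Rewrite the version line of every workspace member's Cargo.toml
sd '^version = "\d+.\d+.\w+"$' "$new_string" */Cargo.toml
cargo build  # refresh Cargo.lock with the new version
```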
CONTRIBUTING.md

@@ -4,14 +4,30 @@ First, thank you for contributing to Meilisearch! The goal of this document is t
 Remember that there are many ways to contribute other than writing code: writing [tutorials or blog posts](https://github.com/meilisearch/awesome-meilisearch), improving [the documentation](https://github.com/meilisearch/documentation), submitting [bug reports](https://github.com/meilisearch/meilisearch/issues/new?assignees=&labels=&template=bug_report.md&title=) and [feature requests](https://github.com/meilisearch/product/discussions/categories/feedback-feature-proposal)...

 The code in this repository is only concerned with managing multiple indexes, handling the update store, and exposing an HTTP API. Search and indexation are the domain of our core engine, [`milli`](https://github.com/meilisearch/milli), while tokenization is handled by [our `charabia` library](https://github.com/meilisearch/charabia/).

 If Meilisearch does not offer optimized support for your language, please consider contributing to `charabia` by following the [CONTRIBUTING.md file](https://github.com/meilisearch/charabia/blob/main/CONTRIBUTING.md) and integrating your intended normalizer/segmenter.

 ## Table of Contents

+- [Hacktoberfest 2022](#hacktoberfest-2022)
 - [Assumptions](#assumptions)
 - [How to Contribute](#how-to-contribute)
 - [Development Workflow](#development-workflow)
 - [Git Guidelines](#git-guidelines)
 - [Release Process (for internal team only)](#release-process-for-internal-team-only)

+## Hacktoberfest 2022
+
+It's [Hacktoberfest month](https://hacktoberfest.com)! 🥳
+
+Thanks so much for participating with Meilisearch this year!
+1. We will follow the quality standards set by the organizers of Hacktoberfest (see detail on their [website](https://hacktoberfest.com/participation/#spam)). Our reviewers will not consider any PR that doesn’t match that standard.
+2. PR reviews will take place from Monday to Thursday, during usual working hours, CEST time. If you submit outside of these hours, there’s no need to panic; we will get around to your contribution.
+3. There will be no issue assignment as we don’t want people to ask to be assigned specific issues and never return, discouraging the volunteer contributors from opening a PR to fix this issue. We take the liberty to choose the PR that best fixes the issue, so we encourage you to get to it as soon as possible and do your best!
+
+You can check out the longer, more complete guideline documentation [here](https://github.com/meilisearch/.github/blob/main/Hacktoberfest_2022_contributors_guidelines.md).
+
 ## Assumptions

 1. **You're familiar with [GitHub](https://github.com) and the [Pull Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests) (PR) workflow.**

@@ -98,7 +114,7 @@ The full Meilisearch release process is described in [this guide](https://github
 ### Release assets

 For each release, the following assets are created:
-- Binaries for differents platforms (Linux, MacOS, Windows and ARM architectures) are attached to the GitHub release
+- Binaries for different platforms (Linux, MacOS, Windows and ARM architectures) are attached to the GitHub release
 - Binaries are pushed to HomeBrew and APT (not published for RC)
 - Docker tags are created/updated:
   - `vX.Y.Z`
Cargo.lock (generated, 1025 changes)
File diff suppressed because it is too large
Cargo.toml

@@ -8,6 +8,9 @@ members = [
     "permissive-json-pointer",
 ]

+[profile.release]
+codegen-units = 1
+
 [profile.dev.package.flate2]
 opt-level = 3
Dockerfile

@@ -1,5 +1,5 @@
 # Compile
-FROM rust:alpine3.14 AS compiler
+FROM rust:alpine3.16 AS compiler

 RUN apk add -q --update-cache --no-cache build-base openssl-dev

@@ -19,7 +19,7 @@ RUN set -eux; \
     cargo build --release

 # Run
-FROM alpine:3.14
+FROM alpine:3.16

 ENV MEILI_HTTP_ADDR 0.0.0.0:7700
 ENV MEILI_SERVER_PROVIDER docker
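To verify an Alpine base-image bump like this one locally, you can build and run the image from the repository root; the local image name here is made up for the example (the run command mirrors the one from the old README below):

```bash
docker build -t meilisearch-local .
docker run -p 7700:7700 -v "$(pwd)/meili_data:/meili_data" meilisearch-local
```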
README.md (220 changes)

@@ -1,205 +1,111 @@
 <p align="center">
-  <img src="assets/logo.svg" alt="Meilisearch" width="200" height="200" />
+  <img src="assets/meilisearch-logo-light.svg?sanitize=true#gh-light-mode-only">
+  <img src="assets/meilisearch-logo-dark.svg?sanitize=true#gh-dark-mode-only">
 </p>

 <h1 align="center">Meilisearch</h1>

 <h4 align="center">
   <a href="https://www.meilisearch.com">Website</a> |
   <a href="https://roadmap.meilisearch.com/tabs/1-under-consideration">Roadmap</a> |
   <a href="https://blog.meilisearch.com">Blog</a> |
-  <a href="https://fr.linkedin.com/company/meilisearch">LinkedIn</a> |
-  <a href="https://twitter.com/meilisearch">Twitter</a> |
   <a href="https://docs.meilisearch.com">Documentation</a> |
-  <a href="https://docs.meilisearch.com/faq/">FAQ</a>
+  <a href="https://docs.meilisearch.com/faq/">FAQ</a> |
+  <a href="https://slack.meilisearch.com">Slack</a>
 </h4>

 <p align="center">
   <a href="https://github.com/meilisearch/meilisearch/actions"><img src="https://github.com/meilisearch/meilisearch/workflows/Cargo%20test/badge.svg" alt="Build Status"></a>
   <a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
   <a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
-  <a href="https://slack.meilisearch.com"><img src="https://img.shields.io/badge/slack-meilisearch-blue.svg?logo=slack" alt="Slack"></a>
-  <a href="https://github.com/meilisearch/meilisearch/discussions" alt="Discussions"><img src="https://img.shields.io/badge/github-discussions-red" /></a>
   <a href="https://app.bors.tech/repositories/26457"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
 </p>

-<p align="center">⚡ Lightning Fast, Ultra Relevant, and Typo-Tolerant Search Engine 🔍</p>
+<p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>

-**Meilisearch** is a powerful, fast, open-source, easy to use and deploy search engine. Both searching and indexing are highly customizable. Features such as typo-tolerance, filters, and synonyms are provided out-of-the-box.
-For more information about features go to [our documentation](https://docs.meilisearch.com/).
+Meilisearch helps you shape a delightful search experience in a snap, offering features that work out-of-the-box to speed up your workflow.

-<p align="center">
-  <img src="assets/trumen-fast.gif" alt="Web interface gif" />
+<p align="center" name="demo">
+  <a href="https://where2watch.meilisearch.com/#gh-light-mode-only" target="_blank">
+    <img src="assets/demo-light.gif#gh-light-mode-only" alt="A bright colored application for finding movies screening near the user">
+  </a>
+  <a href="https://where2watch.meilisearch.com/#gh-dark-mode-only" target="_blank">
+    <img src="assets/demo-dark.gif#gh-dark-mode-only" alt="A dark colored application for finding movies screening near the user">
+  </a>
 </p>

+🔥 [**Try it!**](https://where2watch.meilisearch.com/) 🔥
+
+## 🎃 Hacktoberfest
+
+It’s Hacktoberfest 2022 @Meilisearch
+
+[Hacktoberfest](https://hacktoberfest.com/) is a celebration of the open-source community. This year, and for the third time in a row, Meilisearch is participating in this fantastic event.
+
+You’d like to contribute? Don’t hesitate to check out our [contributing guidelines](./CONTRIBUTING.md).
+
 ## ✨ Features
-* Search-as-you-type experience (answers < 50 milliseconds)
-* Full-text search
-* Typo tolerant (understands typos and misspelling)
-* Faceted search and filters
-* Supports hanzi (Chinese characters)
-* Supports synonyms
-* Easy to install, deploy, and maintain
-* Whole documents are returned
-* Highly customizable
-* RESTful API

-## Getting started
+- **Search-as-you-type:** find search results in less than 50 milliseconds
+- **[Typo tolerance](https://docs.meilisearch.com/learn/getting_started/customizing_relevancy.html#typo-tolerance):** get relevant matches even when queries contain typos and misspellings
+- **[Filtering and faceted search](https://docs.meilisearch.com/learn/advanced/filtering_and_faceted_search.html):** enhance your user's search experience with custom filters and build a faceted search interface in a few lines of code
+- **[Sorting](https://docs.meilisearch.com/learn/advanced/sorting.html):** sort results based on price, date, or pretty much anything else your users need
+- **[Synonym support](https://docs.meilisearch.com/learn/getting_started/customizing_relevancy.html#synonyms):** configure synonyms to include more relevant content in your search results
+- **[Geosearch](https://docs.meilisearch.com/learn/advanced/geosearch.html):** filter and sort documents based on geographic data
+- **[Extensive language support](https://docs.meilisearch.com/learn/what_is_meilisearch/language.html):** search datasets in any language, with optimized support for Chinese, Japanese, Hebrew, and languages using the Latin alphabet
+- **[Security management](https://docs.meilisearch.com/learn/security/master_api_keys.html):** control which users can access what data with API keys that allow fine-grained permissions handling
+- **[Multi-Tenancy](https://docs.meilisearch.com/learn/security/tenant_tokens.html):** personalize search results for any number of application tenants
+- **Highly Customizable:** customize Meilisearch to your specific needs or use our out-of-the-box and hassle-free presets
+- **[RESTful API](https://docs.meilisearch.com/reference/api/overview.html):** integrate Meilisearch in your technical stack with our plugins and SDKs
+- **Easy to install, deploy, and maintain**

-### Deploy the Server
+## 📖 Documentation

-#### Homebrew (Mac OS)
+You can consult Meilisearch's documentation at [https://docs.meilisearch.com](https://docs.meilisearch.com/).

-```bash
-brew update && brew install meilisearch
-meilisearch
-```
+## 🚀 Getting started

-#### Docker
+For basic instructions on how to set up Meilisearch, add documents to an index, and search for documents, take a look at our [Quick Start](https://docs.meilisearch.com/learn/getting_started/quick_start.html) guide.

-```bash
-docker run -p 7700:7700 -v "$(pwd)/meili_data:/meili_data" getmeili/meilisearch
-```
+You may also want to check out [Meilisearch 101](https://docs.meilisearch.com/learn/getting_started/filtering_and_sorting.html) for an introduction to some of Meilisearch's most popular features.

-#### Announcing a cloud-hosted Meilisearch
+## ☁️ Meilisearch cloud

-Join the closed beta by filling out this [form](https://meilisearch.typeform.com/to/VI2cI2rv).
+Join the closed beta for Meilisearch cloud by filling out [this form](https://meilisearch.typeform.com/to/VI2cI2rv).

-#### Try Meilisearch in our Sandbox
+## 🧰 SDKs & integration tools

-Create a Meilisearch instance in [Meilisearch Sandbox](https://sandbox.meilisearch.com/). This instance is free, and will be active for 48 hours.
+Install one of our SDKs in your project for seamless integration between Meilisearch and your favorite language or framework!

-#### Run on Digital Ocean
+Take a look at the complete [Meilisearch integration list](https://docs.meilisearch.com/learn/what_is_meilisearch/sdks.html).

-[](https://marketplace.digitalocean.com/apps/meilisearch?action=deploy&refcode=7c67bd97e101)
+![](assets/integrations.png)

-#### Deploy on Platform.sh
+## ⚙️ Advanced usage

-<a href="https://console.platform.sh/projects/create-project?template=https://raw.githubusercontent.com/platformsh/template-builder/master/templates/meilisearch/.platform.template.yaml&utm_content=meilisearch&utm_source=github&utm_medium=button&utm_campaign=deploy_on_platform">
-  <img src="https://platform.sh/images/deploy/lg-blue.svg" alt="Deploy on Platform.sh" width="180px" />
-</a>
+Experienced users will want to keep our [API Reference](https://docs.meilisearch.com/reference/api) close at hand.

-#### APT (Debian & Ubuntu)
+We also offer a wide range of dedicated guides to all Meilisearch features, such as [filtering](https://docs.meilisearch.com/learn/advanced/filtering_and_faceted_search.html), [sorting](https://docs.meilisearch.com/learn/advanced/sorting.html), [geosearch](https://docs.meilisearch.com/learn/advanced/geosearch.html), [API keys](https://docs.meilisearch.com/learn/security/master_api_keys.html), and [tenant tokens](https://docs.meilisearch.com/learn/security/tenant_tokens.html).

-```bash
-echo "deb [trusted=yes] https://apt.fury.io/meilisearch/ /" > /etc/apt/sources.list.d/fury.list
-apt update && apt install meilisearch-http
-meilisearch
-```
+Finally, for more in-depth information, refer to our articles explaining fundamental Meilisearch concepts such as [documents](https://docs.meilisearch.com/learn/core_concepts/documents.html) and [indexes](https://docs.meilisearch.com/learn/core_concepts/indexes.html).

-#### Download the binary (Linux & Mac OS)
+## 📊 Telemetry

-```bash
-curl -L https://install.meilisearch.com | sh
-./meilisearch
-```
+Meilisearch collects **anonymized** data from users to help us improve our product. You can [deactivate this](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html#how-to-disable-data-collection) whenever you want.

-#### Compile and run it from sources
+To request deletion of collected data, please write to us at [privacy@meilisearch.com](mailto:privacy@meilisearch.com). Don't forget to include your `Instance UID` in the message, as this helps us quickly find and delete your data.

-If you have the latest stable Rust toolchain installed on your local system, clone the repository and change it to your working directory.
+If you want to know more about the kind of data we collect and what we use it for, check the [telemetry section](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html) of our documentation.

-```bash
-git clone https://github.com/meilisearch/meilisearch.git
-cd meilisearch
-cargo run --release
-```
+## 📫 Get in touch!

-### Create an Index and Upload Some Documents
+Meilisearch is a search engine created by [Meili](https://www.welcometothejungle.com/en/companies/meilisearch), a software development company based in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/)

-Let's create an index! If you need a sample dataset, use [this movie database](https://www.notion.so/meilisearch/A-movies-dataset-to-test-Meili-1cbf7c9cfa4247249c40edfa22d7ca87#b5ae399b81834705ba5420ac70358a65). You can also find it in the `datasets/` directory.
+🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.

-```bash
-curl -L https://docs.meilisearch.com/movies.json -o movies.json
-```
+💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:

-Now, you're ready to index some data.
+- For feature requests, please visit our [product repository](https://github.com/meilisearch/product/discussions)
+- Found a bug? Open an [issue](https://github.com/meilisearch/meilisearch/issues)!
+- Want to be part of our Slack community? [Join us!](https://slack.meilisearch.com/)
+- For everything else, please check [this page listing some of the other places where you can find us](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html)

-```bash
-curl -i -X POST 'http://127.0.0.1:7700/indexes/movies/documents' \
-  --header 'content-type: application/json' \
-  --data-binary @movies.json
-```

-### Search for Documents

-#### In command line

-The search engine is now aware of your documents and can serve those via an HTTP server.

-The [`jq` command-line tool](https://stedolan.github.io/jq/) can greatly help you read the server responses.

-```bash
-curl 'http://127.0.0.1:7700/indexes/movies/search?q=botman+robin&limit=2' | jq
-```

-```json
-{
-  "hits": [
-    {
-      "id": "415",
-      "title": "Batman & Robin",
-      "poster": "https://image.tmdb.org/t/p/w1280/79AYCcxw3kSKbhGpx1LiqaCAbwo.jpg",
-      "overview": "Along with crime-fighting partner Robin and new recruit Batgirl, Batman battles the dual threat of frosty genius Mr. Freeze and homicidal horticulturalist Poison Ivy. Freeze plans to put Gotham City on ice, while Ivy tries to drive a wedge between the dynamic duo.",
-      "release_date": 866768400
-    },
-    {
-      "id": "411736",
-      "title": "Batman: Return of the Caped Crusaders",
-      "poster": "https://image.tmdb.org/t/p/w1280/GW3IyMW5Xgl0cgCN8wu96IlNpD.jpg",
-      "overview": "Adam West and Burt Ward returns to their iconic roles of Batman and Robin. Featuring the voices of Adam West, Burt Ward, and Julie Newmar, the film sees the superheroes going up against classic villains like The Joker, The Riddler, The Penguin and Catwoman, both in Gotham City… and in space.",
-      "release_date": 1475888400
-    }
-  ],
-  "nbHits": 8,
-  "exhaustiveNbHits": false,
-  "query": "botman robin",
-  "limit": 2,
-  "offset": 0,
-  "processingTimeMs": 2
-}
-```

-#### Use the Web Interface

-We also deliver an **out-of-the-box [web interface](https://github.com/meilisearch/mini-dashboard)** in which you can test Meilisearch interactively.

-You can access the web interface in your web browser at the root of the server. The default URL is [http://127.0.0.1:7700](http://127.0.0.1:7700). All you need to do is open your web browser and enter Meilisearch’s address to visit it. This will lead you to a web page with a search bar that will allow you to search in the selected index.

-| [See the gif above](#demo)

-## Documentation

-Now that your Meilisearch server is up and running, you can learn more about how to tune your search engine in [the documentation](https://docs.meilisearch.com).

-## Contributing

-Hey! We're glad you're thinking about contributing to Meilisearch! Feel free to pick an [issue labeled as `good first issue`](https://github.com/meilisearch/meilisearch/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22), and to ask any question you need. Some points might not be clear and we are available to help you!

-Also, we recommend following the [CONTRIBUTING](./CONTRIBUTING.md) to create your PR.

-## Core engine and tokenizer

-The code in this repository is only concerned with managing multiple indexes, handling the update store, and exposing an HTTP API.

-Search and indexation are the domain of our core engine, [`milli`](https://github.com/meilisearch/milli), while tokenization is handled by [our `tokenizer` library](https://github.com/meilisearch/tokenizer/).

-## Telemetry

-Meilisearch collects anonymous data regarding general usage.
-This helps us better understand developers' usage of Meilisearch features.

-To find out more on what information we're retrieving, please see our documentation on [Telemetry](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html).

-This program is optional, you can disable these analytics by using the `MEILI_NO_ANALYTICS` env variable.

-## Feature request

-The feature requests are not managed in this repository. Please visit our [dedicated repository](https://github.com/meilisearch/product) to see our work about the Meilisearch product.

-If you have a feature request or any feedback about an existing feature, please open [a discussion](https://github.com/meilisearch/product/discussions).
-Also, feel free to participate in the current discussions, we are looking forward to reading your comments.

-## 💌 Contact

-Please visit [this page](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html#contact-us).

-Meilisearch is developed by [Meili](https://www.meilisearch.com), a young company. To know more about us, you can [read our blog](https://blog.meilisearch.com). Any suggestion or feedback is highly appreciated. Thank you for your support!
+Thank you for your support!
assets/demo-dark.gif (BIN, new file)
Binary file not shown. (After: 2.8 MiB)

assets/demo-light.gif (BIN, new file)
Binary file not shown. (After: 1.7 MiB)

assets/integrations.png (BIN, new file)
Binary file not shown. (After: 799 KiB)
assets/meilisearch-logo-dark.svg (new file, 30 lines; 5.8 KiB)

@@ -0,0 +1,30 @@
<svg width="495" height="74" viewBox="0 0 495 74" fill="none" xmlns="http://www.w3.org/2000/svg">
  <!-- eleven wordmark paths (fill="white") and the three-stroke logo mark,
       with three pink-to-red linear gradients (#FF5CAA to #FF4E62) in <defs>;
       full path data omitted here -->
</svg>
assets/meilisearch-logo-light.svg (new file, 30 lines; 5.9 KiB)

@@ -0,0 +1,30 @@
<svg width="495" height="74" viewBox="0 0 495 74" fill="none" xmlns="http://www.w3.org/2000/svg">
  <!-- eleven wordmark paths (fill="#21004B") and the three-stroke logo mark,
       with three pink-to-red linear gradients (#FF5CAA to #FF4E62) in <defs>;
       full path data omitted here -->
</svg>
129
config.toml
Normal file
129
config.toml
Normal file
@ -0,0 +1,129 @@
# This file shows the default configuration of Meilisearch.
# All variables are defined here https://docs.meilisearch.com/learn/configuration/instance_options.html#environment-variables

db_path = "./data.ms"
# The destination where the database must be created.

env = "development" # Possible values: [development, production]
# This environment variable must be set to `production` if you are running in production.
# More logs will be displayed if the server is running in development mode. Setting the master
# key is optional; hence there is no security on the update routes. This
# is useful to debug when integrating the engine with another service.

http_addr = "127.0.0.1:7700"
# The address on which the HTTP server will listen.

# master_key = "MASTER_KEY"
# Sets the instance's master key, automatically protecting all routes except GET /health.

# no_analytics = false
# Do not send analytics to Meilisearch.

disable_auto_batching = false
# The engine will disable task auto-batching, and will sequentially compute each task one by one.


### DUMP

dumps_dir = "dumps/"
# Folder where dumps are created when the dump route is called.

# import_dump = "./path/to/my/file.dump"
# Import a dump from the specified path, must be a `.dump` file.

ignore_missing_dump = false
# If the dump doesn't exist, load or create the database specified by `db_path` instead.

ignore_dump_if_db_exists = false
# Ignore the dump if a database already exists, and load that database instead.

###


log_level = "INFO" # Possible values: [ERROR, WARN, INFO, DEBUG, TRACE]
# Set the log level.


### INDEX

max_index_size = "100 GiB"
# The maximum size, in bytes, of the main LMDB database directory.

# max_indexing_memory = "2 GiB"
# The maximum amount of memory the indexer will use.
#
# In case the engine is unable to retrieve the available memory, it will try to use
# the memory it needs but without a real limit; this can lead to Out-Of-Memory issues, and it
# is recommended to specify the amount of memory to use.
#
# /!\ The default value is system dependent /!\

# max_indexing_threads = 4
# The maximum number of threads the indexer will use. If the number set is higher than the
# real number of cores available in the machine, it will use the maximum number of
# available cores.
#
# It defaults to half of the available threads.

###


max_task_db_size = "100 GiB"
# The maximum size, in bytes, of the update LMDB database directory.

http_payload_size_limit = "100 MB"
# The maximum size, in bytes, of accepted JSON payloads.


### SNAPSHOT

schedule_snapshot = false
# Activate snapshot scheduling.

snapshot_dir = "snapshots/"
# Defines the directory path where Meilisearch will create a snapshot each snapshot_interval_sec.

snapshot_interval_sec = 86400
# Defines the time interval, in seconds, between each snapshot creation.

# import_snapshot = "./path/to/my/snapshot"
# Defines the path of the snapshot file to import. This option will, by default, stop the
# process if a database already exists, or if no snapshot exists at the given path. If this
# option is not specified, no snapshot is imported.

ignore_missing_snapshot = false
# The engine will ignore a missing snapshot and not return an error in such a case.

ignore_snapshot_if_db_exists = false
# The engine will skip snapshot importation and not return an error in such a case.

###


### SSL

# ssl_auth_path = "./path/to/root"
# Enable client authentication, and accept certificates signed by those roots provided in CERTFILE.

# ssl_cert_path = "./path/to/CERTFILE"
# Read server certificates from CERTFILE. This should contain PEM-format certificates in
# the right order (the first certificate should certify KEYFILE, the last should be a root
# CA).

# ssl_key_path = "./path/to/private-key"
# Read the private key from KEYFILE. This should be an RSA private key or PKCS8-encoded
# private key, in PEM format.

# ssl_ocsp_path = "./path/to/OCSPFILE"
# Read DER-encoded OCSP response from OCSPFILE and staple to certificate. Optional.

ssl_require_auth = false
# Send a fatal alert if the client does not complete client authentication.

ssl_resumption = false
# SSL support session resumption.

ssl_tickets = false
# SSL support tickets.

###
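This file is read with serde and the `toml` crate at startup (see the `Opt::try_build` diff further down). A minimal, hypothetical sketch of that parsing step, assuming the `serde` (with `derive`) and `toml` crates and a reduced field set:

```rust
use serde::Deserialize;

// Hypothetical subset of the options above; unknown keys are simply
// ignored here, unlike the real `Opt` struct, which rejects them
// via `deny_unknown_fields`.
#[derive(Debug, Deserialize)]
struct Config {
    db_path: Option<String>,
    http_addr: Option<String>,
    log_level: Option<String>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read and deserialize the TOML file shown above.
    let raw = std::fs::read_to_string("./config.toml")?;
    let cfg: Config = toml::from_str(&raw)?;
    println!("parsed: {cfg:?}");
    Ok(())
}
```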
1007
grafana-dashboards/dashboard.json
Normal file
File diff suppressed because it is too large
@ -1,16 +1,16 @@
[package]
name = "meilisearch-auth"
version = "0.28.1"
version = "0.29.1"
edition = "2021"

[dependencies]
enum-iterator = "0.7.0"
hmac = "0.12.1"
meilisearch-types = { path = "../meilisearch-types" }
milli = { git = "https://github.com/meilisearch/milli.git", tag = "v0.31.2" }
milli = { git = "https://github.com/meilisearch/milli.git", tag = "v0.33.4", default-features = false }
rand = "0.8.4"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = { version = "1.0.79", features = ["preserve_order"] }
serde_json = { version = "1.0.85", features = ["preserve_order"] }
sha2 = "0.10.2"
thiserror = "1.0.30"
time = { version = "0.3.7", features = ["serde-well-known", "formatting", "parsing", "macros"] }
@ -6,59 +6,63 @@ use std::hash::Hash;
#[repr(u8)]
pub enum Action {
    #[serde(rename = "*")]
    All = actions::ALL,
    All = 0,
    #[serde(rename = "search")]
    Search = actions::SEARCH,
    Search,
    #[serde(rename = "documents.*")]
    DocumentsAll = actions::DOCUMENTS_ALL,
    DocumentsAll,
    #[serde(rename = "documents.add")]
    DocumentsAdd = actions::DOCUMENTS_ADD,
    DocumentsAdd,
    #[serde(rename = "documents.get")]
    DocumentsGet = actions::DOCUMENTS_GET,
    DocumentsGet,
    #[serde(rename = "documents.delete")]
    DocumentsDelete = actions::DOCUMENTS_DELETE,
    DocumentsDelete,
    #[serde(rename = "indexes.*")]
    IndexesAll = actions::INDEXES_ALL,
    IndexesAll,
    #[serde(rename = "indexes.create")]
    IndexesAdd = actions::INDEXES_CREATE,
    IndexesAdd,
    #[serde(rename = "indexes.get")]
    IndexesGet = actions::INDEXES_GET,
    IndexesGet,
    #[serde(rename = "indexes.update")]
    IndexesUpdate = actions::INDEXES_UPDATE,
    IndexesUpdate,
    #[serde(rename = "indexes.delete")]
    IndexesDelete = actions::INDEXES_DELETE,
    IndexesDelete,
    #[serde(rename = "tasks.*")]
    TasksAll = actions::TASKS_ALL,
    TasksAll,
    #[serde(rename = "tasks.get")]
    TasksGet = actions::TASKS_GET,
    TasksGet,
    #[serde(rename = "settings.*")]
    SettingsAll = actions::SETTINGS_ALL,
    SettingsAll,
    #[serde(rename = "settings.get")]
    SettingsGet = actions::SETTINGS_GET,
    SettingsGet,
    #[serde(rename = "settings.update")]
    SettingsUpdate = actions::SETTINGS_UPDATE,
    SettingsUpdate,
    #[serde(rename = "stats.*")]
    StatsAll = actions::STATS_ALL,
    StatsAll,
    #[serde(rename = "stats.get")]
    StatsGet = actions::STATS_GET,
    StatsGet,
    #[serde(rename = "metrics.*")]
    MetricsAll,
    #[serde(rename = "metrics.get")]
    MetricsGet,
    #[serde(rename = "dumps.*")]
    DumpsAll = actions::DUMPS_ALL,
    DumpsAll,
    #[serde(rename = "dumps.create")]
    DumpsCreate = actions::DUMPS_CREATE,
    DumpsCreate,
    #[serde(rename = "version")]
    Version = actions::VERSION,
    Version,
    #[serde(rename = "keys.create")]
    KeysAdd = actions::KEYS_CREATE,
    KeysAdd,
    #[serde(rename = "keys.get")]
    KeysGet = actions::KEYS_GET,
    KeysGet,
    #[serde(rename = "keys.update")]
    KeysUpdate = actions::KEYS_UPDATE,
    KeysUpdate,
    #[serde(rename = "keys.delete")]
    KeysDelete = actions::KEYS_DELETE,
    KeysDelete,
}

impl Action {
    pub fn from_repr(repr: u8) -> Option<Self> {
    pub const fn from_repr(repr: u8) -> Option<Self> {
        use actions::*;
        match repr {
            ALL => Some(Self::All),
@ -79,6 +83,8 @@ impl Action {
            SETTINGS_UPDATE => Some(Self::SettingsUpdate),
            STATS_ALL => Some(Self::StatsAll),
            STATS_GET => Some(Self::StatsGet),
            METRICS_ALL => Some(Self::MetricsAll),
            METRICS_GET => Some(Self::MetricsGet),
            DUMPS_ALL => Some(Self::DumpsAll),
            DUMPS_CREATE => Some(Self::DumpsCreate),
            VERSION => Some(Self::Version),
@ -90,62 +96,39 @@ impl Action {
        }
    }

    pub fn repr(&self) -> u8 {
        use actions::*;
        match self {
            Self::All => ALL,
            Self::Search => SEARCH,
            Self::DocumentsAll => DOCUMENTS_ALL,
            Self::DocumentsAdd => DOCUMENTS_ADD,
            Self::DocumentsGet => DOCUMENTS_GET,
            Self::DocumentsDelete => DOCUMENTS_DELETE,
            Self::IndexesAll => INDEXES_ALL,
            Self::IndexesAdd => INDEXES_CREATE,
            Self::IndexesGet => INDEXES_GET,
            Self::IndexesUpdate => INDEXES_UPDATE,
            Self::IndexesDelete => INDEXES_DELETE,
            Self::TasksAll => TASKS_ALL,
            Self::TasksGet => TASKS_GET,
            Self::SettingsAll => SETTINGS_ALL,
            Self::SettingsGet => SETTINGS_GET,
            Self::SettingsUpdate => SETTINGS_UPDATE,
            Self::StatsAll => STATS_ALL,
            Self::StatsGet => STATS_GET,
            Self::DumpsAll => DUMPS_ALL,
            Self::DumpsCreate => DUMPS_CREATE,
            Self::Version => VERSION,
            Self::KeysAdd => KEYS_CREATE,
            Self::KeysGet => KEYS_GET,
            Self::KeysUpdate => KEYS_UPDATE,
            Self::KeysDelete => KEYS_DELETE,
        }
    pub const fn repr(&self) -> u8 {
        *self as u8
    }
}

pub mod actions {
    pub(crate) const ALL: u8 = 0;
    pub const SEARCH: u8 = 1;
    pub const DOCUMENTS_ALL: u8 = 2;
    pub const DOCUMENTS_ADD: u8 = 3;
    pub const DOCUMENTS_GET: u8 = 4;
    pub const DOCUMENTS_DELETE: u8 = 5;
    pub const INDEXES_ALL: u8 = 6;
    pub const INDEXES_CREATE: u8 = 7;
    pub const INDEXES_GET: u8 = 8;
    pub const INDEXES_UPDATE: u8 = 9;
    pub const INDEXES_DELETE: u8 = 10;
    pub const TASKS_ALL: u8 = 11;
    pub const TASKS_GET: u8 = 12;
    pub const SETTINGS_ALL: u8 = 13;
    pub const SETTINGS_GET: u8 = 14;
    pub const SETTINGS_UPDATE: u8 = 15;
    pub const STATS_ALL: u8 = 16;
    pub const STATS_GET: u8 = 17;
    pub const DUMPS_ALL: u8 = 18;
    pub const DUMPS_CREATE: u8 = 19;
    pub const VERSION: u8 = 20;
    pub const KEYS_CREATE: u8 = 21;
    pub const KEYS_GET: u8 = 22;
    pub const KEYS_UPDATE: u8 = 23;
    pub const KEYS_DELETE: u8 = 24;
    use super::Action::*;

    pub(crate) const ALL: u8 = All.repr();
    pub const SEARCH: u8 = Search.repr();
    pub const DOCUMENTS_ALL: u8 = DocumentsAll.repr();
    pub const DOCUMENTS_ADD: u8 = DocumentsAdd.repr();
    pub const DOCUMENTS_GET: u8 = DocumentsGet.repr();
    pub const DOCUMENTS_DELETE: u8 = DocumentsDelete.repr();
    pub const INDEXES_ALL: u8 = IndexesAll.repr();
    pub const INDEXES_CREATE: u8 = IndexesAdd.repr();
    pub const INDEXES_GET: u8 = IndexesGet.repr();
    pub const INDEXES_UPDATE: u8 = IndexesUpdate.repr();
    pub const INDEXES_DELETE: u8 = IndexesDelete.repr();
    pub const TASKS_ALL: u8 = TasksAll.repr();
    pub const TASKS_GET: u8 = TasksGet.repr();
    pub const SETTINGS_ALL: u8 = SettingsAll.repr();
    pub const SETTINGS_GET: u8 = SettingsGet.repr();
    pub const SETTINGS_UPDATE: u8 = SettingsUpdate.repr();
    pub const STATS_ALL: u8 = StatsAll.repr();
    pub const STATS_GET: u8 = StatsGet.repr();
    pub const METRICS_ALL: u8 = MetricsAll.repr();
    pub const METRICS_GET: u8 = MetricsGet.repr();
    pub const DUMPS_ALL: u8 = DumpsAll.repr();
    pub const DUMPS_CREATE: u8 = DumpsCreate.repr();
    pub const VERSION: u8 = Version.repr();
    pub const KEYS_CREATE: u8 = KeysAdd.repr();
    pub const KEYS_GET: u8 = KeysGet.repr();
    pub const KEYS_UPDATE: u8 = KeysUpdate.repr();
    pub const KEYS_DELETE: u8 = KeysDelete.repr();
}
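The refactor above inverts the dependency between `Action` and the `actions` constants: discriminants now live on the enum (`All = 0` plus implicit increments), `repr()` collapses to a cast, and the constants are derived via `Action::repr()`, so inserting `MetricsAll`/`MetricsGet` after `StatsGet` renumbers the later actions automatically. A standalone sketch of the pattern, with a reduced, assumed variant set:

```rust
// The enum owns the discriminants; everything after an explicit value
// continues counting from it, so new variants slot in without editing
// a parallel table of constants.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum Action {
    All = 0,
    Search,          // 1
    StatsGet = 17,   // jump to the real value for illustration
    MetricsAll,      // 18
    MetricsGet,      // 19
}

impl Action {
    // Same shape as the diff's `repr`: a plain cast, usable in const items.
    const fn repr(self) -> u8 {
        self as u8
    }
}

fn main() {
    const METRICS_GET: u8 = Action::MetricsGet.repr();
    assert_eq!(METRICS_GET, 19);
    assert_eq!(Action::Search.repr(), 1);
}
```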
@ -110,10 +110,7 @@ impl AuthController {
            filters.search_rules = search_rules;
        }

        filters.allow_index_creation = key
            .actions
            .iter()
            .any(|&action| action == Action::IndexesAdd || action == Action::All);
        filters.allow_index_creation = self.is_key_authorized(uid, Action::IndexesAdd, None)?;

        Ok(filters)
    }

@ -126,6 +126,9 @@ impl HeedAuthStore {
                Action::StatsAll => {
                    actions.insert(Action::StatsGet);
                }
                Action::MetricsAll => {
                    actions.insert(Action::MetricsGet);
                }
                other => {
                    actions.insert(*other);
                }
@ -4,14 +4,14 @@ description = "Meilisearch HTTP server"
edition = "2021"
license = "MIT"
name = "meilisearch-http"
version = "0.28.1"
version = "0.29.1"

[[bin]]
name = "meilisearch"
path = "src/main.rs"

[build-dependencies]
anyhow = { version = "1.0.56", optional = true }
anyhow = { version = "1.0.62", optional = true }
cargo_toml = { version = "0.11.4", optional = true }
hex = { version = "0.4.3", optional = true }
reqwest = { version = "0.11.9", features = ["blocking", "rustls-tls"], default-features = false, optional = true }
@ -25,7 +25,7 @@ zip = { version = "0.5.13", optional = true }
actix-cors = "0.6.1"
actix-web = { version = "4.0.1", default-features = false, features = ["macros", "compress-brotli", "compress-gzip", "cookies", "rustls"] }
actix-web-static-files = { git = "https://github.com/kilork/actix-web-static-files.git", rev = "2d3b6160", optional = true }
anyhow = { version = "1.0.56", features = ["backtrace"] }
anyhow = { version = "1.0.62", features = ["backtrace"] }
async-stream = "0.3.3"
async-trait = "0.1.52"
bstr = "0.2.17"
@ -46,7 +46,8 @@ jsonwebtoken = "8.0.1"
log = "0.4.14"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
meilisearch-lib = { path = "../meilisearch-lib" }
meilisearch-lib = { path = "../meilisearch-lib", default-features = false }
mimalloc = { version = "0.1.29", default-features = false }
mime = "0.3.16"
num_cpus = "1.13.1"
obkv = "0.2.0"
@ -63,7 +64,7 @@ rustls-pemfile = "0.3.0"
segment = { version = "0.2.0", optional = true }
serde = { version = "1.0.136", features = ["derive"] }
serde-cs = "0.2.3"
serde_json = { version = "1.0.79", features = ["preserve_order"] }
serde_json = { version = "1.0.85", features = ["preserve_order"] }
sha2 = "0.10.2"
siphasher = "0.3.10"
slice-group-by = "0.3.0"
@ -75,8 +76,11 @@ thiserror = "1.0.30"
time = { version = "0.3.7", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tokio = { version = "1.17.0", features = ["full"] }
tokio-stream = "0.1.8"
toml = "0.5.9"
uuid = { version = "1.1.2", features = ["serde", "v4"] }
walkdir = "2.3.2"
prometheus = { version = "0.13.0", features = ["process"], optional = true }
lazy_static = "1.4.0"

[dev-dependencies]
actix-rt = "2.7.0"
@ -87,7 +91,8 @@ urlencoding = "2.1.0"
yaup = "0.2.0"

[features]
default = ["analytics", "mini-dashboard"]
default = ["analytics", "meilisearch-lib/default", "mini-dashboard"]
metrics = ["prometheus"]
analytics = ["segment"]
mini-dashboard = [
    "actix-web-static-files",
@ -100,10 +105,11 @@ mini-dashboard = [
    "tempfile",
    "zip",
]

[target.'cfg(target_os = "linux")'.dependencies]
tikv-jemallocator = "0.4.3"
chinese = ["meilisearch-lib/chinese"]
hebrew = ["meilisearch-lib/hebrew"]
japanese = ["meilisearch-lib/japanese"]
thai = ["meilisearch-lib/thai"]

[package.metadata.mini-dashboard]
assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.1/build.zip"
sha1 = "05a02ff13c3982091884a3f81d28bf53e72607b2"
assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.2/build.zip"
sha1 = "c69feffc6b590e38a46981a85c47f48905d4082a"
@ -349,16 +349,16 @@ pub struct SearchAggregator {

    // sort
    sort_with_geo_point: bool,
    // everytime a request has a filter, this field must be incremented by the number of terms it contains
    // every time a request has a filter, this field must be incremented by the number of terms it contains
    sort_sum_of_criteria_terms: usize,
    // everytime a request has a filter, this field must be incremented by one
    // every time a request has a filter, this field must be incremented by one
    sort_total_number_of_criteria: usize,

    // filter
    filter_with_geo_radius: bool,
    // everytime a request has a filter, this field must be incremented by the number of terms it contains
    // every time a request has a filter, this field must be incremented by the number of terms it contains
    filter_sum_of_criteria_terms: usize,
    // everytime a request has a filter, this field must be incremented by one
    // every time a request has a filter, this field must be incremented by one
    filter_total_number_of_criteria: usize,
    used_syntax: HashMap<String, usize>,

@ -366,6 +366,9 @@ pub struct SearchAggregator {
    // The maximum number of terms in a q request
    max_terms_number: usize,

    // every time a search is done, we increment the counter linked to the used settings
    matching_strategy: HashMap<String, usize>,

    // pagination
    max_limit: usize,
    max_offset: usize,
@ -423,6 +426,9 @@ impl SearchAggregator {
            ret.max_terms_number = q.split_whitespace().count();
        }

        ret.matching_strategy
            .insert(format!("{:?}", query.matching_strategy), 1);

        ret.max_limit = query.limit;
        ret.max_offset = query.offset.unwrap_or_default();

@ -476,6 +482,11 @@ impl SearchAggregator {
        }
        // q
        self.max_terms_number = self.max_terms_number.max(other.max_terms_number);

        for (key, value) in other.matching_strategy.into_iter() {
            let matching_strategy = self.matching_strategy.entry(key).or_insert(0);
            *matching_strategy = matching_strategy.saturating_add(value);
        }
        // pagination
        self.max_limit = self.max_limit.max(other.max_limit);
        self.max_offset = self.max_offset.max(other.max_offset);
@ -517,6 +528,7 @@ impl SearchAggregator {
            },
            "q": {
                "max_terms_number": self.max_terms_number,
                "most_used_matching_strategy": self.matching_strategy.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)),
            },
            "pagination": {
                "max_limit": self.max_limit,
@ -5,7 +5,7 @@ use std::ops::Deref;
use std::pin::Pin;

use actix_web::FromRequest;
use error::AuthenticationError;
pub use error::AuthenticationError;
use futures::future::err;
use futures::Future;
use meilisearch_auth::{AuthController, AuthFilter};
@ -1,17 +0,0 @@
use meilisearch_lib::heed::Env;
use walkdir::WalkDir;

pub trait EnvSizer {
    fn size(&self) -> u64;
}

impl EnvSizer for Env {
    fn size(&self) -> u64 {
        WalkDir::new(self.path())
            .into_iter()
            .filter_map(|entry| entry.ok())
            .filter_map(|entry| entry.metadata().ok())
            .filter(|metadata| metadata.is_file())
            .fold(0, |acc, m| acc + m.len())
    }
}
@ -1,3 +0,0 @@
mod env;

pub use env::EnvSizer;
@ -5,10 +5,14 @@ pub mod analytics;
pub mod task;
#[macro_use]
pub mod extractors;
pub mod helpers;
pub mod option;
pub mod routes;

#[cfg(feature = "metrics")]
pub mod metrics;
#[cfg(feature = "metrics")]
pub mod route_metrics;

use std::sync::{atomic::AtomicBool, Arc};
use std::time::Duration;

@ -30,9 +34,9 @@ pub static AUTOBATCHING_ENABLED: AtomicBool = AtomicBool::new(false);
pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<MeiliSearch> {
    let mut meilisearch = MeiliSearch::builder();

    // enable autobatching?
    // disable autobatching?
    AUTOBATCHING_ENABLED.store(
        opt.scheduler_options.enable_auto_batching,
        !opt.scheduler_options.disable_auto_batching,
        std::sync::atomic::Ordering::Relaxed,
    );

@ -141,22 +145,40 @@ pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
    config.service(web::resource("/").route(web::get().to(routes::running)));
}

#[cfg(feature = "metrics")]
pub fn configure_metrics_route(config: &mut web::ServiceConfig, enable_metrics_route: bool) {
    if enable_metrics_route {
        config.service(
            web::resource("/metrics").route(web::get().to(crate::route_metrics::get_metrics)),
        );
    }
}

#[macro_export]
macro_rules! create_app {
    ($data:expr, $auth:expr, $enable_frontend:expr, $opt:expr, $analytics:expr) => {{
        use actix_cors::Cors;
        use actix_web::dev::Service;
        use actix_web::middleware::Condition;
        use actix_web::middleware::TrailingSlash;
        use actix_web::App;
        use actix_web::{middleware, web};
        use meilisearch_http::error::MeilisearchHttpError;
        use meilisearch_http::routes;
        use meilisearch_http::{configure_data, dashboard};
        #[cfg(feature = "metrics")]
        use meilisearch_http::{configure_metrics_route, metrics, route_metrics};
        use meilisearch_types::error::ResponseError;

        App::new()
        let app = App::new()
            .configure(|s| configure_data(s, $data.clone(), $auth.clone(), &$opt, $analytics))
            .configure(routes::configure)
            .configure(|s| dashboard(s, $enable_frontend))
            .configure(|s| dashboard(s, $enable_frontend));

        #[cfg(feature = "metrics")]
        let app = app.configure(|s| configure_metrics_route(s, $opt.enable_metrics_route));

        let app = app
            .wrap(
                Cors::default()
                    .send_wildcard()
@ -169,6 +191,14 @@ macro_rules! create_app {
            .wrap(middleware::Compress::default())
            .wrap(middleware::NormalizePath::new(
                middleware::TrailingSlash::Trim,
            ))
            ));

        #[cfg(feature = "metrics")]
        let app = app.wrap(Condition::new(
            $opt.enable_metrics_route,
            route_metrics::RouteMetrics,
        ));

        app
    }};
}
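The macro now builds the app by rebinding `app` step by step, which lets feature-gated middleware slot in without changing the surrounding expression. A minimal sketch of the same `Condition` pattern, using actix-web's built-in `Logger` as a purely illustrative stand-in middleware:

```rust
use actix_web::middleware::{Condition, Logger};
use actix_web::App;

fn build(enable_logging: bool) {
    // Each step rebinds `app`, so optional middleware composes cleanly:
    // when the flag is false, Condition simply passes requests through.
    let app = App::new();
    let _app = app.wrap(Condition::new(enable_logging, Logger::default()));
}

fn main() {
    build(true);
}
```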
@ -1,18 +1,17 @@
use std::env;
use std::path::PathBuf;
use std::sync::Arc;

use actix_web::http::KeepAlive;
use actix_web::HttpServer;
use clap::Parser;
use meilisearch_auth::AuthController;
use meilisearch_http::analytics;
use meilisearch_http::analytics::Analytics;
use meilisearch_http::{create_app, setup_meilisearch, Opt};
use meilisearch_lib::MeiliSearch;

#[cfg(target_os = "linux")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

/// does all the setup before meilisearch is launched
fn setup(opt: &Opt) -> anyhow::Result<()> {
@ -30,7 +29,7 @@ fn setup(opt: &Opt) -> anyhow::Result<()> {

#[actix_web::main]
async fn main() -> anyhow::Result<()> {
    let opt = Opt::parse();
    let (opt, config_read_from) = Opt::try_build()?;

    setup(&opt)?;

@ -59,7 +58,7 @@ async fn main() -> anyhow::Result<()> {
    #[cfg(any(debug_assertions, not(feature = "analytics")))]
    let (analytics, user) = analytics::MockAnalytics::new(&opt);

    print_launch_resume(&opt, &user);
    print_launch_resume(&opt, &user, config_read_from);

    run_http(meilisearch, auth_controller, opt, analytics).await?;

@ -98,10 +97,14 @@ async fn run_http(
    Ok(())
}

pub fn print_launch_resume(opt: &Opt, user: &str) {
pub fn print_launch_resume(opt: &Opt, user: &str, config_read_from: Option<PathBuf>) {
    let commit_sha = option_env!("VERGEN_GIT_SHA").unwrap_or("unknown");
    let commit_date = option_env!("VERGEN_GIT_COMMIT_TIMESTAMP").unwrap_or("unknown");

    let protocol = if opt.ssl_cert_path.is_some() && opt.ssl_key_path.is_some() {
        "https"
    } else {
        "http"
    };
    let ascii_name = r#"
888b d888 d8b 888 d8b 888
8888b d8888 Y8P 888 Y8P 888
@ -115,8 +118,14 @@ pub fn print_launch_resume(opt: &Opt, user: &str) {

    eprintln!("{}", ascii_name);

    eprintln!(
        "Config file path:\t{:?}",
        config_read_from
            .map(|config_file_path| config_file_path.display().to_string())
            .unwrap_or_else(|| "none".to_string())
    );
    eprintln!("Database path:\t\t{:?}", opt.db_path);
    eprintln!("Server listening on:\t\"http://{}\"", opt.http_addr);
    eprintln!("Server listening on:\t\"{}://{}\"", protocol, opt.http_addr);
    eprintln!("Environment:\t\t{:?}", opt.env);
    eprintln!("Commit SHA:\t\t{:?}", commit_sha.to_string());
    eprintln!("Commit date:\t\t{:?}", commit_date.to_string());
42
meilisearch-http/src/metrics.rs
Normal file
@ -0,0 +1,42 @@
use lazy_static::lazy_static;
use prometheus::{
    opts, register_histogram_vec, register_int_counter_vec, register_int_gauge,
    register_int_gauge_vec,
};
use prometheus::{HistogramVec, IntCounterVec, IntGauge, IntGaugeVec};

const HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: &[f64; 14] = &[
    0.0005, 0.0008, 0.00085, 0.0009, 0.00095, 0.001, 0.00105, 0.0011, 0.00115, 0.0012, 0.0015,
    0.002, 0.003, 1.0,
];

lazy_static! {
    pub static ref HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
        opts!("http_requests_total", "HTTP requests total"),
        &["method", "path"]
    )
    .expect("Can't create a metric");
    pub static ref MEILISEARCH_DB_SIZE_BYTES: IntGauge = register_int_gauge!(opts!(
        "meilisearch_db_size_bytes",
        "Meilisearch Db Size In Bytes"
    ))
    .expect("Can't create a metric");
    pub static ref MEILISEARCH_INDEX_COUNT: IntGauge =
        register_int_gauge!(opts!("meilisearch_index_count", "Meilisearch Index Count"))
            .expect("Can't create a metric");
    pub static ref MEILISEARCH_INDEX_DOCS_COUNT: IntGaugeVec = register_int_gauge_vec!(
        opts!(
            "meilisearch_index_docs_count",
            "Meilisearch Index Docs Count"
        ),
        &["index"]
    )
    .expect("Can't create a metric");
    pub static ref HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
        "http_response_time_seconds",
        "HTTP response times",
        &["method", "path"],
        HTTP_RESPONSE_TIME_CUSTOM_BUCKETS.to_vec()
    )
    .expect("Can't create a metric");
}
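A hypothetical, self-contained use of the same `prometheus` primitives: time one operation with a histogram timer and render the default registry as text, the way the `/metrics` handler below does. The metric name `demo_response_time_seconds` is made up for the example:

```rust
use prometheus::{register_histogram_vec, Encoder, TextEncoder};

fn main() {
    // Registered in the default registry, like the statics above
    // (here with prometheus's default buckets for brevity).
    let hist = register_histogram_vec!(
        "demo_response_time_seconds",
        "Demo response times",
        &["method", "path"]
    )
    .expect("Can't create a metric");

    // Start a timer for one labelled series; dropping/observing it
    // records the elapsed seconds into the histogram.
    let timer = hist.with_label_values(&["GET", "/health"]).start_timer();
    // ... handle the request ...
    timer.observe_duration();

    // Encode everything gathered so far in the text exposition format.
    let mut buffer = vec![];
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .expect("Failed to encode metrics");
    println!("{}", String::from_utf8(buffer).unwrap());
}
```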
@ -5,7 +5,10 @@ use std::sync::Arc;
use byte_unit::Byte;
use clap::Parser;
use meilisearch_lib::options::{IndexerOpts, SchedulerConfig};
use meilisearch_lib::{
    export_to_env_if_not_present,
    options::{IndexerOpts, SchedulerConfig},
};
use rustls::{
    server::{
        AllowAnyAnonymousOrAuthenticatedClient, AllowAnyAuthenticatedClient,
@ -14,138 +17,210 @@
    RootCertStore,
};
use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys};
use serde::Serialize;
use serde::{Deserialize, Serialize};

const POSSIBLE_ENV: [&str; 2] = ["development", "production"];

#[derive(Debug, Clone, Parser, Serialize)]
const MEILI_DB_PATH: &str = "MEILI_DB_PATH";
const MEILI_HTTP_ADDR: &str = "MEILI_HTTP_ADDR";
const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY";
const MEILI_ENV: &str = "MEILI_ENV";
#[cfg(all(not(debug_assertions), feature = "analytics"))]
const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS";
const MEILI_MAX_INDEX_SIZE: &str = "MEILI_MAX_INDEX_SIZE";
const MEILI_MAX_TASK_DB_SIZE: &str = "MEILI_MAX_TASK_DB_SIZE";
const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT";
const MEILI_SSL_CERT_PATH: &str = "MEILI_SSL_CERT_PATH";
const MEILI_SSL_KEY_PATH: &str = "MEILI_SSL_KEY_PATH";
const MEILI_SSL_AUTH_PATH: &str = "MEILI_SSL_AUTH_PATH";
const MEILI_SSL_OCSP_PATH: &str = "MEILI_SSL_OCSP_PATH";
const MEILI_SSL_REQUIRE_AUTH: &str = "MEILI_SSL_REQUIRE_AUTH";
const MEILI_SSL_RESUMPTION: &str = "MEILI_SSL_RESUMPTION";
const MEILI_SSL_TICKETS: &str = "MEILI_SSL_TICKETS";
const MEILI_IMPORT_SNAPSHOT: &str = "MEILI_IMPORT_SNAPSHOT";
const MEILI_IGNORE_MISSING_SNAPSHOT: &str = "MEILI_IGNORE_MISSING_SNAPSHOT";
const MEILI_IGNORE_SNAPSHOT_IF_DB_EXISTS: &str = "MEILI_IGNORE_SNAPSHOT_IF_DB_EXISTS";
const MEILI_SNAPSHOT_DIR: &str = "MEILI_SNAPSHOT_DIR";
const MEILI_SCHEDULE_SNAPSHOT: &str = "MEILI_SCHEDULE_SNAPSHOT";
const MEILI_SNAPSHOT_INTERVAL_SEC: &str = "MEILI_SNAPSHOT_INTERVAL_SEC";
const MEILI_IMPORT_DUMP: &str = "MEILI_IMPORT_DUMP";
const MEILI_IGNORE_MISSING_DUMP: &str = "MEILI_IGNORE_MISSING_DUMP";
const MEILI_IGNORE_DUMP_IF_DB_EXISTS: &str = "MEILI_IGNORE_DUMP_IF_DB_EXISTS";
const MEILI_DUMPS_DIR: &str = "MEILI_DUMPS_DIR";
const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL";
#[cfg(feature = "metrics")]
const MEILI_ENABLE_METRICS_ROUTE: &str = "MEILI_ENABLE_METRICS_ROUTE";

const DEFAULT_DB_PATH: &str = "./data.ms";
const DEFAULT_HTTP_ADDR: &str = "127.0.0.1:7700";
const DEFAULT_ENV: &str = "development";
const DEFAULT_MAX_INDEX_SIZE: &str = "100 GiB";
const DEFAULT_MAX_TASK_DB_SIZE: &str = "100 GiB";
const DEFAULT_HTTP_PAYLOAD_SIZE_LIMIT: &str = "100 MB";
const DEFAULT_SNAPSHOT_DIR: &str = "snapshots/";
const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400;
const DEFAULT_DUMPS_DIR: &str = "dumps/";
const DEFAULT_LOG_LEVEL: &str = "INFO";

#[derive(Debug, Clone, Parser, Serialize, Deserialize)]
#[clap(version)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Opt {
    /// The destination where the database must be created.
    #[clap(long, env = "MEILI_DB_PATH", default_value = "./data.ms")]
    #[clap(long, env = MEILI_DB_PATH, default_value_os_t = default_db_path())]
    #[serde(default = "default_db_path")]
    pub db_path: PathBuf,

    /// The address on which the http server will listen.
    #[clap(long, env = "MEILI_HTTP_ADDR", default_value = "127.0.0.1:7700")]
    #[clap(long, env = MEILI_HTTP_ADDR, default_value_t = default_http_addr())]
    #[serde(default = "default_http_addr")]
    pub http_addr: String,

    /// The master key allowing you to do everything on the server.
    #[serde(skip)]
    #[clap(long, env = "MEILI_MASTER_KEY")]
    /// Sets the instance's master key, automatically protecting all routes except GET /health
    #[serde(skip_serializing)]
    #[clap(long, env = MEILI_MASTER_KEY)]
    pub master_key: Option<String>,

    /// This environment variable must be set to `production` if you are running in production.
    /// If the server is running in development mode more logs will be displayed,
    /// and the master key can be avoided which implies that there is no security on the updates routes.
    /// This is useful to debug when integrating the engine with another service.
    #[clap(long, env = "MEILI_ENV", default_value = "development", possible_values = &POSSIBLE_ENV)]
    /// More logs will be displayed if the server is running in development mode. Setting the master
    /// key is optional; hence there is no security on the update routes. This
    /// is useful to debug when integrating the engine with another service
    #[clap(long, env = MEILI_ENV, default_value_t = default_env(), possible_values = &POSSIBLE_ENV)]
    #[serde(default = "default_env")]
    pub env: String,

    /// Do not send analytics to Meili.
    #[cfg(all(not(debug_assertions), feature = "analytics"))]
    #[serde(skip)] // we can't send true
    #[clap(long, env = "MEILI_NO_ANALYTICS")]
    #[serde(skip_serializing, default)] // we can't send true
    #[clap(long, env = MEILI_NO_ANALYTICS)]
    pub no_analytics: bool,

    /// The maximum size, in bytes, of the main lmdb database directory
    #[clap(long, env = "MEILI_MAX_INDEX_SIZE", default_value = "100 GiB")]
    /// The maximum size, in bytes, of the main LMDB database directory
    #[clap(long, env = MEILI_MAX_INDEX_SIZE, default_value_t = default_max_index_size())]
    #[serde(default = "default_max_index_size")]
    pub max_index_size: Byte,

    /// The maximum size, in bytes, of the update lmdb database directory
    #[clap(long, env = "MEILI_MAX_TASK_DB_SIZE", default_value = "100 GiB")]
    /// The maximum size, in bytes, of the update LMDB database directory
    #[clap(long, env = MEILI_MAX_TASK_DB_SIZE, default_value_t = default_max_task_db_size())]
    #[serde(default = "default_max_task_db_size")]
    pub max_task_db_size: Byte,

    /// The maximum size, in bytes, of accepted JSON payloads
    #[clap(long, env = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT", default_value = "100 MB")]
    #[clap(long, env = MEILI_HTTP_PAYLOAD_SIZE_LIMIT, default_value_t = default_http_payload_size_limit())]
    #[serde(default = "default_http_payload_size_limit")]
    pub http_payload_size_limit: Byte,

    /// Read server certificates from CERTFILE.
    /// This should contain PEM-format certificates
    /// in the right order (the first certificate should
    /// certify KEYFILE, the last should be a root CA).
    #[serde(skip)]
    #[clap(long, env = "MEILI_SSL_CERT_PATH", parse(from_os_str))]
    #[serde(skip_serializing)]
    #[clap(long, env = MEILI_SSL_CERT_PATH, parse(from_os_str))]
    pub ssl_cert_path: Option<PathBuf>,

    /// Read private key from KEYFILE. This should be a RSA
    /// Read the private key from KEYFILE. This should be an RSA
    /// private key or PKCS8-encoded private key, in PEM format.
    #[serde(skip)]
    #[clap(long, env = "MEILI_SSL_KEY_PATH", parse(from_os_str))]
    #[serde(skip_serializing)]
    #[clap(long, env = MEILI_SSL_KEY_PATH, parse(from_os_str))]
    pub ssl_key_path: Option<PathBuf>,

    /// Enable client authentication, and accept certificates
    /// signed by those roots provided in CERTFILE.
    #[clap(long, env = "MEILI_SSL_AUTH_PATH", parse(from_os_str))]
    #[serde(skip)]
    #[serde(skip_serializing)]
    #[clap(long, env = MEILI_SSL_AUTH_PATH, parse(from_os_str))]
    pub ssl_auth_path: Option<PathBuf>,

    /// Read DER-encoded OCSP response from OCSPFILE and staple to certificate.
    /// Optional
    #[serde(skip)]
    #[clap(long, env = "MEILI_SSL_OCSP_PATH", parse(from_os_str))]
    #[serde(skip_serializing)]
    #[clap(long, env = MEILI_SSL_OCSP_PATH, parse(from_os_str))]
    pub ssl_ocsp_path: Option<PathBuf>,

    /// Send a fatal alert if the client does not complete client authentication.
    #[serde(skip)]
    #[clap(long, env = "MEILI_SSL_REQUIRE_AUTH")]
    #[serde(skip_serializing, default)]
    #[clap(long, env = MEILI_SSL_REQUIRE_AUTH)]
    pub ssl_require_auth: bool,

    /// SSL support session resumption
    #[serde(skip)]
    #[clap(long, env = "MEILI_SSL_RESUMPTION")]
    #[serde(skip_serializing, default)]
    #[clap(long, env = MEILI_SSL_RESUMPTION)]
    pub ssl_resumption: bool,

    /// SSL support tickets.
    #[serde(skip)]
    #[clap(long, env = "MEILI_SSL_TICKETS")]
    #[serde(skip_serializing, default)]
    #[clap(long, env = MEILI_SSL_TICKETS)]
    pub ssl_tickets: bool,

    /// Defines the path of the snapshot file to import.
    /// This option will, by default, stop the process if a database already exist or if no snapshot exists at
    /// the given path. If this option is not specified no snapshot is imported.
    #[clap(long)]
    /// This option will, by default, stop the process if a database already exists, or if no snapshot exists at
    /// the given path. If this option is not specified, no snapshot is imported.
    #[clap(long, env = MEILI_IMPORT_SNAPSHOT)]
    pub import_snapshot: Option<PathBuf>,

    /// The engine will ignore a missing snapshot and not return an error in such case.
    #[clap(long, requires = "import-snapshot")]
    /// The engine will ignore a missing snapshot and not return an error in such a case.
    #[clap(
        long,
        env = MEILI_IGNORE_MISSING_SNAPSHOT,
        requires = "import-snapshot"
    )]
    #[serde(default)]
    pub ignore_missing_snapshot: bool,

    /// The engine will skip snapshot importation and not return an error in such case.
    #[clap(long, requires = "import-snapshot")]
    #[clap(
        long,
        env = MEILI_IGNORE_SNAPSHOT_IF_DB_EXISTS,
        requires = "import-snapshot"
    )]
    #[serde(default)]
    pub ignore_snapshot_if_db_exists: bool,

    /// Defines the directory path where meilisearch will create snapshot each snapshot_time_gap.
    #[clap(long, env = "MEILI_SNAPSHOT_DIR", default_value = "snapshots/")]
    /// Defines the directory path where Meilisearch will create a snapshot each snapshot-interval-sec.
    #[clap(long, env = MEILI_SNAPSHOT_DIR, default_value_os_t = default_snapshot_dir())]
    #[serde(default = "default_snapshot_dir")]
    pub snapshot_dir: PathBuf,

    /// Activate snapshot scheduling.
    #[clap(long, env = "MEILI_SCHEDULE_SNAPSHOT")]
    #[clap(long, env = MEILI_SCHEDULE_SNAPSHOT)]
    #[serde(default)]
    pub schedule_snapshot: bool,

    /// Defines time interval, in seconds, between each snapshot creation.
    #[clap(long, env = "MEILI_SNAPSHOT_INTERVAL_SEC", default_value = "86400")] // 24h
    #[clap(long, env = MEILI_SNAPSHOT_INTERVAL_SEC, default_value_t = default_snapshot_interval_sec())]
    #[serde(default = "default_snapshot_interval_sec")]
    // 24h
    pub snapshot_interval_sec: u64,

    /// Import a dump from the specified path, must be a `.dump` file.
    #[clap(long, conflicts_with = "import-snapshot")]
    #[clap(long, env = MEILI_IMPORT_DUMP, conflicts_with = "import-snapshot")]
    pub import_dump: Option<PathBuf>,

    /// If the dump doesn't exists, load or create the database specified by `db-path` instead.
    #[clap(long, requires = "import-dump")]
    /// If the dump doesn't exist, load or create the database specified by `db-path` instead.
    #[clap(long, env = MEILI_IGNORE_MISSING_DUMP, requires = "import-dump")]
    #[serde(default)]
    pub ignore_missing_dump: bool,

    /// Ignore the dump if a database already exists, and load that database instead.
    #[clap(long, requires = "import-dump")]
    #[clap(long, env = MEILI_IGNORE_DUMP_IF_DB_EXISTS, requires = "import-dump")]
    #[serde(default)]
    pub ignore_dump_if_db_exists: bool,

    /// Folder where dumps are created when the dump route is called.
    #[clap(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
    #[clap(long, env = MEILI_DUMPS_DIR, default_value_os_t = default_dumps_dir())]
    #[serde(default = "default_dumps_dir")]
    pub dumps_dir: PathBuf,

    /// Set the log level
    #[clap(long, env = "MEILI_LOG_LEVEL", default_value = "info")]
    /// Set the log level. # Possible values: [ERROR, WARN, INFO, DEBUG, TRACE]
    #[clap(long, env = MEILI_LOG_LEVEL, default_value_t = default_log_level())]
    #[serde(default = "default_log_level")]
    pub log_level: String,

    /// Enables Prometheus metrics and /metrics route.
    #[cfg(feature = "metrics")]
    #[clap(long, env = MEILI_ENABLE_METRICS_ROUTE)]
    #[serde(default)]
    pub enable_metrics_route: bool,

    #[serde(flatten)]
    #[clap(flatten)]
    pub indexer_options: IndexerOpts,
@ -153,15 +228,139 @@ pub struct Opt {
    #[serde(flatten)]
    #[clap(flatten)]
    pub scheduler_options: SchedulerConfig,

    /// The path to a configuration file that should be used to setup the engine.
    /// Format must be TOML.
    #[serde(skip_serializing)]
    #[clap(long)]
    pub config_file_path: Option<PathBuf>,
}

impl Opt {
    /// Wether analytics should be enabled or not.
    /// Whether analytics should be enabled or not.
    #[cfg(all(not(debug_assertions), feature = "analytics"))]
    pub fn analytics(&self) -> bool {
        !self.no_analytics
    }

    /// Build a new Opt from config file, env vars and cli args.
    pub fn try_build() -> anyhow::Result<(Self, Option<PathBuf>)> {
        // Parse the args to get the config_file_path.
        let mut opts = Opt::parse();
        let mut config_read_from = None;
        if let Some(config_file_path) = opts
            .config_file_path
            .clone()
            .or_else(|| Some(PathBuf::from("./config.toml")))
        {
            match std::fs::read(&config_file_path) {
                Ok(config) => {
                    // If the file is successfully read, we deserialize it with `toml`.
                    let opt_from_config = toml::from_slice::<Opt>(&config)?;
                    // We inject the values from the toml in the corresponding env vars if needs be. Doing so, we respect the priority toml < env vars < cli args.
                    opt_from_config.export_to_env();
                    // Once injected we parse the cli args once again to take the new env vars into scope.
                    opts = Opt::parse();
                    config_read_from = Some(config_file_path);
                }
                // If we have an error while reading the file defined by the user.
                Err(_) if opts.config_file_path.is_some() => anyhow::bail!(
                    "unable to open or read the {:?} configuration file.",
                    opts.config_file_path.unwrap().display().to_string()
                ),
                _ => (),
            }
        }

        Ok((opts, config_read_from))
    }

    /// Exports the opts values to their corresponding env vars if they are not set.
    fn export_to_env(self) {
        let Opt {
            db_path,
            http_addr,
            master_key,
            env,
            max_index_size,
            max_task_db_size,
            http_payload_size_limit,
            ssl_cert_path,
            ssl_key_path,
            ssl_auth_path,
            ssl_ocsp_path,
            ssl_require_auth,
            ssl_resumption,
            ssl_tickets,
            snapshot_dir,
            schedule_snapshot,
            snapshot_interval_sec,
            dumps_dir,
            log_level,
            indexer_options,
            scheduler_options,
            import_snapshot: _,
            ignore_missing_snapshot: _,
            ignore_snapshot_if_db_exists: _,
            import_dump: _,
            ignore_missing_dump: _,
            ignore_dump_if_db_exists: _,
            config_file_path: _,
            #[cfg(all(not(debug_assertions), feature = "analytics"))]
            no_analytics,
            #[cfg(feature = "metrics")]
            enable_metrics_route,
        } = self;
        export_to_env_if_not_present(MEILI_DB_PATH, db_path);
        export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
        if let Some(master_key) = master_key {
            export_to_env_if_not_present(MEILI_MASTER_KEY, master_key);
        }
        export_to_env_if_not_present(MEILI_ENV, env);
        #[cfg(all(not(debug_assertions), feature = "analytics"))]
        {
            export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
        }
        export_to_env_if_not_present(MEILI_MAX_INDEX_SIZE, max_index_size.to_string());
        export_to_env_if_not_present(MEILI_MAX_TASK_DB_SIZE, max_task_db_size.to_string());
        export_to_env_if_not_present(
            MEILI_HTTP_PAYLOAD_SIZE_LIMIT,
            http_payload_size_limit.to_string(),
        );
        if let Some(ssl_cert_path) = ssl_cert_path {
            export_to_env_if_not_present(MEILI_SSL_CERT_PATH, ssl_cert_path);
        }
        if let Some(ssl_key_path) = ssl_key_path {
            export_to_env_if_not_present(MEILI_SSL_KEY_PATH, ssl_key_path);
        }
        if let Some(ssl_auth_path) = ssl_auth_path {
            export_to_env_if_not_present(MEILI_SSL_AUTH_PATH, ssl_auth_path);
        }
        if let Some(ssl_ocsp_path) = ssl_ocsp_path {
            export_to_env_if_not_present(MEILI_SSL_OCSP_PATH, ssl_ocsp_path);
        }
        export_to_env_if_not_present(MEILI_SSL_REQUIRE_AUTH, ssl_require_auth.to_string());
        export_to_env_if_not_present(MEILI_SSL_RESUMPTION, ssl_resumption.to_string());
        export_to_env_if_not_present(MEILI_SSL_TICKETS, ssl_tickets.to_string());
        export_to_env_if_not_present(MEILI_SNAPSHOT_DIR, snapshot_dir);
        export_to_env_if_not_present(MEILI_SCHEDULE_SNAPSHOT, schedule_snapshot.to_string());
        export_to_env_if_not_present(
            MEILI_SNAPSHOT_INTERVAL_SEC,
            snapshot_interval_sec.to_string(),
        );
        export_to_env_if_not_present(MEILI_DUMPS_DIR, dumps_dir);
        export_to_env_if_not_present(MEILI_LOG_LEVEL, log_level);
        #[cfg(feature = "metrics")]
        {
            export_to_env_if_not_present(
                MEILI_ENABLE_METRICS_ROUTE,
                enable_metrics_route.to_string(),
            );
        }
        indexer_options.export_to_env();
        scheduler_options.export_to_env();
    }

    pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> {
        if let (Some(cert_path), Some(key_path)) = (&self.ssl_cert_path, &self.ssl_key_path) {
            let config = rustls::ServerConfig::builder().with_safe_defaults();
@ -260,6 +459,48 @@ fn load_ocsp(filename: &Option<PathBuf>) -> anyhow::Result<Vec<u8>> {
    Ok(ret)
}

/// Functions used to get default values for `Opt` fields; they need to be functions because of serde's default attribute.

fn default_db_path() -> PathBuf {
    PathBuf::from(DEFAULT_DB_PATH)
}

fn default_http_addr() -> String {
    DEFAULT_HTTP_ADDR.to_string()
}

fn default_env() -> String {
    DEFAULT_ENV.to_string()
}

fn default_max_index_size() -> Byte {
    Byte::from_str(DEFAULT_MAX_INDEX_SIZE).unwrap()
}

fn default_max_task_db_size() -> Byte {
    Byte::from_str(DEFAULT_MAX_TASK_DB_SIZE).unwrap()
}

fn default_http_payload_size_limit() -> Byte {
    Byte::from_str(DEFAULT_HTTP_PAYLOAD_SIZE_LIMIT).unwrap()
}

fn default_snapshot_dir() -> PathBuf {
    PathBuf::from(DEFAULT_SNAPSHOT_DIR)
}

fn default_snapshot_interval_sec() -> u64 {
    DEFAULT_SNAPSHOT_INTERVAL_SEC
}

fn default_dumps_dir() -> PathBuf {
    PathBuf::from(DEFAULT_DUMPS_DIR)
}

fn default_log_level() -> String {
    DEFAULT_LOG_LEVEL.to_string()
}

#[cfg(test)]
mod test {
    use super::*;
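`export_to_env_if_not_present` comes from `meilisearch-lib`; its doc comment above states the contract. A minimal sketch of its assumed behavior, which is what gives CLI args and env vars priority over TOML values (the real implementation may differ):

```rust
use std::env;
use std::ffi::OsStr;

// Assumed behavior: inject a value as an env var only when the variable
// is unset, so values already provided by the user are never overridden.
// (`set_var` is safe on the 2021 edition this crate targets.)
fn export_to_env_if_not_present<V: AsRef<OsStr>>(key: &str, value: V) {
    if env::var_os(key).is_none() {
        env::set_var(key, value);
    }
}

fn main() {
    env::set_var("MEILI_HTTP_ADDR", "0.0.0.0:7700"); // set by the user
    export_to_env_if_not_present("MEILI_HTTP_ADDR", "127.0.0.1:7700"); // from the TOML
    // The user-provided value wins, matching the toml < env < cli priority.
    assert_eq!(env::var("MEILI_HTTP_ADDR").unwrap(), "0.0.0.0:7700");
}
```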
112
meilisearch-http/src/route_metrics.rs
Normal file
@ -0,0 +1,112 @@
use std::future::{ready, Ready};

use actix_web::http::header;
use actix_web::HttpResponse;
use actix_web::{
    dev::{self, Service, ServiceRequest, ServiceResponse, Transform},
    Error,
};
use futures_util::future::LocalBoxFuture;
use meilisearch_auth::actions;
use meilisearch_lib::MeiliSearch;
use meilisearch_types::error::ResponseError;
use prometheus::HistogramTimer;
use prometheus::{Encoder, TextEncoder};

use crate::extractors::authentication::policies::ActionPolicy;
use crate::extractors::authentication::GuardedData;

pub async fn get_metrics(
    meilisearch: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, MeiliSearch>,
) -> Result<HttpResponse, ResponseError> {
    let search_rules = &meilisearch.filters().search_rules;
    let response = meilisearch.get_all_stats(search_rules).await?;

    crate::metrics::MEILISEARCH_DB_SIZE_BYTES.set(response.database_size as i64);
    crate::metrics::MEILISEARCH_INDEX_COUNT.set(response.indexes.len() as i64);

    for (index, value) in response.indexes.iter() {
        crate::metrics::MEILISEARCH_INDEX_DOCS_COUNT
            .with_label_values(&[index])
            .set(value.number_of_documents as i64);
    }

    let encoder = TextEncoder::new();
    let mut buffer = vec![];
    encoder
        .encode(&prometheus::gather(), &mut buffer)
        .expect("Failed to encode metrics");

    let response = String::from_utf8(buffer).expect("Failed to convert bytes to string");

    Ok(HttpResponse::Ok()
        .insert_header(header::ContentType(mime::TEXT_PLAIN))
        .body(response))
}

pub struct RouteMetrics;

// Middleware factory is `Transform` trait from actix-service crate
// `S` - type of the next service
// `B` - type of response's body
impl<S, B> Transform<S, ServiceRequest> for RouteMetrics
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<B>;
    type Error = Error;
    type InitError = ();
    type Transform = RouteMetricsMiddleware<S>;
    type Future = Ready<Result<Self::Transform, Self::InitError>>;

    fn new_transform(&self, service: S) -> Self::Future {
        ready(Ok(RouteMetricsMiddleware { service }))
    }
}

pub struct RouteMetricsMiddleware<S> {
    service: S,
}

impl<S, B> Service<ServiceRequest> for RouteMetricsMiddleware<S>
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<B>;
    type Error = Error;
    type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;

    dev::forward_ready!(service);

    fn call(&self, req: ServiceRequest) -> Self::Future {
        let mut histogram_timer: Option<HistogramTimer> = None;
        let request_path = req.path();
        let is_registered_resource = req.resource_map().has_resource(request_path);
        if is_registered_resource {
            let request_method = req.method().to_string();
            histogram_timer = Some(
                crate::metrics::HTTP_RESPONSE_TIME_SECONDS
                    .with_label_values(&[&request_method, request_path])
                    .start_timer(),
            );
            crate::metrics::HTTP_REQUESTS_TOTAL
                .with_label_values(&[&request_method, request_path])
                .inc();
        }

        let fut = self.service.call(req);

        Box::pin(async move {
            let res = fut.await?;

            if let Some(histogram_timer) = histogram_timer {
                histogram_timer.observe_duration();
            };
            Ok(res)
        })
    }
}
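Hypothetical wiring of this middleware into an app, mirroring the `Condition`-gated registration in `create_app!` (assumes the `RouteMetrics` type above is in scope):

```rust
use actix_web::middleware::Condition;
use actix_web::{web, App, HttpResponse};

fn build_app(enable_metrics: bool) {
    // When `enable_metrics` is false, Condition skips the middleware
    // entirely, so the timer/counter code above never runs.
    let _app = App::new()
        .route("/health", web::get().to(HttpResponse::Ok))
        .wrap(Condition::new(enable_metrics, RouteMetrics));
}
```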
@ -8,7 +8,7 @@ use serde_json::json;
use time::OffsetDateTime;

use crate::analytics::Analytics;
use crate::extractors::authentication::{policies::*, GuardedData};
use crate::extractors::authentication::{policies::*, AuthenticationError, GuardedData};
use crate::extractors::sequential_extractor::SeqHandler;
use crate::task::SummarizedTaskView;

@ -74,16 +74,21 @@ pub async fn create_index(
        primary_key, uid, ..
    } = body.into_inner();

    analytics.publish(
        "Index Created".to_string(),
        json!({ "primary_key": primary_key }),
        Some(&req),
    );
    let allow_index_creation = meilisearch.filters().search_rules.is_index_authorized(&uid);
    if allow_index_creation {
        analytics.publish(
            "Index Created".to_string(),
            json!({ "primary_key": primary_key }),
            Some(&req),
        );

    let update = Update::CreateIndex { primary_key };
    let task: SummarizedTaskView = meilisearch.register_update(uid, update).await?.into();
        let update = Update::CreateIndex { primary_key };
        let task: SummarizedTaskView = meilisearch.register_update(uid, update).await?.into();

    Ok(HttpResponse::Accepted().json(task))
        Ok(HttpResponse::Accepted().json(task))
    } else {
        Err(AuthenticationError::InvalidToken.into())
    }
}

#[derive(Debug, Deserialize)]
@ -158,7 +163,14 @@ pub async fn delete_index(
pub async fn get_index_stats(
    meilisearch: GuardedData<ActionPolicy<{ actions::STATS_GET }>, MeiliSearch>,
    path: web::Path<String>,
    req: HttpRequest,
    analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
    analytics.publish(
        "Stats Seen".to_string(),
        json!({ "per_index_uid": true }),
        Some(&req),
    );
    let response = meilisearch.get_index_stats(path.into_inner()).await?;

    debug!("returns: {:?}", response);
@ -2,8 +2,8 @@ use actix_web::{web, HttpRequest, HttpResponse};
use log::debug;
use meilisearch_auth::IndexSearchRules;
use meilisearch_lib::index::{
    SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG,
    DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
    MatchingStrategy, SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
    DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
};
use meilisearch_lib::MeiliSearch;
use meilisearch_types::error::ResponseError;
@ -45,6 +45,8 @@ pub struct SearchQueryGet {
    highlight_post_tag: String,
    #[serde(default = "DEFAULT_CROP_MARKER")]
    crop_marker: String,
    #[serde(default)]
    matching_strategy: MatchingStrategy,
}

impl From<SearchQueryGet> for SearchQuery {
@ -76,6 +78,7 @@ impl From<SearchQueryGet> for SearchQuery {
            highlight_pre_tag: other.highlight_pre_tag,
            highlight_post_tag: other.highlight_post_tag,
            crop_marker: other.crop_marker,
            matching_strategy: other.matching_strategy,
        }
    }
}
@ -1,7 +1,8 @@
use actix_web::{web, HttpResponse};
use actix_web::{web, HttpRequest, HttpResponse};
use log::debug;
use serde::{Deserialize, Serialize};

use serde_json::json;
use time::OffsetDateTime;

use meilisearch_lib::index::{Settings, Unchecked};
@ -9,6 +10,7 @@ use meilisearch_lib::MeiliSearch;
use meilisearch_types::error::ResponseError;
use meilisearch_types::star_or::StarOr;

use crate::analytics::Analytics;
use crate::extractors::authentication::{policies::*, GuardedData};

mod api_key;
@ -231,7 +233,14 @@ pub async fn running() -> HttpResponse {

async fn get_stats(
    meilisearch: GuardedData<ActionPolicy<{ actions::STATS_GET }>, MeiliSearch>,
    req: HttpRequest,
    analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
    analytics.publish(
        "Stats Seen".to_string(),
        json!({ "per_index_uid": false }),
        Some(&req),
    );
    let search_rules = &meilisearch.filters().search_rules;
    let response = meilisearch.get_all_stats(search_rules).await?;
@ -119,7 +119,7 @@ async fn get_tasks(
|
||||
// Then we complete the task filter with other potential status and types filters.
|
||||
let filters = if type_.is_some() || status.is_some() {
|
||||
let mut filters = indexes_filters.unwrap_or_default();
|
||||
filters.filter_fn(move |task| {
|
||||
filters.filter_fn(Box::new(move |task| {
|
||||
let matches_type = match &type_ {
|
||||
Some(types) => types
|
||||
.iter()
|
||||
@ -135,7 +135,7 @@ async fn get_tasks(
|
||||
};
|
||||
|
||||
matches_type && matches_status
|
||||
});
|
||||
}));
|
||||
Some(filters)
|
||||
} else {
|
||||
indexes_filters
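
For context on the `Box::new(...)` wrapper introduced above: a capturing closure has an anonymous type, so storing it in a struct field requires a boxed trait object. A minimal standalone sketch of the pattern, with hypothetical `Task`/`TaskFilter` types (not the real meilisearch-lib ones):

```rust
struct Task {
    kind: String,
}

#[derive(Default)]
struct TaskFilter {
    filter: Option<Box<dyn Fn(&Task) -> bool>>,
}

impl TaskFilter {
    // Mirrors the new signature: the caller hands over an owned, boxed closure.
    fn filter_fn(&mut self, f: Box<dyn Fn(&Task) -> bool>) {
        self.filter = Some(f);
    }
}

fn main() {
    let wanted = "documentAddition".to_string();
    let mut filter = TaskFilter::default();
    // The closure captures `wanted`, so it can only be stored behind a
    // pointer type such as `Box<dyn Fn(...)>`.
    filter.filter_fn(Box::new(move |task: &Task| task.kind == wanted));
    let task = Task { kind: "documentAddition".into() };
    assert!(filter.filter.as_ref().unwrap()(&task));
}
```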
@@ -4,7 +4,6 @@ use std::str::FromStr;
use std::write;

use meilisearch_lib::index::{Settings, Unchecked};
use meilisearch_lib::tasks::batch::BatchId;
use meilisearch_lib::tasks::task::{
DocumentDeletion, Task, TaskContent, TaskEvent, TaskId, TaskResult,
};
@@ -12,8 +11,6 @@ use meilisearch_types::error::ResponseError;
use serde::{Deserialize, Serialize, Serializer};
use time::{Duration, OffsetDateTime};

use crate::AUTOBATCHING_ENABLED;

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum TaskType {
@@ -150,7 +147,7 @@ enum TaskDetails {
IndexInfo { primary_key: Option<String> },
#[serde(rename_all = "camelCase")]
DocumentDeletion {
received_document_ids: usize,
matched_documents: usize,
deleted_documents: Option<u64>,
},
#[serde(rename_all = "camelCase")]
@@ -230,8 +227,6 @@ pub struct TaskView {
started_at: Option<OffsetDateTime>,
#[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
finished_at: Option<OffsetDateTime>,
#[serde(skip_serializing_if = "Option::is_none")]
batch_uid: Option<Option<BatchId>>,
}

impl From<Task> for TaskView {
@@ -260,7 +255,7 @@ impl From<Task> for TaskView {
} => (
TaskType::DocumentDeletion,
Some(TaskDetails::DocumentDeletion {
received_document_ids: ids.len(),
matched_documents: ids.len(),
deleted_documents: None,
}),
),
@@ -380,16 +375,6 @@ impl From<Task> for TaskView {

let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));

let batch_uid = if AUTOBATCHING_ENABLED.load(std::sync::atomic::Ordering::Relaxed) {
let id = events.iter().find_map(|e| match e {
TaskEvent::Batched { batch_id, .. } => Some(*batch_id),
_ => None,
});
Some(id)
} else {
None
};

Self {
uid: id,
index_uid,
@@ -401,7 +386,6 @@ impl From<Task> for TaskView {
enqueued_at,
started_at,
finished_at,
batch_uid,
}
}
}

@@ -8,7 +8,7 @@ use time::{Duration, OffsetDateTime};

pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'static str>>> =
Lazy::new(|| {
hashmap! {
let mut authorizations = hashmap! {
("POST", "/indexes/products/search") => hashset!{"search", "*"},
("GET", "/indexes/products/search") => hashset!{"search", "*"},
("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
@@ -52,7 +52,16 @@ pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'
("DELETE", "/keys/mykey/") => hashset!{"keys.delete", "*"},
("POST", "/keys") => hashset!{"keys.create", "*"},
("GET", "/keys") => hashset!{"keys.get", "*"},
};

if cfg!(feature = "metrics") {
authorizations.insert(
("GET", "/metrics"),
hashset! {"metrics.get", "metrics.*", "*"},
);
}

authorizations
});
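
The hunk above registers the `/metrics` permissions only when the `metrics` feature is compiled in. `cfg!` expands to a compile-time boolean, so the insertion can live inside ordinary `if` control flow (unlike `#[cfg]`, both branches must still type-check). A small self-contained sketch of the idiom:

```rust
use std::collections::HashMap;

fn main() {
    let mut authorizations: HashMap<&str, Vec<&str>> = HashMap::new();
    authorizations.insert("GET /indexes", vec!["indexes.get", "*"]);

    // `cfg!(feature = "metrics")` is `true` or `false` at compile time;
    // the branch is simply dead code when the feature is off.
    if cfg!(feature = "metrics") {
        authorizations.insert("GET /metrics", vec!["metrics.get", "metrics.*", "*"]);
    }

    println!("{} route permissions registered", authorizations.len());
}
```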

pub static ALL_ACTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| {
@@ -588,11 +597,91 @@ async fn error_creating_index_without_action() {
#[actix_rt::test]
async fn lazy_create_index() {
let mut server = Server::new_auth().await;

// create key with access on all indexes.
let contents = vec![
json!({
"indexes": ["*"],
"actions": ["*"],
"expiresAt": "2050-11-13T00:00:00Z"
}),
json!({
"indexes": ["*"],
"actions": ["indexes.*", "documents.*", "settings.*", "tasks.*"],
"expiresAt": "2050-11-13T00:00:00Z"
}),
json!({
"indexes": ["*"],
"actions": ["indexes.create", "documents.add", "settings.update", "tasks.get"],
"expiresAt": "2050-11-13T00:00:00Z"
}),
];

for content in contents {
server.use_api_key("MASTER_KEY");
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());

// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(&key);

// try to create an index via the add documents route
let index = server.index("test");
let documents = json!([
{
"id": 1,
"content": "foo",
}
]);

let (response, code) = index.add_documents(documents, None).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();

index.wait_task(task_id).await;

let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");

// try to create an index via the add settings route
let index = server.index("test1");
let settings = json!({ "distinctAttribute": "test"});

let (response, code) = index.update_settings(settings).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();

index.wait_task(task_id).await;

let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");

// try to create an index via a specialized settings route
let index = server.index("test2");
let (response, code) = index.update_distinct_attribute(json!("test")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();

index.wait_task(task_id).await;

let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");
}
}

#[actix_rt::test]
async fn error_creating_index_without_index() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");

// create key with access on all indexes.
let content = json!({
"indexes": ["*"],
"indexes": ["unexpected"],
"actions": ["*"],
"expiresAt": "2050-11-13T00:00:00Z"
});
@@ -615,38 +704,21 @@ async fn lazy_create_index() {
]);

let (response, code) = index.add_documents(documents, None).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();

index.wait_task(task_id).await;

let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");
assert_eq!(403, code, "{:?}", &response);

// try to create an index via the add settings route
let index = server.index("test1");
let settings = json!({ "distinctAttribute": "test"});

let (response, code) = index.update_settings(settings).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();

index.wait_task(task_id).await;

let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");
assert_eq!(403, code, "{:?}", &response);

// try to create an index via a specialized settings route
let index = server.index("test2");
let (response, code) = index.update_distinct_attribute(json!("test")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
assert_eq!(403, code, "{:?}", &response);

index.wait_task(task_id).await;

let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");
// try to create an index via the create index route
let index = server.index("test3");
let (response, code) = index.create(None).await;
assert_eq!(403, code, "{:?}", &response);
}

@@ -19,7 +19,7 @@ async fn error_api_key_bad_content_types() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -91,7 +91,7 @@ async fn error_api_key_empty_content_types() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -163,7 +163,7 @@ async fn error_api_key_missing_content_types() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -227,7 +227,7 @@ async fn error_api_key_empty_payload() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -283,7 +283,7 @@ async fn error_api_key_malformed_payload() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;

@@ -162,6 +162,8 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
max_indexing_memory: MaxMemory::unlimited(),
..Parser::parse_from(None as Option<&str>)
},
#[cfg(feature = "metrics")]
enable_metrics_route: true,
..Parser::parse_from(None as Option<&str>)
}
}

@@ -18,7 +18,7 @@ impl Service {
&self.meilisearch,
&self.auth,
true,
&self.options,
self.options,
analytics::MockAnalytics::new(&self.options).0
))
.await;
@@ -46,7 +46,7 @@ impl Service {
&self.meilisearch,
&self.auth,
true,
&self.options,
self.options,
analytics::MockAnalytics::new(&self.options).0
))
.await;
@@ -72,7 +72,7 @@ impl Service {
&self.meilisearch,
&self.auth,
true,
&self.options,
self.options,
analytics::MockAnalytics::new(&self.options).0
))
.await;
@@ -95,7 +95,7 @@ impl Service {
&self.meilisearch,
&self.auth,
true,
&self.options,
self.options,
analytics::MockAnalytics::new(&self.options).0
))
.await;
@@ -118,7 +118,7 @@ impl Service {
&self.meilisearch,
&self.auth,
true,
&self.options,
self.options,
analytics::MockAnalytics::new(&self.options).0
))
.await;
@@ -141,7 +141,7 @@ impl Service {
&self.meilisearch,
&self.auth,
true,
&self.options,
self.options,
analytics::MockAnalytics::new(&self.options).0
))
.await;

@@ -63,7 +63,7 @@ async fn error_json_bad_content_type() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -146,7 +146,7 @@ async fn extract_actual_content_type() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -1,5 +1,6 @@
use crate::common::Server;

#[cfg(feature = "mini-dashboard")]
#[actix_rt::test]
async fn dashboard_assets_load() {
let server = Server::new().await;

@@ -1,5 +1,6 @@
use crate::common::{GetAllDocumentsOptions, Server};
use actix_web::test;

use meilisearch_http::{analytics, create_app};
use serde_json::{json, Value};
use time::{format_description::well_known::Rfc3339, OffsetDateTime};
@@ -20,7 +21,52 @@ async fn add_documents_test_json_content_types() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 0);

// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 1);
}

/// Here we try to send a single document instead of an array with a single document inside.
#[actix_rt::test]
async fn add_single_document_test_json_content_types() {
let document = json!({
"id": 1,
"content": "Bouvier Bernois",
});

// this is what is expected and should work
let server = Server::new().await;
let app = test::init_service(create_app!(
&server.service.meilisearch,
&server.service.auth,
true,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -66,7 +112,7 @@ async fn error_add_documents_test_bad_content_types() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -134,7 +180,7 @@ async fn error_add_documents_test_no_content_type() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -194,7 +240,7 @@ async fn error_add_malformed_csv_documents() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -256,7 +302,7 @@ async fn error_add_malformed_json_documents() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -326,7 +372,7 @@ async fn error_add_malformed_json_documents() {
assert_eq!(
response["message"],
json!(
r#"The `json` payload provided is malformed. `Couldn't serialize document value: invalid type: string "0123456789012345678901234567...890123456789", expected a documents, or a sequence of documents. at line 1 column 102`."#
r#"The `json` payload provided is malformed. `Couldn't serialize document value: data are neither an object nor a list of objects`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
@@ -349,9 +395,7 @@ async fn error_add_malformed_json_documents() {
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `json` payload provided is malformed. `Couldn't serialize document value: invalid type: string "0123456789012345678901234567...90123456789m", expected a documents, or a sequence of documents. at line 1 column 103`."#
)
json!("The `json` payload provided is malformed. `Couldn't serialize document value: data are neither an object nor a list of objects`.")
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
@@ -370,7 +414,7 @@ async fn error_add_malformed_ndjson_documents() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -388,7 +432,7 @@ async fn error_add_malformed_ndjson_documents() {
assert_eq!(
response["message"],
json!(
r#"The `ndjson` payload provided is malformed. `Couldn't serialize document value: key must be a string at line 1 column 2`."#
r#"The `ndjson` payload provided is malformed. `Couldn't serialize document value: key must be a string at line 2 column 2`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
@@ -411,9 +455,7 @@ async fn error_add_malformed_ndjson_documents() {
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `ndjson` payload provided is malformed. `Couldn't serialize document value: key must be a string at line 1 column 2`."#
)
json!("The `ndjson` payload provided is malformed. `Couldn't serialize document value: key must be a string at line 2 column 2`.")
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
@@ -432,7 +474,7 @@ async fn error_add_missing_payload_csv_documents() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -484,7 +526,7 @@ async fn error_add_missing_payload_json_documents() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -536,7 +578,7 @@ async fn error_add_missing_payload_ndjson_documents() {
&server.service.meilisearch,
&server.service.auth,
true,
&server.service.options,
server.service.options,
analytics::MockAnalytics::new(&server.service.options).0
))
.await;
@@ -1020,7 +1062,7 @@ async fn add_documents_invalid_geo_field() {
index.wait_task(2).await;
let (response, code) = index.get_task(2).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
assert_eq!(response["status"], "failed");
}

#[actix_rt::test]
@@ -1099,3 +1141,62 @@ async fn add_documents_with_primary_key_twice() {
let (response, _code) = index.get_task(1).await;
assert_eq!(response["status"], "succeeded");
}

#[actix_rt::test]
async fn batch_several_documents_addition() {
let server = Server::new().await;
let index = server.index("test");

let mut documents: Vec<_> = (0..150usize)
.into_iter()
.map(|id| {
json!(
{
"id": id,
"title": "foo",
"desc": "bar"
}
)
})
.collect();

documents[100] = json!({"title": "error", "desc": "error"});

// enqueue batch of documents
let mut waiter = Vec::new();
for chunk in documents.chunks(30) {
waiter.push(index.add_documents(json!(chunk), Some("id")));
}

// wait first batch of documents to finish
futures::future::join_all(waiter).await;
index.wait_task(4).await;

// run a second completely failing batch
documents[40] = json!({"title": "error", "desc": "error"});
documents[70] = json!({"title": "error", "desc": "error"});
documents[130] = json!({"title": "error", "desc": "error"});
let mut waiter = Vec::new();
for chunk in documents.chunks(30) {
waiter.push(index.add_documents(json!(chunk), Some("id")));
}
// wait second batch of documents to finish
futures::future::join_all(waiter).await;
index.wait_task(9).await;

let (response, _code) = index.filtered_tasks(&[], &["failed"]).await;

// Check that exactly 5 of the 10 tasks failed
println!("{}", &response);
assert_eq!(response["results"].as_array().unwrap().len(), 5);

// Check if there are exactly 120 documents (150 - 30) in the index;
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions {
limit: Some(200),
..Default::default()
})
.await;
assert_eq!(code, 200, "failed with `{}`", response);
assert_eq!(response["results"].as_array().unwrap().len(), 120);
}
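
A note on the arithmetic behind the assertions in this test: 150 documents in chunks of 30 yield five tasks per round, and a document at index `i` lands in chunk `i / 30`. The illustrative sketch below (not part of the diff) shows why exactly five tasks fail across the two rounds:

```rust
fn main() {
    // 150 documents in chunks of 30 -> chunks 0..=4, one task per chunk.
    let chunk_of = |i: usize| i / 30;

    // Round one: only the document at index 100 is invalid -> one failed task.
    assert_eq!(chunk_of(100), 3);

    // Round two: documents 40, 70, 100 and 130 are all invalid,
    // hitting four distinct chunks.
    let failing: Vec<usize> = [40, 70, 100, 130].iter().map(|&i| chunk_of(i)).collect();
    assert_eq!(failing, vec![1, 2, 3, 4]);

    // 1 failure from round one + 4 from round two = the 5 failed tasks
    // (and chunk 3 never succeeds, leaving 150 - 30 = 120 documents).
}
```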

@@ -74,7 +74,7 @@ async fn filter_invalid_syntax_object() {
index.wait_task(1).await;

let expected_response = json!({
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `TO` or `_geoRadius` at `title & Glass`.\n1:14 title & Glass",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
@@ -101,7 +101,7 @@ async fn filter_invalid_syntax_array() {
index.wait_task(1).await;

let expected_response = json!({
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `TO` or `_geoRadius` at `title & Glass`.\n1:14 title & Glass",
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"

@@ -708,9 +708,7 @@ async fn faceting_max_values_per_facet() {
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
let numbers = dbg!(&response)["facetDistribution"]["number"]
.as_object()
.unwrap();
let numbers = &response["facetDistribution"]["number"].as_object().unwrap();
assert_eq!(numbers.len(), 10_000);
},
)

@@ -1,11 +1,11 @@
[package]
name = "meilisearch-lib"
version = "0.28.1"
version = "0.29.1"
edition = "2021"

[dependencies]
actix-web = { version = "4.0.1", default-features = false }
anyhow = { version = "1.0.56", features = ["backtrace"] }
anyhow = { version = "1.0.62", features = ["backtrace"] }
async-stream = "0.3.3"
async-trait = "0.1.52"
atomic_refcell = "0.1.8"
@@ -15,7 +15,7 @@ clap = { version = "3.1.6", features = ["derive", "env"] }
crossbeam-channel = "0.5.2"
csv = "1.1.6"
derivative = "2.2.0"
either = "1.6.1"
either = { version = "1.6.1", features = ["serde"] }
flate2 = "1.0.22"
fs_extra = "1.2.0"
fst = "0.4.7"
@@ -28,11 +28,12 @@ lazy_static = "1.4.0"
log = "0.4.14"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
milli = { git = "https://github.com/meilisearch/milli.git", tag = "v0.31.2" }
milli = { git = "https://github.com/meilisearch/milli.git", tag = "v0.33.4", default-features = false }
mime = "0.3.16"
num_cpus = "1.13.1"
obkv = "0.2.0"
once_cell = "1.10.0"
page_size = "0.4.2"
parking_lot = "0.12.0"
permissive-json-pointer = { path = "../permissive-json-pointer" }
rand = "0.8.5"
@@ -42,7 +43,7 @@ reqwest = { version = "0.11.9", features = ["json", "rustls-tls"], default-featu
roaring = "0.9.0"
rustls = "0.20.4"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = { version = "1.0.79", features = ["preserve_order"] }
serde_json = { version = "1.0.85", features = ["preserve_order"] }
siphasher = "0.3.10"
slice-group-by = "0.3.0"
sysinfo = "0.23.5"
@@ -63,3 +64,19 @@ nelson = { git = "https://github.com/meilisearch/nelson.git", rev = "675f1388554
paste = "1.0.6"
proptest = "1.0.0"
proptest-derive = "0.3.0"

[features]
# all specialized tokenizations
default = ["milli/default"]

# chinese specialized tokenization
chinese = ["milli/chinese"]

# hebrew specialized tokenization
hebrew = ["milli/hebrew"]

# japanese specialized tokenization
japanese = ["milli/japanese"]

# thai specialized tokenization
thai = ["milli/thai"]

@@ -1,10 +1,14 @@
use std::borrow::Borrow;
use std::fmt::{self, Debug, Display};
use std::io::{self, BufRead, BufReader, BufWriter, Cursor, Read, Seek, Write};
use std::io::{self, BufReader, Read, Seek, Write};

use either::Either;
use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::internal_error;
use milli::documents::DocumentBatchBuilder;
use milli::documents::{DocumentsBatchBuilder, Error};
use milli::Object;
use serde::Deserialize;
use serde_json::error::Category;

type Result<T> = std::result::Result<T, DocumentFormatError>;

@@ -18,9 +22,9 @@ pub enum PayloadType {
impl fmt::Display for PayloadType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PayloadType::Ndjson => write!(f, "ndjson"),
PayloadType::Json => write!(f, "json"),
PayloadType::Csv => write!(f, "csv"),
PayloadType::Ndjson => f.write_str("ndjson"),
PayloadType::Json => f.write_str("json"),
PayloadType::Csv => f.write_str("csv"),
}
}
}
@@ -28,7 +32,7 @@ impl fmt::Display for PayloadType {
#[derive(Debug)]
pub enum DocumentFormatError {
Internal(Box<dyn std::error::Error + Send + Sync + 'static>),
MalformedPayload(Box<milli::documents::Error>, PayloadType),
MalformedPayload(Error, PayloadType),
}

impl Display for DocumentFormatError {
@@ -36,19 +40,33 @@ impl Display for DocumentFormatError {
match self {
Self::Internal(e) => write!(f, "An internal error has occurred: `{}`.", e),
Self::MalformedPayload(me, b) => match me.borrow() {
milli::documents::Error::JsonError(se) => {
Error::Json(se) => {
let mut message = match se.classify() {
Category::Data => {
"data are neither an object nor a list of objects".to_string()
}
_ => se.to_string(),
};

// https://github.com/meilisearch/meilisearch/issues/2107
// The user input may be insanely long. We need to truncate it.
let mut serde_msg = se.to_string();
let ellipsis = "...";
if serde_msg.len() > 100 + ellipsis.len() {
serde_msg.replace_range(50..serde_msg.len() - 85, ellipsis);
let trim_input_prefix_len = 50;
let trim_input_suffix_len = 85;

if message.len()
> trim_input_prefix_len + trim_input_suffix_len + ellipsis.len()
{
message.replace_range(
trim_input_prefix_len..message.len() - trim_input_suffix_len,
ellipsis,
);
}

write!(
f,
"The `{}` payload provided is malformed. `Couldn't serialize document value: {}`.",
b, serde_msg
b, message
)
}
_ => write!(f, "The `{}` payload provided is malformed: `{}`.", b, me),
@@ -59,11 +77,11 @@ impl Display for DocumentFormatError {

impl std::error::Error for DocumentFormatError {}

impl From<(PayloadType, milli::documents::Error)> for DocumentFormatError {
fn from((ty, error): (PayloadType, milli::documents::Error)) -> Self {
impl From<(PayloadType, Error)> for DocumentFormatError {
fn from((ty, error): (PayloadType, Error)) -> Self {
match error {
milli::documents::Error::Io(e) => Self::Internal(Box::new(e)),
e => Self::MalformedPayload(Box::new(e), ty),
Error::Io(e) => Self::Internal(Box::new(e)),
e => Self::MalformedPayload(e, ty),
}
}
}
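
The truncation logic introduced above keeps a 50-character prefix and an 85-character suffix of an overlong serde message and splices an ellipsis over the middle. A standalone check of that behavior (same constants, hypothetical helper name):

```rust
fn truncate_middle(mut message: String) -> String {
    // Mirrors the diff's constants: keep a 50-char prefix and an
    // 85-char suffix, replacing everything in between with "...".
    let (prefix, suffix, ellipsis) = (50usize, 85usize, "...");
    if message.len() > prefix + suffix + ellipsis.len() {
        message.replace_range(prefix..message.len() - suffix, ellipsis);
    }
    message
}

fn main() {
    let long = "x".repeat(500);
    let short = "short message".to_string();
    // Overlong input collapses to exactly prefix + ellipsis + suffix.
    assert_eq!(truncate_middle(long).len(), 50 + 3 + 85);
    // Short input passes through untouched.
    assert_eq!(truncate_middle(short), "short message");
}
```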
@@ -79,51 +97,74 @@ impl ErrorCode for DocumentFormatError {

internal_error!(DocumentFormatError: io::Error);

/// reads csv from input and write an obkv batch to writer.
/// Reads CSV from input and write an obkv batch to writer.
pub fn read_csv(input: impl Read, writer: impl Write + Seek) -> Result<usize> {
let writer = BufWriter::new(writer);
let builder =
DocumentBatchBuilder::from_csv(input, writer).map_err(|e| (PayloadType::Csv, e))?;
let mut builder = DocumentsBatchBuilder::new(writer);

let count = builder.finish().map_err(|e| (PayloadType::Csv, e))?;
let csv = csv::Reader::from_reader(input);
builder.append_csv(csv).map_err(|e| (PayloadType::Csv, e))?;

Ok(count)
let count = builder.documents_count();
let _ = builder
.into_inner()
.map_err(Into::into)
.map_err(DocumentFormatError::Internal)?;

Ok(count as usize)
}

/// reads jsonl from input and write an obkv batch to writer.
/// Reads JSON Lines from input and write an obkv batch to writer.
pub fn read_ndjson(input: impl Read, writer: impl Write + Seek) -> Result<usize> {
let mut reader = BufReader::new(input);
let writer = BufWriter::new(writer);
let mut builder = DocumentsBatchBuilder::new(writer);
let reader = BufReader::new(input);

let mut builder = DocumentBatchBuilder::new(writer).map_err(|e| (PayloadType::Ndjson, e))?;
let mut buf = String::new();

while reader.read_line(&mut buf)? > 0 {
// skip empty lines
if buf == "\n" {
buf.clear();
continue;
}
builder
.extend_from_json(Cursor::new(&buf.as_bytes()))
for result in serde_json::Deserializer::from_reader(reader).into_iter() {
let object = result
.map_err(Error::Json)
.map_err(|e| (PayloadType::Ndjson, e))?;
buf.clear();
builder
.append_json_object(&object)
.map_err(Into::into)
.map_err(DocumentFormatError::Internal)?;
}

let count = builder.finish().map_err(|e| (PayloadType::Ndjson, e))?;
let count = builder.documents_count();
let _ = builder
.into_inner()
.map_err(Into::into)
.map_err(DocumentFormatError::Internal)?;

Ok(count)
Ok(count as usize)
}
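
The rewritten `read_ndjson` drops the manual line loop in favor of serde_json's streaming deserializer, which yields one value per concatenated JSON document and treats blank lines as plain whitespace, so they no longer need special-casing. A minimal sketch of that behavior:

```rust
use serde_json::{Deserializer, Map, Value};

fn main() {
    // The blank line between the two documents is skipped automatically;
    // no `if buf == "\n"` check is needed.
    let ndjson = "{\"id\": 1}\n\n{\"id\": 2}\n";
    let objects: Result<Vec<Map<String, Value>>, _> =
        Deserializer::from_str(ndjson).into_iter().collect();
    assert_eq!(objects.unwrap().len(), 2);
}
```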

/// reads json from input and write an obkv batch to writer.
/// Reads JSON from input and write an obkv batch to writer.
pub fn read_json(input: impl Read, writer: impl Write + Seek) -> Result<usize> {
let writer = BufWriter::new(writer);
let mut builder = DocumentBatchBuilder::new(writer).map_err(|e| (PayloadType::Json, e))?;
builder
.extend_from_json(input)
let mut builder = DocumentsBatchBuilder::new(writer);
let reader = BufReader::new(input);

#[derive(Deserialize, Debug)]
#[serde(transparent)]
struct ArrayOrSingleObject {
#[serde(with = "either::serde_untagged")]
inner: Either<Vec<Object>, Object>,
}

let content: ArrayOrSingleObject = serde_json::from_reader(reader)
.map_err(Error::Json)
.map_err(|e| (PayloadType::Json, e))?;

let count = builder.finish().map_err(|e| (PayloadType::Json, e))?;
for object in content.inner.map_right(|o| vec![o]).into_inner() {
builder
.append_json_object(&object)
.map_err(Into::into)
.map_err(DocumentFormatError::Internal)?;
}

Ok(count)
let count = builder.documents_count();
let _ = builder
.into_inner()
.map_err(Into::into)
.map_err(DocumentFormatError::Internal)?;

Ok(count as usize)
}
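
The new `read_json` accepts either a single JSON object or an array of objects through the transparent wrapper above; `either::serde_untagged` (enabled by the `serde` feature added to the `either` dependency earlier in this diff) tries each side in turn. A self-contained sketch, using `serde_json::Map` in place of milli's `Object` alias:

```rust
use either::Either;
use serde::Deserialize;
use serde_json::{Map, Value};

#[derive(Deserialize, Debug)]
#[serde(transparent)]
struct ArrayOrSingleObject {
    #[serde(with = "either::serde_untagged")]
    inner: Either<Vec<Map<String, Value>>, Map<String, Value>>,
}

fn main() {
    let single: ArrayOrSingleObject = serde_json::from_str(r#"{"id": 1}"#).unwrap();
    let array: ArrayOrSingleObject =
        serde_json::from_str(r#"[{"id": 1}, {"id": 2}]"#).unwrap();

    // `map_right` lifts the single-object case into a one-element vector,
    // exactly as the new `read_json` does before appending to the builder.
    assert_eq!(single.inner.map_right(|o| vec![o]).into_inner().len(), 1);
    assert_eq!(array.inner.map_right(|o| vec![o]).into_inner().len(), 2);
}
```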

@@ -145,7 +145,7 @@ pub fn error_code_from_str(s: &str) -> anyhow::Result<Code> {
"unsupported_media_type" => Code::UnsupportedMediaType,
"dump_already_in_progress" => Code::DumpAlreadyInProgress,
"dump_process_failed" => Code::DumpProcessFailed,
_ => bail!("unknow error code."),
_ => bail!("unknown error code."),
};

Ok(code)

@@ -11,7 +11,7 @@ pub enum DumpError {
#[error("An internal error has occurred. `{0}`.")]
Internal(Box<dyn std::error::Error + Send + Sync + 'static>),
#[error("{0}")]
IndexResolver(#[from] IndexResolverError),
IndexResolver(Box<IndexResolverError>),
}

internal_error!(
@@ -26,6 +26,12 @@ internal_error!(
TaskError
);

impl From<IndexResolverError> for DumpError {
fn from(e: IndexResolverError) -> Self {
Self::IndexResolver(Box::new(e))
}
}
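
Boxing the `IndexResolverError` variant keeps `DumpError` itself small, at the cost of replacing the derived `#[from]` conversion with the hand-written `From` impl above. A minimal sketch of the trade-off, with stand-in types rather than the real meilisearch-lib errors:

```rust
// Stand-in for a large inner error type.
#[derive(Debug)]
struct BigInnerError([u8; 256]);

#[derive(Debug)]
enum OuterError {
    Internal(String),
    // Boxed: the variant is pointer-sized instead of 256+ bytes.
    Inner(Box<BigInnerError>),
}

// The manual impl a derive like `#[from]` can no longer generate,
// because the conversion must insert the `Box::new`.
impl From<BigInnerError> for OuterError {
    fn from(e: BigInnerError) -> Self {
        OuterError::Inner(Box::new(e))
    }
}

fn main() {
    // The whole enum stays small even though the inner error is large.
    assert!(std::mem::size_of::<OuterError>() < std::mem::size_of::<BigInnerError>());
    let _err: OuterError = BigInnerError([0; 256]).into();
}
```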

impl ErrorCode for DumpError {
fn error_code(&self) -> Code {
match self {

@@ -1,24 +0,0 @@
use std::path::Path;

use serde::{Deserialize, Serialize};

use crate::index_controller::IndexMetadata;

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct MetadataV1 {
pub db_version: String,
indexes: Vec<IndexMetadata>,
}

impl MetadataV1 {
#[allow(dead_code, unreachable_code, unused_variables)]
pub fn load_dump(
self,
src: impl AsRef<Path>,
dst: impl AsRef<Path>,
size: usize,
indexer_options: &IndexerOpts,
) -> anyhow::Result<()> {
anyhow::bail!("The version 1 of the dumps is not supported anymore. You can re-export your dump from a version between 0.21 and 0.24, or start fresh from a version 0.25 onwards.")
}
@@ -57,10 +57,10 @@ fn patch_updates(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> anyhow::Result
let updates_path = src.as_ref().join("updates/data.jsonl");
let output_updates_path = dst.as_ref().join("updates/data.jsonl");
create_dir_all(output_updates_path.parent().unwrap())?;
let udpates_file = File::open(updates_path)?;
let updates_file = File::open(updates_path)?;
let mut output_update_file = File::create(output_updates_path)?;

serde_json::Deserializer::from_reader(udpates_file)
serde_json::Deserializer::from_reader(updates_file)
.into_iter::<compat::v4::Task>()
.try_for_each(|task| -> anyhow::Result<()> {
let task: Task = task?.into();

@@ -143,7 +143,7 @@ impl MetadataVersion {
}
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
#[serde(rename_all = "snake_case")]
pub enum DumpStatus {
Done,

@@ -24,7 +24,9 @@ impl ErrorCode for MilliError<'_> {
match error {
// TODO: wait for spec for new error codes.
UserError::SerdeJson(_)
| UserError::InvalidLmdbOpenOptions
| UserError::DocumentLimitReached
| UserError::AccessingSoftDeletedDocument { .. }
| UserError::UnknownInternalDocumentId { .. } => Code::Internal,
UserError::InvalidStoreFile => Code::InvalidStore,
UserError::NoSpaceLeftOnDevice => Code::NoSpaceLeftOnDevice,
@@ -32,7 +34,9 @@ impl ErrorCode for MilliError<'_> {
UserError::AttributeLimitReached => Code::MaxFieldsLimitExceeded,
UserError::InvalidFilter(_) => Code::Filter,
UserError::MissingDocumentId { .. } => Code::MissingDocumentId,
UserError::InvalidDocumentId { .. } => Code::InvalidDocumentId,
UserError::InvalidDocumentId { .. } | UserError::TooManyDocumentIds { .. } => {
Code::InvalidDocumentId
}
UserError::MissingPrimaryKey => Code::MissingPrimaryKey,
UserError::PrimaryKeyCannotBeChanged(_) => Code::PrimaryKeyAlreadyPresent,
UserError::SortRankingRuleMissing => Code::Sort,

@@ -4,7 +4,7 @@ use std::path::Path;

use anyhow::Context;
use indexmap::IndexMap;
use milli::documents::DocumentBatchReader;
use milli::documents::DocumentsBatchReader;
use milli::heed::{EnvOpenOptions, RoTxn};
use milli::update::{IndexDocumentsConfig, IndexerConfig};
use serde::{Deserialize, Serialize};
@@ -105,6 +105,7 @@ impl Index {

let mut options = EnvOpenOptions::new();
options.map_size(size);
options.max_readers(1024);
let index = milli::Index::new(options, &dst_dir_path)?;

let mut txn = index.write_txn()?;
@@ -135,19 +136,20 @@ impl Index {
if !empty {
tmp_doc_file.seek(SeekFrom::Start(0))?;

let documents_reader = DocumentBatchReader::from_reader(tmp_doc_file)?;
let documents_reader = DocumentsBatchReader::from_reader(tmp_doc_file)?;

// If the document file is empty, we don't perform the document addition, to prevent
// a primary key error from being thrown.
let config = IndexDocumentsConfig::default();
let mut builder = milli::update::IndexDocuments::new(
let builder = milli::update::IndexDocuments::new(
&mut txn,
&index,
indexer_config,
config,
|_| (),
)?;
builder.add_documents(documents_reader)?;
let (builder, user_error) = builder.add_documents(documents_reader)?;
user_error?;
builder.execute()?;
}

@@ -40,6 +40,12 @@ impl ErrorCode for IndexError {
}
}

impl From<milli::UserError> for IndexError {
fn from(error: milli::UserError) -> IndexError {
IndexError::Milli(error.into())
}
}

#[derive(Debug, thiserror::Error)]
pub enum FacetError {
#[error("Invalid syntax for the filter parameter: `expected {}, found: {1}`.", .0.join(", "))]
@@ -4,7 +4,6 @@ use std::marker::PhantomData;
use std::ops::Deref;
use std::path::Path;
use std::sync::Arc;
use walkdir::WalkDir;

use fst::IntoStreamer;
use milli::heed::{CompactionOption, EnvOpenOptions, RoTxn};
@@ -14,6 +13,7 @@ use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use time::OffsetDateTime;
use uuid::Uuid;
use walkdir::WalkDir;

use crate::index::search::DEFAULT_PAGINATION_MAX_TOTAL_HITS;

@@ -94,6 +94,7 @@ impl Index {
create_dir_all(&path)?;
let mut options = EnvOpenOptions::new();
options.map_size(size);
options.max_readers(1024);
let inner = Arc::new(milli::Index::new(options, &path)?);
Ok(Index {
inner,
@@ -245,11 +246,8 @@ impl Index {
let fields_ids_map = self.fields_ids_map(&txn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();

let iter = self.all_documents(&txn)?.skip(offset).take(limit);

let mut documents = Vec::new();

for entry in iter {
for entry in self.all_documents(&txn)?.skip(offset).take(limit) {
let (_id, obkv) = entry?;
let document = obkv_to_json(&all_fields, &fields_ids_map, obkv)?;
let document = match &attributes_to_retrieve {
@@ -302,7 +300,7 @@ impl Index {
}

pub fn size(&self) -> u64 {
WalkDir::new(self.inner.path())
WalkDir::new(self.path())
.into_iter()
.filter_map(|entry| entry.ok())
.filter_map(|entry| entry.metadata().ok())

@@ -1,5 +1,5 @@
pub use search::{
SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
MatchingStrategy, SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
};
pub use updates::{apply_settings_to_builder, Checked, Facets, Settings, Unchecked};
@@ -24,12 +24,12 @@ pub use test::MockIndex as Index;
/// code for unit testing, in places where an index would normally be used.
#[cfg(test)]
pub mod test {
use std::path::Path;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use milli::update::IndexerConfig;
use milli::update::{DocumentAdditionResult, DocumentDeletionResult, IndexDocumentsMethod};
use milli::update::{
DocumentAdditionResult, DocumentDeletionResult, IndexDocumentsMethod, IndexerConfig,
};
use nelson::Mocker;
use uuid::Uuid;

@@ -162,7 +162,7 @@ pub mod test {
primary_key: Option<String>,
file_store: UpdateFileStore,
contents: impl Iterator<Item = Uuid>,
) -> Result<DocumentAdditionResult> {
) -> Result<Vec<Result<DocumentAdditionResult>>> {
match self {
MockIndex::Real(index) => {
index.update_documents(method, primary_key, file_store, contents)

@@ -7,7 +7,7 @@ use either::Either;
use milli::tokenizer::TokenizerBuilder;
use milli::{
AscDesc, FieldId, FieldsIdsMap, Filter, FormatOptions, MatchBounds, MatcherBuilder, SortError,
DEFAULT_VALUES_PER_FACET,
TermsMatchingStrategy, DEFAULT_VALUES_PER_FACET,
};
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -27,11 +27,11 @@ pub const DEFAULT_CROP_MARKER: fn() -> String = || "…".to_string();
pub const DEFAULT_HIGHLIGHT_PRE_TAG: fn() -> String = || "<em>".to_string();
pub const DEFAULT_HIGHLIGHT_POST_TAG: fn() -> String = || "</em>".to_string();

/// The maximimum number of results that the engine
/// The maximum number of results that the engine
/// will be able to return in one search call.
pub const DEFAULT_PAGINATION_MAX_TOTAL_HITS: usize = 1000;

#[derive(Deserialize, Debug, Clone, PartialEq)]
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct SearchQuery {
pub q: Option<String>,
@@ -55,6 +55,32 @@ pub struct SearchQuery {
pub highlight_post_tag: String,
#[serde(default = "DEFAULT_CROP_MARKER")]
pub crop_marker: String,
#[serde(default)]
pub matching_strategy: MatchingStrategy,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub enum MatchingStrategy {
/// Remove query words from last to first
Last,
/// All query words are mandatory
All,
}

impl Default for MatchingStrategy {
fn default() -> Self {
Self::Last
}
}

impl From<MatchingStrategy> for TermsMatchingStrategy {
fn from(other: MatchingStrategy) -> Self {
match other {
MatchingStrategy::Last => Self::Last,
MatchingStrategy::All => Self::All,
}
}
}
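
With `rename_all = "camelCase"` on the enum and `#[serde(default)]` on the new field, the parameter deserializes from `"last"`/`"all"` and falls back to `Last` when omitted. A simplified, self-contained sketch (the wrapper struct and its field name are illustrative, not the real `SearchQuery`):

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
enum MatchingStrategy {
    Last, // serialized as "last"
    All,  // serialized as "all"
}

impl Default for MatchingStrategy {
    fn default() -> Self {
        Self::Last
    }
}

#[derive(Deserialize, Debug)]
struct Query {
    #[serde(default)]
    matching_strategy: MatchingStrategy,
}

fn main() {
    let q: Query = serde_json::from_str(r#"{"matching_strategy": "all"}"#).unwrap();
    assert_eq!(q.matching_strategy, MatchingStrategy::All);

    // The field is optional thanks to `#[serde(default)]`.
    let q: Query = serde_json::from_str("{}").unwrap();
    assert_eq!(q.matching_strategy, MatchingStrategy::Last);
}
```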

#[derive(Debug, Clone, Serialize, PartialEq)]
@@ -91,6 +117,8 @@ impl Index {
search.query(query);
}

search.terms_matching_strategy(query.matching_strategy.into());

let max_total_hits = self
.pagination_max_total_hits(&rtxn)?
.unwrap_or(DEFAULT_PAGINATION_MAX_TOTAL_HITS);

@@ -3,7 +3,7 @@ use std::marker::PhantomData;
use std::num::NonZeroUsize;

use log::{debug, info, trace};
use milli::documents::DocumentBatchReader;
use milli::documents::DocumentsBatchReader;
use milli::update::{
DocumentAdditionResult, DocumentDeletionResult, IndexDocumentsConfig, IndexDocumentsMethod,
Setting,
@@ -11,7 +11,7 @@ use milli::update::{
use serde::{Deserialize, Serialize, Serializer};
use uuid::Uuid;

use super::error::Result;
use super::error::{IndexError, Result};
use super::index::{Index, IndexMeta};
use crate::update_file_store::UpdateFileStore;

@@ -31,10 +31,10 @@ where
.serialize(s)
}

#[derive(Clone, Default, Debug, Serialize, PartialEq)]
#[derive(Clone, Default, Debug, Serialize, PartialEq, Eq)]
pub struct Checked;

#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq)]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Unchecked;

#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
@@ -299,7 +299,7 @@ impl Index {
primary_key: Option<String>,
file_store: UpdateFileStore,
contents: impl IntoIterator<Item = Uuid>,
) -> Result<DocumentAdditionResult> {
) -> Result<Vec<Result<DocumentAdditionResult>>> {
trace!("performing document addition");
let mut txn = self.write_txn()?;

@@ -323,19 +323,31 @@ impl Index {
indexing_callback,
)?;

let mut results = Vec::new();
for content_uuid in contents.into_iter() {
let content_file = file_store.get_update(content_uuid)?;
let reader = DocumentBatchReader::from_reader(content_file)?;
builder.add_documents(reader)?;
let reader = DocumentsBatchReader::from_reader(content_file)?;
let (new_builder, user_result) = builder.add_documents(reader)?;
builder = new_builder;

let user_result = match user_result {
Ok(count) => Ok(DocumentAdditionResult {
indexed_documents: count,
number_of_documents: count,
}),
Err(e) => Err(IndexError::from(e)),
};

results.push(user_result);
}

let addition = builder.execute()?;
if results.iter().any(Result::is_ok) {
let addition = builder.execute()?;
txn.commit()?;
info!("document addition done: {:?}", addition);
}

txn.commit()?;

info!("document addition done: {:?}", addition);

Ok(addition)
Ok(results)
}

pub fn update_settings(&self, settings: &Settings<Checked>) -> Result<()> {

@@ -275,11 +275,13 @@ impl IndexControllerBuilder {

/// Set the index controller builder's max update store size.
pub fn set_max_task_store_size(&mut self, max_update_store_size: usize) -> &mut Self {
let max_update_store_size = clamp_to_page_size(max_update_store_size);
self.max_task_store_size.replace(max_update_store_size);
self
}

pub fn set_max_index_size(&mut self, size: usize) -> &mut Self {
let size = clamp_to_page_size(size);
self.max_index_size.replace(size);
self
}
@@ -645,6 +647,11 @@ pub async fn get_arc_ownership_blocking<T>(mut item: Arc<T>) -> T {
}
}

// Clamp the provided value to be a multiple of system page size.
fn clamp_to_page_size(size: usize) -> usize {
size / page_size::get() * page_size::get()
}
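
`clamp_to_page_size` rounds a byte size down to a multiple of the system page size (memory-map sizes need to be page-aligned); integer division followed by multiplication does the rounding. A sketch with the page size passed in explicitly instead of queried from the `page_size` crate:

```rust
// Same rounding as the diff's `clamp_to_page_size`, parameterized on the
// page size for the sake of a deterministic example.
fn clamp_to_page_size(size: usize, page: usize) -> usize {
    size / page * page
}

fn main() {
    // With a (hypothetical) 4096-byte page:
    assert_eq!(clamp_to_page_size(10_000, 4096), 8192); // rounded down
    assert_eq!(clamp_to_page_size(8192, 4096), 8192);   // already aligned
}
```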

#[cfg(test)]
mod test {
use futures::future::ok;
@@ -697,6 +704,7 @@ mod test {
highlight_pre_tag: DEFAULT_HIGHLIGHT_PRE_TAG(),
highlight_post_tag: DEFAULT_HIGHLIGHT_POST_TAG(),
crop_marker: DEFAULT_CROP_MARKER(),
matching_strategy: Default::default(),
};

let result = SearchResult {

@@ -51,7 +51,7 @@ impl MapIndexStore {
#[async_trait::async_trait]
impl IndexStore for MapIndexStore {
async fn create(&self, uuid: Uuid) -> Result<Index> {
// We need to keep the lock until we are sure the db file has been opened correclty, to
// We need to keep the lock until we are sure the db file has been opened correctly, to
// ensure that another db is not created at the same time.
let mut lock = self.index_store.write().await;

@@ -150,25 +150,34 @@ mod real {
})
.await;

let event = match result {
Ok(Ok(result)) => TaskEvent::Succeeded {
timestamp: OffsetDateTime::now_utc(),
result: TaskResult::DocumentAddition {
indexed_documents: result.indexed_documents,
},
},
Ok(Err(e)) => TaskEvent::Failed {
timestamp: OffsetDateTime::now_utc(),
error: e.into(),
},
Err(e) => TaskEvent::Failed {
timestamp: OffsetDateTime::now_utc(),
error: IndexResolverError::from(e).into(),
},
};

for task in tasks.iter_mut() {
task.events.push(event.clone());
match result {
Ok(Ok(results)) => {
for (task, result) in tasks.iter_mut().zip(results) {
let event = match result {
Ok(addition) => {
TaskEvent::succeeded(TaskResult::DocumentAddition {
indexed_documents: addition.indexed_documents,
})
}
Err(error) => {
TaskEvent::failed(IndexResolverError::from(error))
}
};
task.events.push(event);
}
}
Ok(Err(e)) => {
let event = TaskEvent::failed(e);
for task in tasks.iter_mut() {
task.events.push(event.clone());
}
}
Err(e) => {
let event = TaskEvent::failed(IndexResolverError::from(e));
for task in tasks.iter_mut() {
task.events.push(event.clone());
}
}
}
}
_ => panic!("invalid batch!"),

@@ -11,6 +11,8 @@ mod snapshot;
pub mod tasks;
mod update_file_store;

use std::env::VarError;
use std::ffi::OsStr;
use std::path::Path;

pub use index_controller::MeiliSearch;
@@ -35,3 +37,14 @@ pub fn is_empty_db(db_path: impl AsRef<Path>) -> bool {
true
}
}

/// Checks if the key is defined in the environment variables.
/// If not, inserts it with the given value.
pub fn export_to_env_if_not_present<T>(key: &str, value: T)
where
T: AsRef<OsStr>,
{
if let Err(VarError::NotPresent) = std::env::var(key) {
std::env::set_var(key, value);
}
}
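
Because the helper only writes the variable when it is absent, a value exported by the user always wins over a config-file default. A usage sketch (the helper body mirrors the diff; the variable name is one of the constants introduced below):

```rust
use std::env::{self, VarError};
use std::ffi::OsStr;

/// Only sets the variable when it is not already defined.
fn export_to_env_if_not_present<T: AsRef<OsStr>>(key: &str, value: T) {
    if let Err(VarError::NotPresent) = env::var(key) {
        env::set_var(key, value);
    }
}

fn main() {
    // Simulate a value exported by the user before startup.
    env::set_var("MEILI_MAX_INDEXING_THREADS", "8");

    // The config-file default does not overwrite it.
    export_to_env_if_not_present("MEILI_MAX_INDEXING_THREADS", "4");
    assert_eq!(env::var("MEILI_MAX_INDEXING_THREADS").unwrap(), "8");
}
```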
|
||||
|
@ -1,33 +1,40 @@
|
||||
use crate::export_to_env_if_not_present;
|
||||
|
||||
use core::fmt;
|
||||
use std::{convert::TryFrom, num::ParseIntError, ops::Deref, str::FromStr};
|
||||
|
||||
use byte_unit::{Byte, ByteError};
|
||||
use clap::Parser;
|
||||
use milli::update::IndexerConfig;
|
||||
use serde::Serialize;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sysinfo::{RefreshKind, System, SystemExt};
|
||||
|
||||
#[derive(Debug, Clone, Parser, Serialize)]
|
||||
const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY";
|
||||
const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS";
|
||||
const DISABLE_AUTO_BATCHING: &str = "DISABLE_AUTO_BATCHING";
|
||||
const DEFAULT_LOG_EVERY_N: usize = 100000;
|
||||
|
||||
#[derive(Debug, Clone, Parser, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case", deny_unknown_fields)]
|
||||
pub struct IndexerOpts {
|
||||
/// The amount of documents to skip before printing
|
||||
/// a log regarding the indexing advancement.
|
||||
#[serde(skip)]
|
||||
#[clap(long, default_value = "100000", hide = true)] // 100k
|
||||
#[serde(skip_serializing, default = "default_log_every_n")]
|
||||
#[clap(long, default_value_t = default_log_every_n(), hide = true)] // 100k
|
||||
pub log_every_n: usize,
|
||||
|
||||
/// Grenad max number of chunks in bytes.
|
||||
#[serde(skip)]
|
||||
#[serde(skip_serializing)]
|
||||
#[clap(long, hide = true)]
|
||||
pub max_nb_chunks: Option<usize>,
|
||||
|
||||
/// The maximum amount of memory the indexer will use. It defaults to 2/3
|
||||
/// of the available memory. It is recommended to use something like 80%-90%
|
||||
/// of the available memory, no more.
|
||||
/// The maximum amount of memory the indexer will use.
|
||||
///
|
||||
/// In case the engine is unable to retrieve the available memory the engine will
|
||||
/// try to use the memory it needs but without real limit, this can lead to
|
||||
/// Out-Of-Memory issues and it is recommended to specify the amount of memory to use.
|
||||
#[clap(long, env = "MEILI_MAX_INDEXING_MEMORY", default_value_t)]
|
||||
#[clap(long, env = MEILI_MAX_INDEXING_MEMORY, default_value_t)]
|
||||
#[serde(default)]
|
||||
pub max_indexing_memory: MaxMemory,
|
||||
|
||||
/// The maximum number of threads the indexer will use.
|
||||
@ -35,33 +42,41 @@ pub struct IndexerOpts {
|
||||
/// it will use the maximum number of available cores.
|
||||
///
|
||||
/// It defaults to half of the available threads.
|
||||
#[clap(long, env = "MEILI_MAX_INDEXING_THREADS", default_value_t)]
|
||||
#[clap(long, env = MEILI_MAX_INDEXING_THREADS, default_value_t)]
|
||||
#[serde(default)]
|
||||
pub max_indexing_threads: MaxThreads,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Parser, Default, Serialize)]
|
||||
#[derive(Debug, Clone, Parser, Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case", deny_unknown_fields)]
|
||||
pub struct SchedulerConfig {
|
||||
/// enable the autobatching experimental feature
|
||||
#[clap(long, hide = true)]
|
||||
pub enable_auto_batching: bool,
|
||||
/// The engine will disable task auto-batching,
|
||||
/// and will sequencialy compute each task one by one.
|
||||
#[clap(long, env = DISABLE_AUTO_BATCHING)]
|
||||
#[serde(default)]
|
||||
pub disable_auto_batching: bool,
|
||||
}
|
||||
|
||||
    // The maximum number of updates of the same type that can be batched together.
    // If unspecified, this is unlimited. A value of 0 is interpreted as 1.
    #[clap(long, requires = "enable-auto-batching", hide = true)]
    pub max_batch_size: Option<usize>,

    // The maximum number of documents in a document batch. Since batches must contain at least one
    // update for the scheduler to make progress, the number of documents in a batch will be at
    // least the number of documents of its first update.
    #[clap(long, requires = "enable-auto-batching", hide = true)]
    pub max_documents_per_batch: Option<usize>,

    /// Debounce duration in seconds
    ///
    /// When a new task is enqueued, the scheduler waits for `debounce_duration_sec` seconds for new updates before
    /// starting to process a batch of updates.
    #[clap(long, requires = "enable-auto-batching", hide = true)]
    pub debounce_duration_sec: Option<u64>,
impl IndexerOpts {
    /// Exports the values to their corresponding env vars if they are not set.
    pub fn export_to_env(self) {
        let IndexerOpts {
            max_indexing_memory,
            max_indexing_threads,
            log_every_n: _,
            max_nb_chunks: _,
        } = self;
        if let Some(max_indexing_memory) = max_indexing_memory.0 {
            export_to_env_if_not_present(
                MEILI_MAX_INDEXING_MEMORY,
                max_indexing_memory.to_string(),
            );
        }
        export_to_env_if_not_present(
            MEILI_MAX_INDEXING_THREADS,
            max_indexing_threads.0.to_string(),
        );
    }
}
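
`export_to_env_if_not_present` is defined elsewhere in this crate; per the doc comment above it only has to set a variable when the user has not already provided one, so explicit configuration always wins. A minimal sketch of that behavior, assuming a plain `std::env` implementation:

```rust
use std::env;

// Minimal sketch (assumed implementation, not the crate's actual code):
// only write the variable when it is absent, so values already set by
// the user are never overridden.
fn export_to_env_if_not_present(key: &str, value: String) {
    if env::var_os(key).is_none() {
        env::set_var(key, value);
    }
}
```
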
impl TryFrom<&IndexerOpts> for IndexerConfig {
@@ -94,8 +109,17 @@ impl Default for IndexerOpts {
    }
}

impl SchedulerConfig {
    pub fn export_to_env(self) {
        let SchedulerConfig {
            disable_auto_batching,
        } = self;
        export_to_env_if_not_present(DISABLE_AUTO_BATCHING, disable_auto_batching.to_string());
    }
}

/// A type used to detect the max memory available and use 2/3 of it.
#[derive(Debug, Clone, Copy, Serialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct MaxMemory(Option<Byte>);
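
The doc comment says the type detects the available memory and takes 2/3 of it. A sketch of how such a default could be derived from the `fn total_memory_bytes() -> Option<u64>` helper visible in the next hunk; treat `Byte::from_bytes` as an assumption about the `byte_unit` API in use:

```rust
// Sketch only: two thirds of the detected total memory, or `None` when
// detection fails, which triggers the unbounded fallback described in
// the field's doc comment above.
impl Default for MaxMemory {
    fn default() -> Self {
        MaxMemory(total_memory_bytes().map(|total| Byte::from_bytes(u128::from(total) / 3 * 2)))
    }
}
```
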
impl FromStr for MaxMemory {
@@ -151,7 +175,7 @@ fn total_memory_bytes() -> Option<u64> {
    }
}

#[derive(Debug, Clone, Copy, Serialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct MaxThreads(usize);

impl FromStr for MaxThreads {
@@ -181,3 +205,7 @@ impl Deref for MaxThreads {
        &self.0
    }
}

fn default_log_every_n() -> usize {
    DEFAULT_LOG_EVERY_N
}
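
`MaxThreads` mirrors `MaxMemory`: per the doc comment on `max_indexing_threads`, it defaults to half of the available threads. A sketch of that default, assuming a core-count helper such as the `num_cpus` crate is available (the actual implementation is not shown in this diff):

```rust
// Sketch only: half the logical cores, but never zero.
impl Default for MaxThreads {
    fn default() -> Self {
        MaxThreads((num_cpus::get() / 2).max(1))
    }
}
```
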
@@ -181,6 +181,7 @@ impl SnapshotJob {

        let mut options = milli::heed::EnvOpenOptions::new();
        options.map_size(self.index_size);
        options.max_readers(1024);
        let index = milli::Index::new(options, entry.path())?;
        index.copy_to_path(dst, CompactionOption::Enabled)?;
    }
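
The new `max_readers(1024)` call raises LMDB's reader-slot limit (126 by default), which matters when many read transactions may be open while a snapshot copies the index. The same pattern in isolation, as a sketch with a placeholder map size and a hypothetical function name:

```rust
use std::path::Path;

// Standalone sketch (placeholder values, hypothetical helper): open an index
// the same way the snapshot job does, with a raised reader limit.
fn open_index_for_snapshot(path: &Path, map_size: usize) -> milli::Result<milli::Index> {
    let mut options = milli::heed::EnvOpenOptions::new();
    options.map_size(map_size);
    options.max_readers(1024); // LMDB's default is 126 reader slots
    milli::Index::new(options, path)
}
```
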
@@ -3,7 +3,6 @@ use std::collections::{hash_map::Entry, BinaryHeap, HashMap, VecDeque};
use std::ops::{Deref, DerefMut};
use std::slice;
use std::sync::Arc;
use std::time::Duration;

use atomic_refcell::AtomicRefCell;
use milli::update::IndexDocumentsMethod;
@@ -248,17 +247,10 @@ impl Scheduler {
    pub fn new(
        store: TaskStore,
        performers: Vec<Arc<dyn BatchHandler + Sync + Send + 'static>>,
        mut config: SchedulerConfig,
        config: SchedulerConfig,
    ) -> Result<Arc<RwLock<Self>>> {
        let (notifier, rcv) = watch::channel(());

        let debounce_time = config.debounce_duration_sec;

        // Disable autobatching
        if !config.enable_auto_batching {
            config.max_batch_size = Some(1);
        }

        let this = Self {
            snapshots: VecDeque::new(),
            tasks: TaskQueue::default(),
@@ -275,12 +267,7 @@ impl Scheduler {

        let this = Arc::new(RwLock::new(this));

        let update_loop = UpdateLoop::new(
            this.clone(),
            performers,
            debounce_time.filter(|&v| v > 0).map(Duration::from_secs),
            rcv,
        );
        let update_loop = UpdateLoop::new(this.clone(), performers, rcv);

        tokio::task::spawn_local(update_loop.run());

@@ -416,7 +403,7 @@ impl Scheduler {
    }
}

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq)]
pub enum Processing {
    DocumentAdditions(Vec<TaskId>),
    IndexUpdate(TaskId),
@@ -497,27 +484,17 @@ fn make_batch(tasks: &mut TaskQueue, config: &SchedulerConfig) -> Processing {
                match list.peek() {
                    Some(pending) if pending.kind == kind => {
                        // We always need to process at least one task for the scheduler to make progress.
                        if task_list.len() >= config.max_batch_size.unwrap_or(usize::MAX).max(1)
                        {
                        if config.disable_auto_batching && !task_list.is_empty() {
                            break;
                        }
                        let pending = list.pop().unwrap();
                        task_list.push(pending.id);

                        // We add the number of documents to the count if we are scheduling document additions and
                        // stop adding if we already have enough.
                        //
                        // We check that bound only after adding the current task to the batch, so that a batch contains at least one task.
                        // We add the number of documents to the count if we are scheduling document additions.
                        match pending.kind {
                            TaskType::DocumentUpdate { number }
                            | TaskType::DocumentAddition { number } => {
                                doc_count += number;

                                if doc_count
                                    >= config.max_documents_per_batch.unwrap_or(usize::MAX)
                                {
                                    break;
                                }
                            }
                            _ => (),
                        }
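
The net effect of this hunk: a batch always contains at least one task, and with `disable_auto_batching` set it contains exactly one; the per-batch document cap goes away with the removed config fields. A toy illustration of the resulting batch sizes (not code from the diff):

```rust
// Toy model (hypothetical helper) of the new batching invariant.
fn expected_batch_len(disable_auto_batching: bool, queued_same_kind: usize) -> usize {
    if queued_same_kind == 0 {
        0
    } else if disable_auto_batching {
        1 // exactly one task per batch
    } else {
        queued_same_kind // same-kind tasks batch together
    }
}

fn main() {
    assert_eq!(expected_batch_len(true, 5), 1);
    assert_eq!(expected_batch_len(false, 5), 5);
}
```
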
@@ -128,7 +128,7 @@ impl Task {
    }
}

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
pub enum DocumentDeletion {
    Clear,
@@ -22,11 +22,13 @@ pub use store::test::MockStore as Store;
#[cfg(not(test))]
pub use store::Store;

type FilterFn = Box<dyn Fn(&Task) -> bool + Sync + Send + 'static>;

/// Defines constraints to be applied when querying for Tasks from the store.
#[derive(Default)]
pub struct TaskFilter {
    indexes: Option<HashSet<String>>,
    filter_fn: Option<Box<dyn Fn(&Task) -> bool + Sync + Send + 'static>>,
    filter_fn: Option<FilterFn>,
}

impl TaskFilter {
@@ -51,8 +53,8 @@ impl TaskFilter {
            .insert(index);
    }

    pub fn filter_fn(&mut self, f: impl Fn(&Task) -> bool + Sync + Send + 'static) {
        self.filter_fn.replace(Box::new(f));
    pub fn filter_fn(&mut self, f: FilterFn) {
        self.filter_fn.replace(f);
    }
}
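
With the new signature the caller boxes the closure itself instead of passing an `impl Fn`. A usage sketch; the `Task::index_uid` accessor it relies on appears elsewhere in this diff:

```rust
// Usage sketch: the caller now builds the `FilterFn` box explicitly.
fn only_tasks_with_index(filter: &mut TaskFilter) {
    filter.filter_fn(Box::new(|task: &Task| task.index_uid().is_some()));
}
```
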
@@ -115,7 +117,7 @@ impl TaskStore {
        match filter {
            Some(filter) => filter
                .pass(&task)
                .then(|| task)
                .then_some(task)
                .ok_or(TaskError::UnexistingTask(id)),
            None => Ok(task),
        }
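
`bool::then` takes a closure and is meant for lazily computed values; `bool::then_some` (stable since Rust 1.62) takes the value directly, which is simpler when, as here, the value already exists:

```rust
fn main() {
    assert_eq!(true.then(|| 42), Some(42)); // lazy: closure runs only on `true`
    assert_eq!(true.then_some(42), Some(42)); // eager: value is passed as-is
    assert_eq!(false.then_some(42), None);
}
```
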
@@ -63,7 +63,7 @@ impl Store {
    /// Returns the id for the next task.
    ///
    /// The required `mut txn` acts as a reservation system. It guarantees that as long as you commit
    /// the task to the store in the same transaction, no one else will hav this task id.
    /// the task to the store in the same transaction, no one else will have this task id.
    pub fn next_task_id(&self, txn: &mut RwTxn) -> Result<TaskId> {
        let id = self
            .tasks
@@ -372,9 +372,6 @@ pub mod test {
        let tasks = store.list_tasks(&txn, None, Some(filter), None).unwrap();

        assert_eq!(tasks.len(), 1);
        assert_eq!(
            &*tasks.first().as_ref().unwrap().index_uid().unwrap(),
            "test"
        );
        assert_eq!(tasks.first().as_ref().unwrap().index_uid().unwrap(), "test");
    }
}
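
The simplified assertion works because `assert_eq!` compares through cross-type `PartialEq` impls between string types, so the explicit `&*` re-borrow is unnecessary. A standard-library example of the same shape (whether `index_uid()` returns a `String`-like type here is not shown in this diff):

```rust
fn main() {
    let uid = String::from("test");
    assert_eq!(uid, "test"); // `PartialEq<&str> for String` makes this compile
}
```
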
@@ -1,9 +1,7 @@
use std::sync::Arc;
use std::time::Duration;

use time::OffsetDateTime;
use tokio::sync::{watch, RwLock};
use tokio::time::interval_at;

use super::batch::Batch;
use super::error::Result;
@@ -17,20 +15,17 @@ pub struct UpdateLoop {
    performers: Vec<Arc<dyn BatchHandler + Send + Sync + 'static>>,

    notifier: Option<watch::Receiver<()>>,
    debounce_duration: Option<Duration>,
}

impl UpdateLoop {
    pub fn new(
        scheduler: Arc<RwLock<Scheduler>>,
        performers: Vec<Arc<dyn BatchHandler + Send + Sync + 'static>>,
        debuf_duration: Option<Duration>,
        notifier: watch::Receiver<()>,
    ) -> Self {
        Self {
            scheduler,
            performers,
            debounce_duration: debuf_duration,
            notifier: Some(notifier),
        }
    }
@@ -43,11 +38,6 @@ impl UpdateLoop {
                break;
            }

            if let Some(t) = self.debounce_duration {
                let mut interval = interval_at(tokio::time::Instant::now() + t, t);
                interval.tick().await;
            };

            if let Err(e) = self.process_next_batch().await {
                log::error!("an error occurred while processing an update batch: {}", e);
            }
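
With the debounce gone, the loop simply waits on the `watch` channel created in `Scheduler::new` and processes one batch per notification. A minimal, self-contained sketch of that notifier pattern (not the scheduler's actual code):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (notifier, mut rcv) = watch::channel(());
    let handle = tokio::spawn(async move {
        // Mirrors the update loop: wake on every notification, then process.
        while rcv.changed().await.is_ok() {
            println!("processing next batch");
        }
    });
    notifier.send(()).unwrap();
    drop(notifier); // closing the sender ends the loop
    handle.await.unwrap();
}
```
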
@@ -3,7 +3,7 @@ use std::io::{self, BufReader, BufWriter, Write};
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};

use milli::documents::DocumentBatchReader;
use milli::documents::DocumentsBatchReader;
use serde_json::Map;
use tempfile::{NamedTempFile, PersistError};
use uuid::Uuid;
@@ -44,7 +44,8 @@ into_update_store_error!(
    PersistError,
    io::Error,
    serde_json::Error,
    milli::documents::Error
    milli::documents::Error,
    milli::documents::DocumentsBatchCursorError
);

impl UpdateFile {
@@ -149,12 +150,13 @@ mod store {

        let update_file = File::open(update_file_path)?;
        let mut dst_file = NamedTempFile::new_in(&dump_path)?;
        let mut document_reader = DocumentBatchReader::from_reader(update_file)?;
        let (mut document_cursor, index) =
            DocumentsBatchReader::from_reader(update_file)?.into_cursor_and_fields_index();

        let mut document_buffer = Map::new();
        // TODO: we need to find a way to do this more efficiently. (create a custom serializer
        // for jsonl for example...)
        while let Some((index, document)) = document_reader.next_document_with_index()? {
        while let Some(document) = document_cursor.next_document()? {
            for (field_id, content) in document.iter() {
                if let Some(field_name) = index.name(field_id) {
                    let content = serde_json::from_slice(content)?;
@@ -1,6 +1,6 @@
[package]
name = "meilisearch-types"
version = "0.28.1"
version = "0.29.1"
authors = ["marin <postma.marin@protonmail.com>"]
edition = "2021"

@@ -5,7 +5,7 @@ use std::str::FromStr;

/// An index uid is composed of only ascii alphanumeric characters, - and _, between 1 and 400
/// bytes long
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "test-traits", derive(proptest_derive::Arbitrary))]
pub struct IndexUid(
    #[cfg_attr(feature = "test-traits", proptest(regex("[a-zA-Z0-9_-]{1,400}")))] String,
@@ -1,6 +1,6 @@
[package]
name = "permissive-json-pointer"
version = "0.28.1"
version = "0.29.1"
edition = "2021"
description = "A permissive json pointer"
readme = "README.md"
@@ -49,7 +49,7 @@ fn contained_in(selector: &str, key: &str) -> bool {
/// map_leaf_values(
///     value.as_object_mut().unwrap(),
///     ["jean.race.name"],
///     |key, value| match (value, dbg!(key)) {
///     |key, value| match (value, key) {
///         (Value::String(name), "jean.race.name") => *name = "patou".to_string(),
///         _ => unreachable!(),
///     },
@@ -729,7 +729,7 @@ mod tests {
        map_leaf_values(
            value.as_object_mut().unwrap(),
            ["jean.race.name"],
            |key, value| match (value, dbg!(key)) {
            |key, value| match (value, key) {
                (Value::String(name), "jean.race.name") => *name = S("patou"),
                _ => unreachable!(),
            },
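
Removing `dbg!` is behavior-preserving: `dbg!(x)` prints the file, line, and value to stderr and then returns `x`, so the match scrutinee is unchanged and dropping it only silences doc-example and test output. For instance:

```rust
fn main() {
    let key = "jean.race.name";
    // `dbg!` passes its argument through, printing it to stderr as a side effect.
    assert_eq!(dbg!(key), key);
}
```
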