mirror of
https://github.com/meilisearch/meilisearch.git
synced 2025-07-21 05:41:01 +00:00
Compare commits
59 Commits
reduce-pre
...
bump-to-ed
Author | SHA1 | Date | |
---|---|---|---|
aa87064a13 | |||
2762d5a32a | |||
a0bfcf8872 | |||
64477aac60 | |||
4d90e3d2ec | |||
249da5846c | |||
ee15d4fe77 | |||
f0f6c3000f | |||
5607802fe1 | |||
a8afd5dbcb | |||
55f620a986 | |||
be6abb952d | |||
2f07afa97e | |||
bf3a29b60d | |||
3acf036526 | |||
eefefc482b | |||
43c8a206b4 | |||
a8c407fa36 | |||
18bc56f1fa | |||
38b3e03dde | |||
6b1c262b74 | |||
0f654e45c9 | |||
d71c6f3483 | |||
8b4166410c | |||
9d3037aa1a | |||
5414887bff | |||
03a0550b63 | |||
2800e42243 | |||
5759afac41 | |||
868c902935 | |||
e019ad7692 | |||
1f67f373d1 | |||
2c0bd35923 | |||
b3aaa64de5 | |||
7b3072ad28 | |||
db26c1e5bf | |||
9aee12c906 | |||
debd2b21b8 | |||
39aca661dd | |||
5b51e8a083 | |||
3928fb36b3 | |||
2ddc1d2258 | |||
7c267a8a0e | |||
d39d915a7e | |||
3160ddf9df | |||
d286e63f15 | |||
9ee6254eec | |||
e2c824a7cd | |||
0dd65caffe | |||
4397b7d170 | |||
15db203b7d | |||
041f635214 | |||
537bf27e7c | |||
cf31a65a88 | |||
0f7d71041f | |||
91d221ebe7 | |||
9162e8ba04 | |||
2118cc092e | |||
c7564d500f |
4
.github/ISSUE_TEMPLATE/sprint_issue.md
vendored
4
.github/ISSUE_TEMPLATE/sprint_issue.md
vendored
@ -22,6 +22,10 @@ Related product discussion:
|
||||
|
||||
<!---If necessary, create a list with technical/product steps-->
|
||||
|
||||
### Are you modifying a database?
|
||||
- [ ] If not, add the `no db change` label to your PR, and you're good to merge.
|
||||
- [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
|
||||
|
||||
### Reminders when modifying the API
|
||||
|
||||
- [ ] Update the openAPI file with utoipa:
|
||||
|
39
.github/workflows/bench-manual.yml
vendored
39
.github/workflows/bench-manual.yml
vendored
@ -1,28 +1,27 @@
|
||||
name: Bench (manual)
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
workload:
|
||||
description: 'The path to the workloads to execute (workloads/...)'
|
||||
required: true
|
||||
default: 'workloads/movies.json'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
workload:
|
||||
description: "The path to the workloads to execute (workloads/...)"
|
||||
required: true
|
||||
default: "workloads/movies.json"
|
||||
|
||||
env:
|
||||
WORKLOAD_NAME: ${{ github.event.inputs.workload }}
|
||||
WORKLOAD_NAME: ${{ github.event.inputs.workload }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 180 # 3h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
- name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
|
||||
run: |
|
||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 180 # 3h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
- name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
|
||||
run: |
|
||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}
|
||||
|
136
.github/workflows/bench-pr.yml
vendored
136
.github/workflows/bench-pr.yml
vendored
@ -1,82 +1,82 @@
|
||||
name: Bench (PR)
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
issues: write
|
||||
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||
|
||||
jobs:
|
||||
run-benchmarks-on-comment:
|
||||
if: startsWith(github.event.comment.body, '/bench')
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 180 # 3h
|
||||
steps:
|
||||
- name: Check permissions
|
||||
id: permission
|
||||
env:
|
||||
PR_AUTHOR: ${{github.event.issue.user.login }}
|
||||
COMMENT_AUTHOR: ${{github.event.comment.user.login }}
|
||||
REPOSITORY: ${{github.repository}}
|
||||
PR_ID: ${{github.event.issue.number}}
|
||||
run: |
|
||||
PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
|
||||
if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
|
||||
then
|
||||
echo "::notice title=Authentication success::PR author authenticated"
|
||||
else
|
||||
echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
|
||||
exit 1
|
||||
fi
|
||||
if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
|
||||
then
|
||||
echo "::notice title=Authentication success::Comment author authenticated"
|
||||
else
|
||||
echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
|
||||
then
|
||||
echo "::notice title=Authentication success::PR started from main repository"
|
||||
else
|
||||
echo "::error title=Authentication error::PR started from a fork"
|
||||
exit 1
|
||||
fi
|
||||
run-benchmarks-on-comment:
|
||||
if: startsWith(github.event.comment.body, '/bench')
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 180 # 3h
|
||||
steps:
|
||||
- name: Check permissions
|
||||
id: permission
|
||||
env:
|
||||
PR_AUTHOR: ${{github.event.issue.user.login }}
|
||||
COMMENT_AUTHOR: ${{github.event.comment.user.login }}
|
||||
REPOSITORY: ${{github.repository}}
|
||||
PR_ID: ${{github.event.issue.number}}
|
||||
run: |
|
||||
PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
|
||||
if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
|
||||
then
|
||||
echo "::notice title=Authentication success::PR author authenticated"
|
||||
else
|
||||
echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
|
||||
exit 1
|
||||
fi
|
||||
if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
|
||||
then
|
||||
echo "::notice title=Authentication success::Comment author authenticated"
|
||||
else
|
||||
echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
|
||||
then
|
||||
echo "::notice title=Authentication success::PR started from main repository"
|
||||
else
|
||||
echo "::error title=Authentication error::PR started from a fork"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Check for Command
|
||||
id: command
|
||||
uses: xt0rted/slash-command-action@v2
|
||||
with:
|
||||
command: bench
|
||||
reaction-type: "rocket"
|
||||
repo-token: ${{ env.GH_TOKEN }}
|
||||
- name: Check for Command
|
||||
id: command
|
||||
uses: xt0rted/slash-command-action@v2
|
||||
with:
|
||||
command: bench
|
||||
reaction-type: "rocket"
|
||||
repo-token: ${{ env.GH_TOKEN }}
|
||||
|
||||
- uses: xt0rted/pull-request-comment-branch@v3
|
||||
id: comment-branch
|
||||
with:
|
||||
repo_token: ${{ env.GH_TOKEN }}
|
||||
- uses: xt0rted/pull-request-comment-branch@v3
|
||||
id: comment-branch
|
||||
with:
|
||||
repo_token: ${{ env.GH_TOKEN }}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
if: success()
|
||||
with:
|
||||
fetch-depth: 0 # fetch full history to be able to get main commit sha
|
||||
ref: ${{ steps.comment-branch.outputs.head_ref }}
|
||||
- uses: actions/checkout@v3
|
||||
if: success()
|
||||
with:
|
||||
fetch-depth: 0 # fetch full history to be able to get main commit sha
|
||||
ref: ${{ steps.comment-branch.outputs.head_ref }}
|
||||
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
with:
|
||||
profile: minimal
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
- name: Run benchmarks on PR ${{ github.event.issue.id }}
|
||||
run: |
|
||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" \
|
||||
--dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" \
|
||||
--reason "[Comment](${{ github.event.comment.html_url }}) on [#${{ github.event.issue.number }}](${{ github.event.issue.html_url }})" \
|
||||
-- ${{ steps.command.outputs.command-arguments }} > benchlinks.txt
|
||||
- name: Run benchmarks on PR ${{ github.event.issue.id }}
|
||||
run: |
|
||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" \
|
||||
--dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" \
|
||||
--reason "[Comment](${{ github.event.comment.html_url }}) on [#${{ github.event.issue.number }}](${{ github.event.issue.html_url }})" \
|
||||
-- ${{ steps.command.outputs.command-arguments }} > benchlinks.txt
|
||||
|
||||
- name: Send comment in PR
|
||||
run: |
|
||||
gh pr comment ${{github.event.issue.number}} --body-file benchlinks.txt
|
||||
- name: Send comment in PR
|
||||
run: |
|
||||
gh pr comment ${{github.event.issue.number}} --body-file benchlinks.txt
|
||||
|
33
.github/workflows/bench-push-indexing.yml
vendored
33
.github/workflows/bench-push-indexing.yml
vendored
@ -1,23 +1,22 @@
|
||||
name: Indexing bench (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 180 # 3h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
|
||||
run: |
|
||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 180 # 3h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
|
||||
run: |
|
||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json
|
||||
|
8
.github/workflows/benchmarks-manual.yml
vendored
8
.github/workflows/benchmarks-manual.yml
vendored
@ -4,9 +4,9 @@ on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dataset_name:
|
||||
description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
|
||||
description: "The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)"
|
||||
required: false
|
||||
default: 'search_songs'
|
||||
default: "search_songs"
|
||||
|
||||
env:
|
||||
BENCH_NAME: ${{ github.event.inputs.dataset_name }}
|
||||
@ -18,7 +18,7 @@ jobs:
|
||||
timeout-minutes: 4320 # 72h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
@ -67,7 +67,7 @@ jobs:
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
- name: "README: compare with another benchmark"
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
|
2
.github/workflows/benchmarks-pr.yml
vendored
2
.github/workflows/benchmarks-pr.yml
vendored
@ -44,7 +44,7 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
|
@ -16,7 +16,7 @@ jobs:
|
||||
timeout-minutes: 4320 # 72h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
@ -69,7 +69,7 @@ jobs:
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
- name: "README: compare with another benchmark"
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
|
@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
@ -68,7 +68,7 @@ jobs:
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
- name: "README: compare with another benchmark"
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
|
@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
@ -68,7 +68,7 @@ jobs:
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
- name: "README: compare with another benchmark"
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
|
@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
@ -68,7 +68,7 @@ jobs:
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
- name: "README: compare with another benchmark"
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
|
57
.github/workflows/db-change-comments.yml
vendored
Normal file
57
.github/workflows/db-change-comments.yml
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
name: Comment when db change labels are added
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [labeled]
|
||||
|
||||
env:
|
||||
MESSAGE: |
|
||||
### Hello, I'm a bot 🤖
|
||||
|
||||
You are receiving this message because you declared that this PR make changes to the Meilisearch database.
|
||||
Depending on the nature of the change, additional actions might be required on your part. The following sections detail the additional actions depending on the nature of the change, please copy the relevant section in the description of your PR, and make sure to perform the required actions.
|
||||
|
||||
Thank you for contributing to Meilisearch :heart:
|
||||
|
||||
## This PR makes forward-compatible changes
|
||||
|
||||
*Forward-compatible changes are changes to the database such that databases created in an older version of Meilisearch are still valid in the new version of Meilisearch. They usually represent additive changes, like adding a new optional attribute or setting.*
|
||||
|
||||
- [ ] Detail the change to the DB format and why they are forward compatible
|
||||
- [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
|
||||
|
||||
|
||||
## This PR makes breaking changes
|
||||
|
||||
*Breaking changes are changes to the database such that databases created in an older version of Meilisearch need changes to remain valid in the new version of Meilisearch. This typically happens when the way to store the data changed (change of database, new required key, etc). This can also happen due to breaking changes in the API of an experimental feature. ⚠️ This kind of changes are more difficult to achieve safely, so proceed with caution and test dumpless upgrade right before merging the PR.*
|
||||
|
||||
- [ ] Detail the changes to the DB format,
|
||||
- [ ] which are compatible, and why
|
||||
- [ ] which are not compatible, why, and how they will be fixed up in the upgrade
|
||||
- [ ] /!\ Ensure all the read operations still work!
|
||||
- If the change happened in milli, you may need to check the version of the database before doing any read operation
|
||||
- If the change happened in the index-scheduler, make sure the new code can immediately read the old database
|
||||
- If the change happened in the meilisearch-auth database, reach out to the team; we don't know yet how to handle these changes
|
||||
- [ ] Write the code to go from the old database to the new one
|
||||
- If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
|
||||
- If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
|
||||
- [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
|
||||
|
||||
|
||||
jobs:
|
||||
add-comment:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.label.name == 'db change'
|
||||
steps:
|
||||
- name: Add comment
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
const message = process.env.MESSAGE;
|
||||
github.rest.issues.createComment({
|
||||
issue_number: context.issue.number,
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
body: message
|
||||
})
|
28
.github/workflows/db-change-missing.yml
vendored
Normal file
28
.github/workflows/db-change-missing.yml
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
name: Check db change labels
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, labeled, unlabeled]
|
||||
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||
|
||||
jobs:
|
||||
check-labels:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Check db change labels
|
||||
id: check_labels
|
||||
run: |
|
||||
URL=/repos/meilisearch/meilisearch/pulls/${{ github.event.pull_request.number }}/labels
|
||||
echo ${{ github.event.pull_request.number }}
|
||||
echo $URL
|
||||
LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/meilisearch/meilisearch/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
|
||||
if [[ ! "$LABELS" =~ "db change" && ! "$LABELS" =~ "no db change" ]]; then
|
||||
echo "::error::Pull request must contain either the 'db change' or 'no db change' label."
|
||||
exit 1
|
||||
else
|
||||
echo "The label is set"
|
||||
fi
|
2
.github/workflows/flaky-tests.yml
vendored
2
.github/workflows/flaky-tests.yml
vendored
@ -17,7 +17,7 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install -y curl
|
||||
apt-get install build-essential -y
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Install cargo-flaky
|
||||
run: cargo install cargo-flaky
|
||||
- name: Run cargo flaky in the dumps
|
||||
|
2
.github/workflows/fuzzer-indexing.yml
vendored
2
.github/workflows/fuzzer-indexing.yml
vendored
@ -12,7 +12,7 @@ jobs:
|
||||
timeout-minutes: 4320 # 72h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
|
||||
|
42
.github/workflows/milestone-workflow.yml
vendored
42
.github/workflows/milestone-workflow.yml
vendored
@ -5,6 +5,7 @@ name: Milestone's workflow
|
||||
# For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
|
||||
# - the roadmap issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/roadmap-issue.md
|
||||
# - the changelog issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/changelog-issue.md
|
||||
# - update the ruleset to add the current release version to the list of allowed versions and be able to use the merge queue.
|
||||
|
||||
# For each Milestone closed
|
||||
# - the `release_version` label is created
|
||||
@ -21,10 +22,9 @@ env:
|
||||
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||
|
||||
jobs:
|
||||
|
||||
# -----------------
|
||||
# MILESTONE CREATED
|
||||
# -----------------
|
||||
# -----------------
|
||||
# MILESTONE CREATED
|
||||
# -----------------
|
||||
|
||||
get-release-version:
|
||||
if: github.event.action == 'created'
|
||||
@ -148,9 +148,37 @@ jobs:
|
||||
--body-file $ISSUE_TEMPLATE \
|
||||
--milestone $MILESTONE_VERSION
|
||||
|
||||
# ----------------
|
||||
# MILESTONE CLOSED
|
||||
# ----------------
|
||||
update-ruleset:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Install jq
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y jq
|
||||
- name: Update ruleset
|
||||
env:
|
||||
# gh api repos/meilisearch/meilisearch/rulesets --jq '.[] | {name: .name, id: .id}'
|
||||
RULESET_ID: 4253297
|
||||
BRANCH_NAME: ${{ github.event.inputs.branch_name }}
|
||||
run: |
|
||||
# Get current ruleset conditions
|
||||
CONDITIONS=$(gh api repos/meilisearch/meilisearch/rulesets/$RULESET_ID --jq '{ conditions: .conditions }')
|
||||
|
||||
# Update the conditions by appending the milestone version
|
||||
UPDATED_CONDITIONS=$(echo $CONDITIONS | jq '.conditions.ref_name.include += ["refs/heads/release-'$MILESTONE_VERSION'"]')
|
||||
|
||||
# Update the ruleset from stdin (-)
|
||||
echo $UPDATED_CONDITIONS |
|
||||
gh api repos/meilisearch/meilisearch/rulesets/$RULESET_ID \
|
||||
--method PUT \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
--input -
|
||||
|
||||
# ----------------
|
||||
# MILESTONE CLOSED
|
||||
# ----------------
|
||||
|
||||
create-release-label:
|
||||
if: github.event.action == 'closed'
|
||||
|
2
.github/workflows/publish-apt-brew-pkg.yml
vendored
2
.github/workflows/publish-apt-brew-pkg.yml
vendored
@ -25,7 +25,7 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install -y curl
|
||||
apt-get install build-essential -y
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Install cargo-deb
|
||||
run: cargo install cargo-deb
|
||||
- uses: actions/checkout@v3
|
||||
|
8
.github/workflows/publish-binaries.yml
vendored
8
.github/workflows/publish-binaries.yml
vendored
@ -45,7 +45,7 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install -y curl
|
||||
apt-get install build-essential -y
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Build
|
||||
run: cargo build --release --locked
|
||||
# No need to upload binaries for dry run (cron)
|
||||
@ -75,7 +75,7 @@ jobs:
|
||||
asset_name: meilisearch-windows-amd64.exe
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Build
|
||||
run: cargo build --release --locked
|
||||
# No need to upload binaries for dry run (cron)
|
||||
@ -101,7 +101,7 @@ jobs:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
- name: Installing Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@1.81
|
||||
uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
target: ${{ matrix.target }}
|
||||
@ -148,7 +148,7 @@ jobs:
|
||||
add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
apt-get update -y && apt-get install -y docker-ce
|
||||
- name: Installing Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@1.81
|
||||
uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
target: ${{ matrix.target }}
|
||||
|
14
.github/workflows/test-suite.yml
vendored
14
.github/workflows/test-suite.yml
vendored
@ -27,7 +27,7 @@ jobs:
|
||||
apt-get update && apt-get install -y curl
|
||||
apt-get install build-essential -y
|
||||
- name: Setup test with Rust stable
|
||||
uses: dtolnay/rust-toolchain@1.81
|
||||
uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
- name: Run cargo check without any default features
|
||||
@ -52,7 +52,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Run cargo check without any default features
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
@ -77,7 +77,7 @@ jobs:
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get install --assume-yes build-essential curl
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Run cargo build with almost all features
|
||||
run: |
|
||||
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
||||
@ -129,7 +129,7 @@ jobs:
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get install --assume-yes build-essential curl
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Run cargo tree without default features and check lindera is not present
|
||||
run: |
|
||||
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
|
||||
@ -153,7 +153,7 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install -y curl
|
||||
apt-get install build-essential -y
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
- name: Cache dependencies
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
- name: Run tests in debug
|
||||
@ -167,7 +167,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
components: clippy
|
||||
@ -184,7 +184,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: nightly-2024-07-09
|
||||
|
@ -4,7 +4,7 @@ on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
new_version:
|
||||
description: 'The new version (vX.Y.Z)'
|
||||
description: "The new version (vX.Y.Z)"
|
||||
required: true
|
||||
|
||||
env:
|
||||
@ -18,7 +18,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@1.81
|
||||
- uses: dtolnay/rust-toolchain@1.85
|
||||
with:
|
||||
profile: minimal
|
||||
- name: Install sd
|
||||
|
2067
Cargo.lock
generated
2067
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@ -30,7 +30,7 @@ authors = [
|
||||
description = "Meilisearch HTTP server"
|
||||
homepage = "https://meilisearch.com"
|
||||
readme = "README.md"
|
||||
edition = "2021"
|
||||
edition = "2024"
|
||||
license = "MIT"
|
||||
|
||||
[profile.release]
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Compile
|
||||
FROM rust:1.81.0-alpine3.20 AS compiler
|
||||
FROM rust:1.85-alpine3.20 AS compiler
|
||||
|
||||
RUN apk add -q --no-cache build-base openssl-dev
|
||||
|
||||
|
@ -23,6 +23,12 @@
|
||||
<a href="https://github.com/meilisearch/meilisearch/queue"><img alt="Merge Queues enabled" src="https://img.shields.io/badge/Merge_Queues-enabled-%2357cf60?logo=github"></a>
|
||||
</p>
|
||||
|
||||
<p align="center" name="ph-banner">
|
||||
<a href="https://www.producthunt.com/posts/meilisearch-ai">
|
||||
<img src="assets/ph-banner.png" alt="Meilisearch AI-powered search general availability announcement on ProductHunt">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>
|
||||
|
||||
[Meilisearch](https://www.meilisearch.com?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=intro) helps you shape a delightful search experience in a snap, offering features that work out of the box to speed up your workflow.
|
||||
|
BIN
assets/ph-banner.png
Normal file
BIN
assets/ph-banner.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 578 KiB |
@ -326,7 +326,6 @@ pub(crate) mod test {
|
||||
index_uids: maplit::btreemap! { "doggo".to_string() => 1 },
|
||||
progress_trace: Default::default(),
|
||||
write_channel_congestion: None,
|
||||
internal_database_sizes: Default::default(),
|
||||
},
|
||||
enqueued_at: Some(BatchEnqueuedAt {
|
||||
earliest: datetime!(2022-11-11 0:00 UTC),
|
||||
|
@ -108,7 +108,7 @@ where
|
||||
/// not supported on untagged enums.
|
||||
struct StarOrVisitor<T>(PhantomData<T>);
|
||||
|
||||
impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
|
||||
impl<T, FE> Visitor<'_> for StarOrVisitor<T>
|
||||
where
|
||||
T: FromStr<Err = FE>,
|
||||
FE: Display,
|
||||
|
@ -99,7 +99,7 @@ impl Task {
|
||||
/// Return true when a task is finished.
|
||||
/// A task is finished when its last state is either `Succeeded` or `Failed`.
|
||||
pub fn is_finished(&self) -> bool {
|
||||
self.events.last().map_or(false, |event| {
|
||||
self.events.last().is_some_and(|event| {
|
||||
matches!(event, TaskEvent::Succeded { .. } | TaskEvent::Failed { .. })
|
||||
})
|
||||
}
|
||||
|
@ -108,7 +108,7 @@ where
|
||||
/// not supported on untagged enums.
|
||||
struct StarOrVisitor<T>(PhantomData<T>);
|
||||
|
||||
impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
|
||||
impl<T, FE> Visitor<'_> for StarOrVisitor<T>
|
||||
where
|
||||
T: FromStr<Err = FE>,
|
||||
FE: Display,
|
||||
|
@ -114,7 +114,7 @@ impl Task {
|
||||
/// Return true when a task is finished.
|
||||
/// A task is finished when its last state is either `Succeeded` or `Failed`.
|
||||
pub fn is_finished(&self) -> bool {
|
||||
self.events.last().map_or(false, |event| {
|
||||
self.events.last().is_some_and(|event| {
|
||||
matches!(event, TaskEvent::Succeeded { .. } | TaskEvent::Failed { .. })
|
||||
})
|
||||
}
|
||||
@ -275,19 +275,19 @@ impl From<Task> for TaskView {
|
||||
match (result, &mut details) {
|
||||
(
|
||||
TaskResult::DocumentAddition { indexed_documents: num, .. },
|
||||
Some(TaskDetails::DocumentAddition { ref mut indexed_documents, .. }),
|
||||
Some(TaskDetails::DocumentAddition { indexed_documents, .. }),
|
||||
) => {
|
||||
indexed_documents.replace(*num);
|
||||
}
|
||||
(
|
||||
TaskResult::DocumentDeletion { deleted_documents: docs, .. },
|
||||
Some(TaskDetails::DocumentDeletion { ref mut deleted_documents, .. }),
|
||||
Some(TaskDetails::DocumentDeletion { deleted_documents, .. }),
|
||||
) => {
|
||||
deleted_documents.replace(*docs);
|
||||
}
|
||||
(
|
||||
TaskResult::ClearAll { deleted_documents: docs },
|
||||
Some(TaskDetails::ClearAll { ref mut deleted_documents }),
|
||||
Some(TaskDetails::ClearAll { deleted_documents }),
|
||||
) => {
|
||||
deleted_documents.replace(*docs);
|
||||
}
|
||||
|
@ -170,14 +170,14 @@ impl UpdateFile {
|
||||
}
|
||||
|
||||
pub fn push_document(&mut self, document: &Document) -> Result<()> {
|
||||
if let Some(mut writer) = self.writer.as_mut() {
|
||||
match self.writer.as_mut() { Some(mut writer) => {
|
||||
serde_json::to_writer(&mut writer, &document)?;
|
||||
writer.write_all(b"\n")?;
|
||||
} else {
|
||||
} _ => {
|
||||
let file = File::create(&self.path).unwrap();
|
||||
self.writer = Some(BufWriter::new(file));
|
||||
self.push_document(document)?;
|
||||
}
|
||||
}}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -111,7 +111,7 @@ impl FileStore {
|
||||
}
|
||||
|
||||
/// List the Uuids of the files in the FileStore
|
||||
pub fn all_uuids(&self) -> Result<impl Iterator<Item = Result<Uuid>>> {
|
||||
pub fn all_uuids(&self) -> Result<impl Iterator<Item = Result<Uuid>> + use<>> {
|
||||
Ok(self.path.read_dir()?.filter_map(|entry| {
|
||||
let file_name = match entry {
|
||||
Ok(entry) => entry.file_name(),
|
||||
@ -158,19 +158,19 @@ impl File {
|
||||
|
||||
impl Write for File {
|
||||
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
|
||||
if let Some(file) = self.file.as_mut() {
|
||||
match self.file.as_mut() { Some(file) => {
|
||||
file.write(buf)
|
||||
} else {
|
||||
} _ => {
|
||||
Ok(buf.len())
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> std::io::Result<()> {
|
||||
if let Some(file) = self.file.as_mut() {
|
||||
match self.file.as_mut() { Some(file) => {
|
||||
file.flush()
|
||||
} else {
|
||||
} _ => {
|
||||
Ok(())
|
||||
}
|
||||
}}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3,7 +3,7 @@ name = "filter-parser-fuzz"
|
||||
version = "0.0.0"
|
||||
authors = ["Automatically generated"]
|
||||
publish = false
|
||||
edition = "2018"
|
||||
edition = "2024"
|
||||
|
||||
[package.metadata]
|
||||
cargo-fuzz = true
|
||||
|
@ -35,7 +35,7 @@ impl<E> NomErrorExt<E> for nom::Err<E> {
|
||||
pub fn cut_with_err<'a, O>(
|
||||
mut parser: impl FnMut(Span<'a>) -> IResult<'a, O>,
|
||||
mut with: impl FnMut(Error<'a>) -> Error<'a>,
|
||||
) -> impl FnMut(Span<'a>) -> IResult<O> {
|
||||
) -> impl FnMut(Span<'a>) -> IResult<'a, O> {
|
||||
move |input| match parser.parse(input) {
|
||||
Err(nom::Err::Error(e)) => Err(nom::Err::Failure(with(e))),
|
||||
rest => rest,
|
||||
@ -121,7 +121,7 @@ impl<'a> ParseError<Span<'a>> for Error<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Display for Error<'a> {
|
||||
impl Display for Error<'_> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let input = self.context.fragment();
|
||||
// When printing our error message we want to escape all `\n` to be sure we keep our format with the
|
||||
@ -198,7 +198,7 @@ impl<'a> Display for Error<'a> {
|
||||
f,
|
||||
"Encountered an internal `{:?}` error while parsing your filter. Please fill an issue", kind
|
||||
)?,
|
||||
ErrorKind::External(ref error) => writeln!(f, "{}", error)?,
|
||||
ErrorKind::External(error) => writeln!(f, "{}", error)?,
|
||||
}
|
||||
let base_column = self.context.get_utf8_column();
|
||||
let size = self.context.fragment().chars().count();
|
||||
|
@ -80,7 +80,7 @@ pub struct Token<'a> {
|
||||
value: Option<String>,
|
||||
}
|
||||
|
||||
impl<'a> PartialEq for Token<'a> {
|
||||
impl PartialEq for Token<'_> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.span.fragment() == other.span.fragment()
|
||||
}
|
||||
@ -226,7 +226,7 @@ impl<'a> FilterCondition<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse(input: &'a str) -> Result<Option<Self>, Error> {
|
||||
pub fn parse(input: &'a str) -> Result<Option<Self>, Error<'a>> {
|
||||
if input.trim().is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
@ -527,7 +527,7 @@ pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
|
||||
terminated(|input| parse_expression(input, 0), eof)(input)
|
||||
}
|
||||
|
||||
impl<'a> std::fmt::Display for FilterCondition<'a> {
|
||||
impl std::fmt::Display for FilterCondition<'_> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
FilterCondition::Not(filter) => {
|
||||
@ -576,7 +576,8 @@ impl<'a> std::fmt::Display for FilterCondition<'a> {
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Condition<'a> {
|
||||
|
||||
impl std::fmt::Display for Condition<'_> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Condition::GreaterThan(token) => write!(f, "> {token}"),
|
||||
@ -594,7 +595,8 @@ impl<'a> std::fmt::Display for Condition<'a> {
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Token<'a> {
|
||||
|
||||
impl std::fmt::Display for Token<'_> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{{{}}}", self.value())
|
||||
}
|
||||
|
@ -52,7 +52,7 @@ fn quoted_by(quote: char, input: Span) -> IResult<Token> {
|
||||
}
|
||||
|
||||
// word = (alphanumeric | _ | - | .)+ except for reserved keywords
|
||||
pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<Token<'a>> {
|
||||
pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<'a, Token<'a>> {
|
||||
let (input, word): (_, Token<'a>) =
|
||||
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
|
||||
if is_keyword(word.value()) {
|
||||
|
@ -3,7 +3,7 @@ name = "flatten-serde-json-fuzz"
|
||||
version = "0.0.0"
|
||||
authors = ["Automatically generated"]
|
||||
publish = false
|
||||
edition = "2018"
|
||||
edition = "2024"
|
||||
|
||||
[package.metadata]
|
||||
cargo-fuzz = true
|
||||
|
@ -13,7 +13,6 @@ license.workspace = true
|
||||
[dependencies]
|
||||
anyhow = "1.0.95"
|
||||
bincode = "1.3.3"
|
||||
byte-unit = "5.1.6"
|
||||
bumpalo = "3.16.0"
|
||||
bumparaw-collections = "0.1.4"
|
||||
convert_case = "0.6.0"
|
||||
@ -23,7 +22,6 @@ dump = { path = "../dump" }
|
||||
enum-iterator = "2.1.0"
|
||||
file-store = { path = "../file-store" }
|
||||
flate2 = "1.0.35"
|
||||
indexmap = "2.7.0"
|
||||
meilisearch-auth = { path = "../meilisearch-auth" }
|
||||
meilisearch-types = { path = "../meilisearch-types" }
|
||||
memmap2 = "0.9.5"
|
||||
|
@ -272,11 +272,11 @@ impl IndexMapper {
|
||||
if tries >= 100 {
|
||||
panic!("Too many attempts to close index {name} prior to deletion.")
|
||||
}
|
||||
let reopen = if let Some(reopen) = reopen.wait_timeout(Duration::from_secs(6)) {
|
||||
let reopen = match reopen.wait_timeout(Duration::from_secs(6)) { Some(reopen) => {
|
||||
reopen
|
||||
} else {
|
||||
} _ => {
|
||||
continue;
|
||||
};
|
||||
}};
|
||||
reopen.close(&mut self.index_map.write().unwrap());
|
||||
continue;
|
||||
}
|
||||
@ -382,11 +382,11 @@ impl IndexMapper {
|
||||
Available(index) => break index,
|
||||
Closing(reopen) => {
|
||||
// Avoiding deadlocks: no lock taken while doing this operation.
|
||||
let reopen = if let Some(reopen) = reopen.wait_timeout(Duration::from_secs(6)) {
|
||||
let reopen = match reopen.wait_timeout(Duration::from_secs(6)) { Some(reopen) => {
|
||||
reopen
|
||||
} else {
|
||||
} _ => {
|
||||
continue;
|
||||
};
|
||||
}};
|
||||
let index_path = self.base_path.join(uuid.to_string());
|
||||
// take the lock to reopen the environment.
|
||||
reopen
|
||||
|
@ -344,7 +344,6 @@ pub fn snapshot_batch(batch: &Batch) -> String {
|
||||
let Batch { uid, details, stats, started_at, finished_at, progress: _, enqueued_at } = batch;
|
||||
let stats = BatchStats {
|
||||
progress_trace: Default::default(),
|
||||
internal_database_sizes: Default::default(),
|
||||
write_channel_congestion: None,
|
||||
..stats.clone()
|
||||
};
|
||||
|
@ -355,19 +355,19 @@ impl IndexScheduler {
|
||||
}
|
||||
|
||||
fn is_good_heed(tasks_path: &Path, map_size: usize) -> bool {
|
||||
if let Ok(env) = unsafe {
|
||||
match unsafe {
|
||||
heed::EnvOpenOptions::new().map_size(clamp_to_page_size(map_size)).open(tasks_path)
|
||||
} {
|
||||
} { Ok(env) => {
|
||||
env.prepare_for_closing().wait();
|
||||
true
|
||||
} else {
|
||||
} _ => {
|
||||
// We're treating all errors equally here, not only allocation errors.
|
||||
// This means there's a possiblity for the budget to lower due to errors different from allocation errors.
|
||||
// For persistent errors, this is OK as long as the task db is then reopened normally without ignoring the error this time.
|
||||
// For transient errors, this could lead to an instance with too low a budget.
|
||||
// However transient errors are: 1) less likely than persistent errors 2) likely to cause other issues down the line anyway.
|
||||
false
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
pub fn read_txn(&self) -> Result<RoTxn<WithoutTls>> {
|
||||
@ -696,7 +696,7 @@ impl IndexScheduler {
|
||||
written: usize,
|
||||
}
|
||||
|
||||
impl<'a, 'b> Read for TaskReader<'a, 'b> {
|
||||
impl Read for TaskReader<'_, '_> {
|
||||
fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result<usize> {
|
||||
if self.buffer.is_empty() {
|
||||
match self.tasks.next() {
|
||||
|
@ -64,13 +64,6 @@ make_enum_progress! {
|
||||
}
|
||||
}
|
||||
|
||||
make_enum_progress! {
|
||||
pub enum FinalizingIndexStep {
|
||||
Committing,
|
||||
ComputingStats,
|
||||
}
|
||||
}
|
||||
|
||||
make_enum_progress! {
|
||||
pub enum TaskCancelationProgress {
|
||||
RetrievingTasks,
|
||||
|
@ -315,7 +315,7 @@ impl Queue {
|
||||
if let Some(batch_uids) = batch_uids {
|
||||
let mut batch_tasks = RoaringBitmap::new();
|
||||
for batch_uid in batch_uids {
|
||||
if processing_batch.as_ref().map_or(false, |batch| batch.uid == *batch_uid) {
|
||||
if processing_batch.as_ref().is_some_and(|batch| batch.uid == *batch_uid) {
|
||||
batch_tasks |= &**processing_tasks;
|
||||
} else {
|
||||
batch_tasks |= self.tasks_in_batch(rtxn, *batch_uid)?;
|
||||
|
@ -219,7 +219,7 @@ impl BatchKind {
|
||||
primary_key.is_some() &&
|
||||
// 2.1.1 If the task we're trying to accumulate have a pk it must be equal to our primary key
|
||||
// 2.1.2 If the task don't have a primary-key -> we can continue
|
||||
kind.primary_key().map_or(true, |pk| pk == primary_key)
|
||||
kind.primary_key().is_none_or(|pk| pk == primary_key)
|
||||
) ||
|
||||
// 2.2 If we don't have a primary-key ->
|
||||
(
|
||||
|
@ -10,7 +10,7 @@ use crate::TaskId;
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! debug_snapshot {
|
||||
($value:expr, @$snapshot:literal) => {{
|
||||
($value:expr_2021, @$snapshot:literal) => {{
|
||||
let value = format!("{:?}", $value);
|
||||
meili_snap::snapshot!(value, @$snapshot);
|
||||
}};
|
||||
|
@ -499,13 +499,13 @@ impl IndexScheduler {
|
||||
// create the batch directly. Otherwise, get the index name associated with the task
|
||||
// and use the autobatcher to batch the enqueued tasks associated with it
|
||||
|
||||
let index_name = if let Some(&index_name) = task.indexes().first() {
|
||||
let index_name = match task.indexes().first() { Some(&index_name) => {
|
||||
index_name
|
||||
} else {
|
||||
} _ => {
|
||||
assert!(matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty()));
|
||||
current_batch.processing(Some(&mut task));
|
||||
return Ok(Some((Batch::IndexSwap { task }, current_batch)));
|
||||
};
|
||||
}};
|
||||
|
||||
let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
|
||||
let mut primary_key = None;
|
||||
|
@ -20,12 +20,10 @@ use std::path::PathBuf;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use convert_case::{Case, Casing as _};
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::heed::{Env, WithoutTls};
|
||||
use meilisearch_types::milli;
|
||||
use meilisearch_types::tasks::Status;
|
||||
use process_batch::ProcessBatchInfo;
|
||||
use rayon::current_num_threads;
|
||||
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||||
use roaring::RoaringBitmap;
|
||||
@ -225,16 +223,16 @@ impl IndexScheduler {
|
||||
let mut stop_scheduler_forever = false;
|
||||
let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
|
||||
let mut canceled = RoaringBitmap::new();
|
||||
let mut process_batch_info = ProcessBatchInfo::default();
|
||||
let mut congestion = None;
|
||||
|
||||
match res {
|
||||
Ok((tasks, info)) => {
|
||||
Ok((tasks, cong)) => {
|
||||
#[cfg(test)]
|
||||
self.breakpoint(crate::test_utils::Breakpoint::ProcessBatchSucceeded);
|
||||
|
||||
let (task_progress, task_progress_obj) = AtomicTaskStep::new(tasks.len() as u32);
|
||||
progress.update_progress(task_progress_obj);
|
||||
process_batch_info = info;
|
||||
congestion = cong;
|
||||
let mut success = 0;
|
||||
let mut failure = 0;
|
||||
let mut canceled_by = None;
|
||||
@ -352,9 +350,6 @@ impl IndexScheduler {
|
||||
// We must re-add the canceled task so they're part of the same batch.
|
||||
ids |= canceled;
|
||||
|
||||
let ProcessBatchInfo { congestion, pre_commit_dabases_sizes, post_commit_dabases_sizes } =
|
||||
process_batch_info;
|
||||
|
||||
processing_batch.stats.progress_trace =
|
||||
progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
|
||||
processing_batch.stats.write_channel_congestion = congestion.map(|congestion| {
|
||||
@ -364,33 +359,6 @@ impl IndexScheduler {
|
||||
congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
|
||||
congestion_info
|
||||
});
|
||||
processing_batch.stats.internal_database_sizes = pre_commit_dabases_sizes
|
||||
.iter()
|
||||
.flat_map(|(dbname, pre_size)| {
|
||||
post_commit_dabases_sizes
|
||||
.get(dbname)
|
||||
.map(|post_size| {
|
||||
use byte_unit::{Byte, UnitType::Binary};
|
||||
use std::cmp::Ordering::{Equal, Greater, Less};
|
||||
|
||||
let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
|
||||
let diff_size = post_size.abs_diff(*pre_size) as u64;
|
||||
let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
|
||||
let sign = match post_size.cmp(pre_size) {
|
||||
Equal => return None,
|
||||
Greater => "+",
|
||||
Less => "-",
|
||||
};
|
||||
|
||||
Some((
|
||||
dbname.to_case(Case::Camel),
|
||||
format!("{post:#.2} ({sign}{diff:#.2})").into(),
|
||||
))
|
||||
})
|
||||
.into_iter()
|
||||
.flatten()
|
||||
})
|
||||
.collect();
|
||||
|
||||
if let Some(congestion) = congestion {
|
||||
tracing::debug!(
|
||||
|
@ -12,7 +12,7 @@ use roaring::RoaringBitmap;
|
||||
|
||||
use super::create_batch::Batch;
|
||||
use crate::processing::{
|
||||
AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
|
||||
AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress,
|
||||
InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
|
||||
UpdateIndexProgress,
|
||||
};
|
||||
@ -22,16 +22,6 @@ use crate::utils::{
|
||||
};
|
||||
use crate::{Error, IndexScheduler, Result, TaskId};
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ProcessBatchInfo {
|
||||
/// The write channel congestion. None when unavailable: settings update.
|
||||
pub congestion: Option<ChannelCongestion>,
|
||||
/// The sizes of the different databases before starting the indexation.
|
||||
pub pre_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
|
||||
/// The sizes of the different databases after commiting the indexation.
|
||||
pub post_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
|
||||
}
|
||||
|
||||
impl IndexScheduler {
|
||||
/// Apply the operation associated with the given batch.
|
||||
///
|
||||
@ -45,7 +35,7 @@ impl IndexScheduler {
|
||||
batch: Batch,
|
||||
current_batch: &mut ProcessingBatch,
|
||||
progress: Progress,
|
||||
) -> Result<(Vec<Task>, ProcessBatchInfo)> {
|
||||
) -> Result<(Vec<Task>, Option<ChannelCongestion>)> {
|
||||
#[cfg(test)]
|
||||
{
|
||||
self.maybe_fail(crate::test_utils::FailureLocation::InsideProcessBatch)?;
|
||||
@ -57,11 +47,11 @@ impl IndexScheduler {
|
||||
Batch::TaskCancelation { mut task } => {
|
||||
// 1. Retrieve the tasks that matched the query at enqueue-time.
|
||||
let matched_tasks =
|
||||
if let KindWithContent::TaskCancelation { tasks, query: _ } = &task.kind {
|
||||
match &task.kind { KindWithContent::TaskCancelation { tasks, query: _ } => {
|
||||
tasks
|
||||
} else {
|
||||
} _ => {
|
||||
unreachable!()
|
||||
};
|
||||
}};
|
||||
|
||||
let rtxn = self.env.read_txn()?;
|
||||
let mut canceled_tasks = self.cancel_matched_tasks(
|
||||
@ -86,18 +76,18 @@ impl IndexScheduler {
|
||||
|
||||
canceled_tasks.push(task);
|
||||
|
||||
Ok((canceled_tasks, ProcessBatchInfo::default()))
|
||||
Ok((canceled_tasks, None))
|
||||
}
|
||||
Batch::TaskDeletions(mut tasks) => {
|
||||
// 1. Retrieve the tasks that matched the query at enqueue-time.
|
||||
let mut matched_tasks = RoaringBitmap::new();
|
||||
|
||||
for task in tasks.iter() {
|
||||
if let KindWithContent::TaskDeletion { tasks, query: _ } = &task.kind {
|
||||
match &task.kind { KindWithContent::TaskDeletion { tasks, query: _ } => {
|
||||
matched_tasks |= tasks;
|
||||
} else {
|
||||
} _ => {
|
||||
unreachable!()
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
let mut wtxn = self.env.write_txn()?;
|
||||
@ -125,14 +115,14 @@ impl IndexScheduler {
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
Ok((tasks, ProcessBatchInfo::default()))
|
||||
Ok((tasks, None))
|
||||
}
|
||||
Batch::SnapshotCreation(tasks) => {
|
||||
self.process_snapshot(progress, tasks).map(|tasks| (tasks, None))
|
||||
}
|
||||
Batch::Dump(task) => {
|
||||
self.process_dump_creation(progress, task).map(|tasks| (tasks, None))
|
||||
}
|
||||
Batch::SnapshotCreation(tasks) => self
|
||||
.process_snapshot(progress, tasks)
|
||||
.map(|tasks| (tasks, ProcessBatchInfo::default())),
|
||||
Batch::Dump(task) => self
|
||||
.process_dump_creation(progress, task)
|
||||
.map(|tasks| (tasks, ProcessBatchInfo::default())),
|
||||
Batch::IndexOperation { op, must_create_index } => {
|
||||
let index_uid = op.index_uid().to_string();
|
||||
let index = if must_create_index {
|
||||
@ -149,12 +139,10 @@ impl IndexScheduler {
|
||||
.set_currently_updating_index(Some((index_uid.clone(), index.clone())));
|
||||
|
||||
let mut index_wtxn = index.write_txn()?;
|
||||
let pre_commit_dabases_sizes = index.database_sizes(&index_wtxn)?;
|
||||
let (tasks, congestion) =
|
||||
self.apply_index_operation(&mut index_wtxn, &index, op, &progress)?;
|
||||
self.apply_index_operation(&mut index_wtxn, &index, op, progress)?;
|
||||
|
||||
{
|
||||
progress.update_progress(FinalizingIndexStep::Committing);
|
||||
let span = tracing::trace_span!(target: "indexing::scheduler", "commit");
|
||||
let _entered = span.enter();
|
||||
|
||||
@ -165,15 +153,12 @@ impl IndexScheduler {
|
||||
// stats of the index. Since the tasks have already been processed and
|
||||
// this is a non-critical operation. If it fails, we should not fail
|
||||
// the entire batch.
|
||||
let mut post_commit_dabases_sizes = None;
|
||||
let res = || -> Result<()> {
|
||||
progress.update_progress(FinalizingIndexStep::ComputingStats);
|
||||
let index_rtxn = index.read_txn()?;
|
||||
let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
|
||||
.map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
|
||||
let mut wtxn = self.env.write_txn()?;
|
||||
self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
|
||||
post_commit_dabases_sizes = Some(index.database_sizes(&index_rtxn)?);
|
||||
wtxn.commit()?;
|
||||
Ok(())
|
||||
}();
|
||||
@ -186,16 +171,7 @@ impl IndexScheduler {
|
||||
),
|
||||
}
|
||||
|
||||
let info = ProcessBatchInfo {
|
||||
congestion,
|
||||
// In case we fail to the get post-commit sizes we decide
|
||||
// that nothing changed and use the pre-commit sizes.
|
||||
post_commit_dabases_sizes: post_commit_dabases_sizes
|
||||
.unwrap_or_else(|| pre_commit_dabases_sizes.clone()),
|
||||
pre_commit_dabases_sizes,
|
||||
};
|
||||
|
||||
Ok((tasks, info))
|
||||
Ok((tasks, congestion))
|
||||
}
|
||||
Batch::IndexCreation { index_uid, primary_key, task } => {
|
||||
progress.update_progress(CreateIndexProgress::CreatingTheIndex);
|
||||
@ -263,7 +239,7 @@ impl IndexScheduler {
|
||||
),
|
||||
}
|
||||
|
||||
Ok((vec![task], ProcessBatchInfo::default()))
|
||||
Ok((vec![task], None))
|
||||
}
|
||||
Batch::IndexDeletion { index_uid, index_has_been_created, mut tasks } => {
|
||||
progress.update_progress(DeleteIndexProgress::DeletingTheIndex);
|
||||
@ -297,19 +273,17 @@ impl IndexScheduler {
|
||||
};
|
||||
}
|
||||
|
||||
// Here we could also show that all the internal database sizes goes to 0
|
||||
// but it would mean opening the index and that's costly.
|
||||
Ok((tasks, ProcessBatchInfo::default()))
|
||||
Ok((tasks, None))
|
||||
}
|
||||
Batch::IndexSwap { mut task } => {
|
||||
progress.update_progress(SwappingTheIndexes::EnsuringCorrectnessOfTheSwap);
|
||||
|
||||
let mut wtxn = self.env.write_txn()?;
|
||||
let swaps = if let KindWithContent::IndexSwap { swaps } = &task.kind {
|
||||
let swaps = match &task.kind { KindWithContent::IndexSwap { swaps } => {
|
||||
swaps
|
||||
} else {
|
||||
} _ => {
|
||||
unreachable!()
|
||||
};
|
||||
}};
|
||||
let mut not_found_indexes = BTreeSet::new();
|
||||
for IndexSwap { indexes: (lhs, rhs) } in swaps {
|
||||
for index in [lhs, rhs] {
|
||||
@ -347,7 +321,7 @@ impl IndexScheduler {
|
||||
}
|
||||
wtxn.commit()?;
|
||||
task.status = Status::Succeeded;
|
||||
Ok((vec![task], ProcessBatchInfo::default()))
|
||||
Ok((vec![task], None))
|
||||
}
|
||||
Batch::UpgradeDatabase { mut tasks } => {
|
||||
let KindWithContent::UpgradeDatabase { from } = tasks.last().unwrap().kind else {
|
||||
@ -377,7 +351,7 @@ impl IndexScheduler {
|
||||
task.error = None;
|
||||
}
|
||||
|
||||
Ok((tasks, ProcessBatchInfo::default()))
|
||||
Ok((tasks, None))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -558,7 +532,7 @@ impl IndexScheduler {
|
||||
// We must remove the batch entirely
|
||||
if tasks.is_empty() {
|
||||
if let Some(batch) = self.queue.batches.get_batch(wtxn, batch_id)? {
|
||||
if let Some(BatchEnqueuedAt { earliest, oldest }) = batch.enqueued_at {
|
||||
match batch.enqueued_at { Some(BatchEnqueuedAt { earliest, oldest }) => {
|
||||
remove_task_datetime(
|
||||
wtxn,
|
||||
self.queue.batches.enqueued_at,
|
||||
@ -571,7 +545,7 @@ impl IndexScheduler {
|
||||
oldest,
|
||||
batch_id,
|
||||
)?;
|
||||
} else {
|
||||
} _ => {
|
||||
// If we don't have the enqueued at in the batch it means the database comes from the v1.12
|
||||
// and we still need to find the date by scrolling the database
|
||||
remove_n_tasks_datetime_earlier_than(
|
||||
@ -581,7 +555,7 @@ impl IndexScheduler {
|
||||
batch.stats.total_nb_tasks.clamp(1, 2) as usize,
|
||||
batch_id,
|
||||
)?;
|
||||
}
|
||||
}}
|
||||
remove_task_datetime(
|
||||
wtxn,
|
||||
self.queue.batches.started_at,
|
||||
|
@ -26,11 +26,11 @@ impl IndexScheduler {
|
||||
progress.update_progress(DumpCreationProgress::StartTheDumpCreation);
|
||||
let started_at = OffsetDateTime::now_utc();
|
||||
let (keys, instance_uid) =
|
||||
if let KindWithContent::DumpCreation { keys, instance_uid } = &task.kind {
|
||||
match &task.kind { KindWithContent::DumpCreation { keys, instance_uid } => {
|
||||
(keys, instance_uid)
|
||||
} else {
|
||||
} _ => {
|
||||
unreachable!();
|
||||
};
|
||||
}};
|
||||
let dump = dump::DumpWriter::new(*instance_uid)?;
|
||||
|
||||
// 1. dump the keys
|
||||
@ -206,14 +206,14 @@ impl IndexScheduler {
|
||||
let user_err =
|
||||
milli::Error::UserError(milli::UserError::InvalidVectorsMapType {
|
||||
document_id: {
|
||||
if let Ok(Some(Ok(index))) = index
|
||||
match index
|
||||
.external_id_of(&rtxn, std::iter::once(id))
|
||||
.map(|it| it.into_iter().next())
|
||||
{
|
||||
{ Ok(Some(Ok(index))) => {
|
||||
index
|
||||
} else {
|
||||
} _ => {
|
||||
format!("internal docid={id}")
|
||||
}
|
||||
}}
|
||||
},
|
||||
value: vectors.clone(),
|
||||
});
|
||||
|
@ -32,7 +32,7 @@ impl IndexScheduler {
|
||||
index_wtxn: &mut RwTxn<'i>,
|
||||
index: &'i Index,
|
||||
operation: IndexOperation,
|
||||
progress: &Progress,
|
||||
progress: Progress,
|
||||
) -> Result<(Vec<Task>, Option<ChannelCongestion>)> {
|
||||
let indexer_alloc = Bump::new();
|
||||
let started_processing_at = std::time::Instant::now();
|
||||
@ -186,7 +186,7 @@ impl IndexScheduler {
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| must_stop_processing.get(),
|
||||
progress,
|
||||
&progress,
|
||||
)
|
||||
.map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?,
|
||||
);
|
||||
@ -206,17 +206,17 @@ impl IndexScheduler {
|
||||
IndexOperation::DocumentEdition { index_uid, mut task } => {
|
||||
progress.update_progress(DocumentEditionProgress::RetrievingConfig);
|
||||
|
||||
let (filter, code) = if let KindWithContent::DocumentEdition {
|
||||
let (filter, code) = match &task.kind
|
||||
{ KindWithContent::DocumentEdition {
|
||||
filter_expr,
|
||||
context: _,
|
||||
function,
|
||||
..
|
||||
} = &task.kind
|
||||
{
|
||||
} => {
|
||||
(filter_expr, function)
|
||||
} else {
|
||||
} _ => {
|
||||
unreachable!()
|
||||
};
|
||||
}};
|
||||
|
||||
let candidates = match filter.as_ref().map(Filter::from_json) {
|
||||
Some(Ok(Some(filter))) => filter
|
||||
@ -226,18 +226,18 @@ impl IndexScheduler {
|
||||
Some(Err(e)) => return Err(Error::from_milli(e, Some(index_uid.clone()))),
|
||||
};
|
||||
|
||||
let (original_filter, context, function) = if let Some(Details::DocumentEdition {
|
||||
let (original_filter, context, function) = match task.details
|
||||
{ Some(Details::DocumentEdition {
|
||||
original_filter,
|
||||
context,
|
||||
function,
|
||||
..
|
||||
}) = task.details
|
||||
{
|
||||
}) => {
|
||||
(original_filter, context, function)
|
||||
} else {
|
||||
} _ => {
|
||||
// In the case of a `documentEdition` the details MUST be set
|
||||
unreachable!();
|
||||
};
|
||||
}};
|
||||
|
||||
if candidates.is_empty() {
|
||||
task.status = Status::Succeeded;
|
||||
@ -307,7 +307,7 @@ impl IndexScheduler {
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| must_stop_processing.get(),
|
||||
progress,
|
||||
&progress,
|
||||
)
|
||||
.map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?,
|
||||
);
|
||||
@ -397,16 +397,16 @@ impl IndexScheduler {
|
||||
};
|
||||
}
|
||||
let will_be_removed = to_delete.len() - before;
|
||||
if let Some(Details::DocumentDeletionByFilter {
|
||||
match &mut task.details
|
||||
{ Some(Details::DocumentDeletionByFilter {
|
||||
original_filter: _,
|
||||
deleted_documents,
|
||||
}) = &mut task.details
|
||||
{
|
||||
}) => {
|
||||
*deleted_documents = Some(will_be_removed);
|
||||
} else {
|
||||
} _ => {
|
||||
// In the case of a `documentDeleteByFilter` the details MUST be set
|
||||
unreachable!()
|
||||
}
|
||||
}}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
@ -465,7 +465,7 @@ impl IndexScheduler {
|
||||
&document_changes,
|
||||
embedders,
|
||||
&|| must_stop_processing.get(),
|
||||
progress,
|
||||
&progress,
|
||||
)
|
||||
.map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?,
|
||||
);
|
||||
@ -520,7 +520,7 @@ impl IndexScheduler {
|
||||
index_uid: index_uid.clone(),
|
||||
tasks: cleared_tasks,
|
||||
},
|
||||
progress,
|
||||
progress.clone(),
|
||||
)?;
|
||||
|
||||
let (settings_tasks, _congestion) = self.apply_index_operation(
|
||||
|
@ -307,7 +307,7 @@ pub(crate) fn filter_out_references_to_newer_tasks(task: &mut Task) {
|
||||
|
||||
pub(crate) fn check_index_swap_validity(task: &Task) -> Result<()> {
|
||||
let swaps =
|
||||
if let KindWithContent::IndexSwap { swaps } = &task.kind { swaps } else { return Ok(()) };
|
||||
match &task.kind { KindWithContent::IndexSwap { swaps } => { swaps } _ => { return Ok(()) }};
|
||||
let mut all_indexes = HashSet::new();
|
||||
let mut duplicate_indexes = BTreeSet::new();
|
||||
for IndexSwap { indexes: (lhs, rhs) } in swaps {
|
||||
@ -501,15 +501,15 @@ impl crate::IndexScheduler {
|
||||
} => {
|
||||
assert_eq!(kind.as_kind(), Kind::DocumentDeletion);
|
||||
let (index_uid, documents_ids) =
|
||||
if let KindWithContent::DocumentDeletion {
|
||||
match kind
|
||||
{ KindWithContent::DocumentDeletion {
|
||||
ref index_uid,
|
||||
ref documents_ids,
|
||||
} = kind
|
||||
{
|
||||
} => {
|
||||
(index_uid, documents_ids)
|
||||
} else {
|
||||
} _ => {
|
||||
unreachable!()
|
||||
};
|
||||
}};
|
||||
assert_eq!(&task_index_uid.unwrap(), index_uid);
|
||||
|
||||
match status {
|
||||
@ -526,15 +526,15 @@ impl crate::IndexScheduler {
|
||||
}
|
||||
Details::DocumentDeletionByFilter { deleted_documents, original_filter: _ } => {
|
||||
assert_eq!(kind.as_kind(), Kind::DocumentDeletion);
|
||||
let (index_uid, _) = if let KindWithContent::DocumentDeletionByFilter {
|
||||
let (index_uid, _) = match kind
|
||||
{ KindWithContent::DocumentDeletionByFilter {
|
||||
ref index_uid,
|
||||
ref filter_expr,
|
||||
} = kind
|
||||
{
|
||||
} => {
|
||||
(index_uid, filter_expr)
|
||||
} else {
|
||||
} _ => {
|
||||
unreachable!()
|
||||
};
|
||||
}};
|
||||
assert_eq!(&task_index_uid.unwrap(), index_uid);
|
||||
|
||||
match status {
|
||||
|
@ -3,7 +3,7 @@ name = "json-depth-checker"
|
||||
version = "0.0.0"
|
||||
authors = ["Automatically generated"]
|
||||
publish = false
|
||||
edition = "2018"
|
||||
edition = "2024"
|
||||
|
||||
[package.metadata]
|
||||
cargo-fuzz = true
|
||||
|
@ -77,7 +77,7 @@ snapshot_hash!("hello world", name: "snap_name", @"5f93f983524def3dca464469d2cf9
|
||||
*/
|
||||
#[macro_export]
|
||||
macro_rules! snapshot_hash {
|
||||
($value:expr, @$inline:literal) => {
|
||||
($value:expr_2021, @$inline:literal) => {
|
||||
let test_name = {
|
||||
fn f() {}
|
||||
fn type_name_of_val<T>(_: T) -> &'static str {
|
||||
@ -99,7 +99,7 @@ macro_rules! snapshot_hash {
|
||||
}
|
||||
});
|
||||
};
|
||||
($value:expr, name: $name:expr, @$inline:literal) => {
|
||||
($value:expr_2021, name: $name:expr_2021, @$inline:literal) => {
|
||||
let test_name = {
|
||||
fn f() {}
|
||||
fn type_name_of_val<T>(_: T) -> &'static str {
|
||||
@ -151,7 +151,7 @@ snapshot!(format!("{:?}", vec![1, 2]), @"[1, 2]");
|
||||
*/
|
||||
#[macro_export]
|
||||
macro_rules! snapshot {
|
||||
($value:expr, name: $name:expr) => {
|
||||
($value:expr_2021, name: $name:expr_2021) => {
|
||||
let test_name = {
|
||||
fn f() {}
|
||||
fn type_name_of_val<T>(_: T) -> &'static str {
|
||||
@ -172,7 +172,7 @@ macro_rules! snapshot {
|
||||
}
|
||||
});
|
||||
};
|
||||
($value:expr, @$inline:literal) => {
|
||||
($value:expr_2021, @$inline:literal) => {
|
||||
// Note that the name given as argument does not matter since it is only an inline snapshot
|
||||
// We don't pass None because otherwise `meili-snap` will try to assign it a unique identifier
|
||||
let (settings, _, _) = $crate::default_snapshot_settings_for_test("", Some("_dummy_argument"));
|
||||
@ -183,7 +183,7 @@ macro_rules! snapshot {
|
||||
}
|
||||
});
|
||||
};
|
||||
($value:expr) => {
|
||||
($value:expr_2021) => {
|
||||
let test_name = {
|
||||
fn f() {}
|
||||
fn type_name_of_val<T>(_: T) -> &'static str {
|
||||
@ -213,13 +213,13 @@ macro_rules! snapshot {
|
||||
/// refer to the redactions feature in the `insta` guide.
|
||||
#[macro_export]
|
||||
macro_rules! json_string {
|
||||
($value:expr, {$($k:expr => $v:expr),*$(,)?}) => {
|
||||
($value:expr_2021, {$($k:expr_2021 => $v:expr_2021),*$(,)?}) => {
|
||||
{
|
||||
let (_, snap) = meili_snap::insta::_prepare_snapshot_for_redaction!($value, {$($k => $v),*}, Json, File);
|
||||
snap
|
||||
}
|
||||
};
|
||||
($value:expr) => {{
|
||||
($value:expr_2021) => {{
|
||||
let value = meili_snap::insta::_macro_support::serialize_value(
|
||||
&$value,
|
||||
meili_snap::insta::_macro_support::SerializationFormat::Json,
|
||||
|
@ -64,6 +64,4 @@ pub struct BatchStats {
|
||||
pub progress_trace: serde_json::Map<String, serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub write_channel_congestion: Option<serde_json::Map<String, serde_json::Value>>,
|
||||
#[serde(default, skip_serializing_if = "serde_json::Map::is_empty")]
|
||||
pub internal_database_sizes: serde_json::Map<String, serde_json::Value>,
|
||||
}
|
||||
|
@ -403,7 +403,7 @@ impl ErrorCode for milli::Error {
|
||||
match self {
|
||||
Error::InternalError(_) => Code::Internal,
|
||||
Error::IoError(e) => e.error_code(),
|
||||
Error::UserError(ref error) => {
|
||||
Error::UserError(error) => {
|
||||
match error {
|
||||
// TODO: wait for spec for new error codes.
|
||||
UserError::SerdeJson(_)
|
||||
|
@ -33,7 +33,7 @@ impl From<LocalizedAttributesRuleView> for LocalizedAttributesRule {
|
||||
///
|
||||
/// this enum implements `Deserr` in order to be used in the API.
|
||||
macro_rules! make_locale {
|
||||
($(($iso_639_1:ident, $iso_639_1_str:expr) => ($iso_639_3:ident, $iso_639_3_str:expr),)+) => {
|
||||
($(($iso_639_1:ident, $iso_639_1_str:expr_2021) => ($iso_639_3:ident, $iso_639_3_str:expr_2021),)+) => {
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserr, Serialize, Deserialize, Ord, PartialOrd, ToSchema)]
|
||||
#[deserr(rename_all = camelCase)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
|
@ -572,19 +572,19 @@ pub fn apply_settings_to_builder(
|
||||
} = settings;
|
||||
|
||||
match searchable_attributes.deref() {
|
||||
Setting::Set(ref names) => builder.set_searchable_fields(names.clone()),
|
||||
Setting::Set(names) => builder.set_searchable_fields(names.clone()),
|
||||
Setting::Reset => builder.reset_searchable_fields(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match displayed_attributes.deref() {
|
||||
Setting::Set(ref names) => builder.set_displayed_fields(names.clone()),
|
||||
Setting::Set(names) => builder.set_displayed_fields(names.clone()),
|
||||
Setting::Reset => builder.reset_displayed_fields(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match filterable_attributes {
|
||||
Setting::Set(ref facets) => {
|
||||
Setting::Set(facets) => {
|
||||
builder.set_filterable_fields(facets.clone().into_iter().collect())
|
||||
}
|
||||
Setting::Reset => builder.reset_filterable_fields(),
|
||||
@ -592,13 +592,13 @@ pub fn apply_settings_to_builder(
|
||||
}
|
||||
|
||||
match sortable_attributes {
|
||||
Setting::Set(ref fields) => builder.set_sortable_fields(fields.iter().cloned().collect()),
|
||||
Setting::Set(fields) => builder.set_sortable_fields(fields.iter().cloned().collect()),
|
||||
Setting::Reset => builder.reset_sortable_fields(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match ranking_rules {
|
||||
Setting::Set(ref criteria) => {
|
||||
Setting::Set(criteria) => {
|
||||
builder.set_criteria(criteria.iter().map(|c| c.clone().into()).collect())
|
||||
}
|
||||
Setting::Reset => builder.reset_criteria(),
|
||||
@ -606,13 +606,13 @@ pub fn apply_settings_to_builder(
|
||||
}
|
||||
|
||||
match stop_words {
|
||||
Setting::Set(ref stop_words) => builder.set_stop_words(stop_words.clone()),
|
||||
Setting::Set(stop_words) => builder.set_stop_words(stop_words.clone()),
|
||||
Setting::Reset => builder.reset_stop_words(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match non_separator_tokens {
|
||||
Setting::Set(ref non_separator_tokens) => {
|
||||
Setting::Set(non_separator_tokens) => {
|
||||
builder.set_non_separator_tokens(non_separator_tokens.clone())
|
||||
}
|
||||
Setting::Reset => builder.reset_non_separator_tokens(),
|
||||
@ -620,7 +620,7 @@ pub fn apply_settings_to_builder(
|
||||
}
|
||||
|
||||
match separator_tokens {
|
||||
Setting::Set(ref separator_tokens) => {
|
||||
Setting::Set(separator_tokens) => {
|
||||
builder.set_separator_tokens(separator_tokens.clone())
|
||||
}
|
||||
Setting::Reset => builder.reset_separator_tokens(),
|
||||
@ -628,38 +628,38 @@ pub fn apply_settings_to_builder(
|
||||
}
|
||||
|
||||
match dictionary {
|
||||
Setting::Set(ref dictionary) => builder.set_dictionary(dictionary.clone()),
|
||||
Setting::Set(dictionary) => builder.set_dictionary(dictionary.clone()),
|
||||
Setting::Reset => builder.reset_dictionary(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match synonyms {
|
||||
Setting::Set(ref synonyms) => builder.set_synonyms(synonyms.clone().into_iter().collect()),
|
||||
Setting::Set(synonyms) => builder.set_synonyms(synonyms.clone().into_iter().collect()),
|
||||
Setting::Reset => builder.reset_synonyms(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match distinct_attribute {
|
||||
Setting::Set(ref attr) => builder.set_distinct_field(attr.clone()),
|
||||
Setting::Set(attr) => builder.set_distinct_field(attr.clone()),
|
||||
Setting::Reset => builder.reset_distinct_field(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match proximity_precision {
|
||||
Setting::Set(ref precision) => builder.set_proximity_precision((*precision).into()),
|
||||
Setting::Set(precision) => builder.set_proximity_precision((*precision).into()),
|
||||
Setting::Reset => builder.reset_proximity_precision(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match localized_attributes_rules {
|
||||
Setting::Set(ref rules) => builder
|
||||
Setting::Set(rules) => builder
|
||||
.set_localized_attributes_rules(rules.iter().cloned().map(|r| r.into()).collect()),
|
||||
Setting::Reset => builder.reset_localized_attributes_rules(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match typo_tolerance {
|
||||
Setting::Set(ref value) => {
|
||||
Setting::Set(value) => {
|
||||
match value.enabled {
|
||||
Setting::Set(val) => builder.set_autorize_typos(val),
|
||||
Setting::Reset => builder.reset_authorize_typos(),
|
||||
@ -736,7 +736,7 @@ pub fn apply_settings_to_builder(
|
||||
}
|
||||
|
||||
match pagination {
|
||||
Setting::Set(ref value) => match value.max_total_hits {
|
||||
Setting::Set(value) => match value.max_total_hits {
|
||||
Setting::Set(val) => builder.set_pagination_max_total_hits(val),
|
||||
Setting::Reset => builder.reset_pagination_max_total_hits(),
|
||||
Setting::NotSet => (),
|
||||
@ -960,7 +960,7 @@ impl<'de> Deserialize<'de> for RankingRuleView {
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
struct Visitor;
|
||||
impl<'de> serde::de::Visitor<'de> for Visitor {
|
||||
impl serde::de::Visitor<'_> for Visitor {
|
||||
type Value = RankingRuleView;
|
||||
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(formatter, "the name of a valid ranking rule (string)")
|
||||
|
@ -66,7 +66,7 @@ where
|
||||
/// not supported on untagged enums.
|
||||
struct StarOrVisitor<T>(PhantomData<T>);
|
||||
|
||||
impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
|
||||
impl<T, FE> Visitor<'_> for StarOrVisitor<T>
|
||||
where
|
||||
T: FromStr<Err = FE>,
|
||||
FE: fmt::Display,
|
||||
|
@ -30,7 +30,11 @@ actix-web = { version = "4.9.0", default-features = false, features = [
|
||||
anyhow = { version = "1.0.95", features = ["backtrace"] }
|
||||
async-trait = "0.1.85"
|
||||
bstr = "1.11.3"
|
||||
byte-unit = { version = "5.1.6", features = ["serde"] }
|
||||
byte-unit = { version = "5.1.6", default-features = false, features = [
|
||||
"std",
|
||||
"byte",
|
||||
"serde",
|
||||
] }
|
||||
bytes = "1.9.0"
|
||||
clap = { version = "4.5.24", features = ["derive", "env"] }
|
||||
crossbeam-channel = "0.5.14"
|
||||
@ -136,7 +140,7 @@ reqwest = { version = "0.12.12", features = [
|
||||
sha-1 = { version = "0.10.1", optional = true }
|
||||
static-files = { version = "0.2.4", optional = true }
|
||||
tempfile = { version = "3.15.0", optional = true }
|
||||
zip = { version = "2.3.0", optional = true }
|
||||
zip = { version = "2.2.2", optional = true }
|
||||
|
||||
[features]
|
||||
default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]
|
||||
@ -166,5 +170,5 @@ german = ["meilisearch-types/german"]
|
||||
turkish = ["meilisearch-types/turkish"]
|
||||
|
||||
[package.metadata.mini-dashboard]
|
||||
assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.19/build.zip"
|
||||
sha1 = "7974430d5277c97f67cf6e95eec6faaac2788834"
|
||||
assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.18/build.zip"
|
||||
sha1 = "b408a30dcb6e20cddb0c153c23385bcac4c8e912"
|
||||
|
@ -329,8 +329,7 @@ impl Infos {
|
||||
http_addr: http_addr != default_http_addr(),
|
||||
http_payload_size_limit,
|
||||
experimental_max_number_of_batched_tasks,
|
||||
experimental_limit_batched_tasks_total_size:
|
||||
experimental_limit_batched_tasks_total_size.into(),
|
||||
experimental_limit_batched_tasks_total_size,
|
||||
task_queue_webhook: task_webhook_url.is_some(),
|
||||
task_webhook_authorization_header: task_webhook_authorization_header.is_some(),
|
||||
log_level: log_level.to_string(),
|
||||
|
@ -89,11 +89,11 @@ fn is_empty_db(db_path: impl AsRef<Path>) -> bool {
|
||||
if !db_path.exists() {
|
||||
true
|
||||
// if we encounter an error or if the db is a file we consider the db non empty
|
||||
} else if let Ok(dir) = db_path.read_dir() {
|
||||
} else { match db_path.read_dir() { Ok(dir) => {
|
||||
dir.count() == 0
|
||||
} else {
|
||||
} _ => {
|
||||
true
|
||||
}
|
||||
}}}
|
||||
}
|
||||
|
||||
/// The handle used to update the logs at runtime. Must be accessible from the `main.rs` and the `route/logs.rs`.
|
||||
@ -228,7 +228,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
|
||||
cleanup_enabled: !opt.experimental_replication_parameters,
|
||||
max_number_of_tasks: 1_000_000,
|
||||
max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
|
||||
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size.into(),
|
||||
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size,
|
||||
index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
|
||||
index_count: DEFAULT_INDEX_COUNT,
|
||||
instance_features: opt.to_instance_features(),
|
||||
@ -346,7 +346,7 @@ fn open_or_create_database_unchecked(
|
||||
match (
|
||||
index_scheduler_builder(),
|
||||
auth_controller.map_err(anyhow::Error::from),
|
||||
create_current_version_file(&opt.db_path).map_err(anyhow::Error::from),
|
||||
create_current_version_file(&opt.db_path),
|
||||
) {
|
||||
(Ok(i), Ok(a), Ok(())) => Ok((i, a)),
|
||||
(Err(e), _, _) | (_, Err(e), _) | (_, _, Err(e)) => {
|
||||
@ -466,18 +466,18 @@ fn import_dump(
|
||||
let reader = File::open(dump_path)?;
|
||||
let mut dump_reader = dump::DumpReader::open(reader)?;
|
||||
|
||||
if let Some(date) = dump_reader.date() {
|
||||
match dump_reader.date() { Some(date) => {
|
||||
tracing::info!(
|
||||
version = ?dump_reader.version(), // TODO: get the meilisearch version instead of the dump version
|
||||
%date,
|
||||
"Importing a dump of meilisearch"
|
||||
);
|
||||
} else {
|
||||
} _ => {
|
||||
tracing::info!(
|
||||
version = ?dump_reader.version(), // TODO: get the meilisearch version instead of the dump version
|
||||
"Importing a dump of meilisearch",
|
||||
);
|
||||
}
|
||||
}}
|
||||
|
||||
let instance_uid = dump_reader.instance_uid()?;
|
||||
|
||||
|
@ -69,7 +69,7 @@ fn setup(opt: &Opt) -> anyhow::Result<(LogRouteHandle, LogStderrHandle)> {
|
||||
Ok((route_layer_handle, stderr_layer_handle))
|
||||
}
|
||||
|
||||
fn on_panic(info: &std::panic::PanicInfo) {
|
||||
fn on_panic(info: &std::panic::PanicHookInfo) {
|
||||
let info = info.to_string().replace('\n', " ");
|
||||
tracing::error!(%info);
|
||||
}
|
||||
@ -178,11 +178,11 @@ async fn run_http(
|
||||
.disable_signals()
|
||||
.keep_alive(KeepAlive::Os);
|
||||
|
||||
if let Some(config) = opt_clone.get_ssl_config()? {
|
||||
match opt_clone.get_ssl_config()? { Some(config) => {
|
||||
http_server.bind_rustls_0_23(opt_clone.http_addr, config)?.run().await?;
|
||||
} else {
|
||||
} _ => {
|
||||
http_server.bind(&opt_clone.http_addr)?.run().await?;
|
||||
}
|
||||
}}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -445,7 +445,7 @@ pub struct Opt {
|
||||
/// see: <https://github.com/orgs/meilisearch/discussions/801>
|
||||
#[clap(long, env = MEILI_EXPERIMENTAL_LIMIT_BATCHED_TASKS_TOTAL_SIZE, default_value_t = default_limit_batched_tasks_total_size())]
|
||||
#[serde(default = "default_limit_batched_tasks_total_size")]
|
||||
pub experimental_limit_batched_tasks_total_size: Byte,
|
||||
pub experimental_limit_batched_tasks_total_size: u64,
|
||||
|
||||
/// Enables experimental caching of search query embeddings. The value represents the maximal number of entries in the cache of each
|
||||
/// distinct embedder.
|
||||
@ -907,7 +907,7 @@ fn load_private_key(
|
||||
fn load_ocsp(filename: &Option<PathBuf>) -> anyhow::Result<Vec<u8>> {
|
||||
let mut ret = Vec::new();
|
||||
|
||||
if let Some(ref name) = filename {
|
||||
if let Some(name) = filename {
|
||||
fs::File::open(name)
|
||||
.map_err(|_| anyhow::anyhow!("cannot open ocsp file"))?
|
||||
.read_to_end(&mut ret)
|
||||
@ -924,12 +924,12 @@ where
|
||||
T: AsRef<OsStr>,
|
||||
{
|
||||
if let Err(VarError::NotPresent) = std::env::var(key) {
|
||||
std::env::set_var(key, value);
|
||||
// TODO: Audit that the environment access only happens in single-threaded code.
|
||||
unsafe { std::env::set_var(key, value) };
|
||||
}
|
||||
}
|
||||
|
||||
/// Functions used to get default value for `Opt` fields, needs to be function because of serde's default attribute.
|
||||
|
||||
fn default_db_path() -> PathBuf {
|
||||
PathBuf::from(DEFAULT_DB_PATH)
|
||||
}
|
||||
@ -958,8 +958,8 @@ fn default_limit_batched_tasks() -> usize {
|
||||
usize::MAX
|
||||
}
|
||||
|
||||
fn default_limit_batched_tasks_total_size() -> Byte {
|
||||
Byte::from_u64(u64::MAX)
|
||||
fn default_limit_batched_tasks_total_size() -> u64 {
|
||||
u64::MAX
|
||||
}
|
||||
|
||||
fn default_embedding_cache_entries() -> usize {
|
||||
@ -1037,7 +1037,7 @@ where
|
||||
{
|
||||
struct BoolOrInt;
|
||||
|
||||
impl<'de> serde::de::Visitor<'de> for BoolOrInt {
|
||||
impl serde::de::Visitor<'_> for BoolOrInt {
|
||||
type Value = ScheduleSnapshot;
|
||||
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
|
@ -97,12 +97,12 @@ async fn get_batch(
|
||||
let filters = index_scheduler.filters();
|
||||
let (batches, _) = index_scheduler.get_batches_from_authorized_indexes(&query, filters)?;
|
||||
|
||||
if let Some(batch) = batches.first() {
|
||||
match batches.first() { Some(batch) => {
|
||||
let batch_view = BatchView::from_batch(batch);
|
||||
Ok(HttpResponse::Ok().json(batch_view))
|
||||
} else {
|
||||
} _ => {
|
||||
Err(index_scheduler::Error::BatchNotFound(batch_uid).into())
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, ToSchema)]
|
||||
|
@ -619,7 +619,7 @@ fn documents_by_query(
|
||||
|
||||
let retrieve_vectors = RetrieveVectors::new(retrieve_vectors);
|
||||
|
||||
let ids = if let Some(ids) = ids {
|
||||
let ids = match ids { Some(ids) => {
|
||||
let mut parsed_ids = Vec::with_capacity(ids.len());
|
||||
for (index, id) in ids.into_iter().enumerate() {
|
||||
let id = id.try_into().map_err(|error| {
|
||||
@ -629,9 +629,9 @@ fn documents_by_query(
|
||||
parsed_ids.push(id)
|
||||
}
|
||||
Some(parsed_ids)
|
||||
} else {
|
||||
} _ => {
|
||||
None
|
||||
};
|
||||
}};
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
let (total, documents) = retrieve_documents(
|
||||
|
@ -302,7 +302,7 @@ impl From<FacetSearchQuery> for SearchQuery {
|
||||
|
||||
// If exhaustive_facet_count is true, we need to set the page to 0
|
||||
// because the facet search is not exhaustive by default.
|
||||
let page = if exhaustive_facet_count.map_or(false, |exhaustive| exhaustive) {
|
||||
let page = if exhaustive_facet_count.is_some_and(|exhaustive| exhaustive) {
|
||||
// setting the page to 0 will force the search to be exhaustive when computing the number of hits,
|
||||
// but it will skip the bucket sort saving time.
|
||||
Some(0)
|
||||
|
@ -518,7 +518,7 @@ impl From<index_scheduler::IndexStats> for IndexStats {
|
||||
.inner_stats
|
||||
.number_of_documents
|
||||
.unwrap_or(stats.inner_stats.documents_database_stats.number_of_entries()),
|
||||
raw_document_db_size: stats.inner_stats.documents_database_stats.total_size(),
|
||||
raw_document_db_size: stats.inner_stats.documents_database_stats.total_value_size(),
|
||||
avg_document_size: stats.inner_stats.documents_database_stats.average_value_size(),
|
||||
is_indexing: stats.is_indexing,
|
||||
number_of_embeddings: stats.inner_stats.number_of_embeddings,
|
||||
|
@ -131,7 +131,7 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
|
||||
|
||||
ret.total_received = 1;
|
||||
|
||||
if let Some(ref sort) = sort {
|
||||
if let Some(sort) = sort {
|
||||
ret.sort_total_number_of_criteria = 1;
|
||||
ret.sort_with_geo_point = sort.iter().any(|s| s.contains("_geoPoint("));
|
||||
ret.sort_sum_of_criteria_terms = sort.len();
|
||||
@ -139,7 +139,7 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
|
||||
|
||||
ret.distinct = distinct.is_some();
|
||||
|
||||
if let Some(ref filter) = filter {
|
||||
if let Some(filter) = filter {
|
||||
static RE: Lazy<Regex> = Lazy::new(|| Regex::new("AND | OR").unwrap());
|
||||
ret.filter_total_number_of_criteria = 1;
|
||||
|
||||
@ -168,11 +168,11 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
|
||||
ret.attributes_to_search_on_total_number_of_uses = 1;
|
||||
}
|
||||
|
||||
if let Some(ref q) = q {
|
||||
if let Some(q) = q {
|
||||
ret.max_terms_number = q.split_whitespace().count();
|
||||
}
|
||||
|
||||
if let Some(ref vector) = vector {
|
||||
if let Some(vector) = vector {
|
||||
ret.max_vector_size = vector.len();
|
||||
}
|
||||
ret.retrieve_vectors |= retrieve_vectors;
|
||||
|
@ -67,7 +67,7 @@ impl<Method: AggregateMethod> SimilarAggregator<Method> {
|
||||
|
||||
ret.total_received = 1;
|
||||
|
||||
if let Some(ref filter) = filter {
|
||||
if let Some(filter) = filter {
|
||||
static RE: Lazy<Regex> = Lazy::new(|| Regex::new("AND | OR").unwrap());
|
||||
ret.filter_total_number_of_criteria = 1;
|
||||
|
||||
|
@ -341,11 +341,11 @@ pub async fn get_logs(
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
if let Some(stream) = stream {
|
||||
match stream { Some(stream) => {
|
||||
Ok(HttpResponse::Ok().streaming(stream))
|
||||
} else {
|
||||
} _ => {
|
||||
Err(MeilisearchHttpError::AlreadyUsedLogRoute.into())
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
/// Stop retrieving logs
|
||||
|
@ -64,6 +64,8 @@ mod open_api_utils;
|
||||
mod snapshot;
|
||||
mod swap_indexes;
|
||||
pub mod tasks;
|
||||
#[cfg(test)]
|
||||
mod tasks_test;
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
@ -168,7 +170,7 @@ pub fn is_dry_run(req: &HttpRequest, opt: &Opt) -> Result<bool, ResponseError> {
|
||||
})
|
||||
})
|
||||
.transpose()?
|
||||
.map_or(false, |s| s.to_lowercase() == "true"))
|
||||
.is_some_and(|s| s.to_lowercase() == "true"))
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, ToSchema)]
|
||||
|
@ -146,7 +146,7 @@ impl TasksFilterQuery {
|
||||
}
|
||||
|
||||
impl TaskDeletionOrCancelationQuery {
|
||||
fn is_empty(&self) -> bool {
|
||||
pub fn is_empty(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
TaskDeletionOrCancelationQuery {
|
||||
@ -638,12 +638,12 @@ async fn get_task(
|
||||
let filters = index_scheduler.filters();
|
||||
let (tasks, _) = index_scheduler.get_tasks_from_authorized_indexes(&query, filters)?;
|
||||
|
||||
if let Some(task) = tasks.first() {
|
||||
match tasks.first() { Some(task) => {
|
||||
let task_view = TaskView::from_task(task);
|
||||
Ok(HttpResponse::Ok().json(task_view))
|
||||
} else {
|
||||
} _ => {
|
||||
Err(index_scheduler::Error::TaskNotFound(task_uid).into())
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
/// Get a task's documents.
|
||||
@ -693,7 +693,7 @@ async fn get_task_documents_file(
|
||||
let filters = index_scheduler.filters();
|
||||
let (tasks, _) = index_scheduler.get_tasks_from_authorized_indexes(&query, filters)?;
|
||||
|
||||
if let Some(task) = tasks.first() {
|
||||
match tasks.first() { Some(task) => {
|
||||
match task.content_uuid() {
|
||||
Some(uuid) => {
|
||||
let mut tfile = match index_scheduler.queue.update_file(uuid) {
|
||||
@ -711,9 +711,9 @@ async fn get_task_documents_file(
|
||||
}
|
||||
None => Err(index_scheduler::Error::TaskFileNotFound(task_uid).into()),
|
||||
}
|
||||
} else {
|
||||
} _ => {
|
||||
Err(index_scheduler::Error::TaskNotFound(task_uid).into())
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
pub enum DeserializeDateOption {
|
||||
@ -760,356 +760,3 @@ pub fn deserialize_date_before(
|
||||
) -> std::result::Result<OptionStarOr<OffsetDateTime>, InvalidTaskDateError> {
|
||||
value.try_map(|x| deserialize_date(&x, DeserializeDateOption::Before))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use deserr::Deserr;
|
||||
use meili_snap::snapshot;
|
||||
use meilisearch_types::deserr::DeserrQueryParamError;
|
||||
use meilisearch_types::error::{Code, ResponseError};
|
||||
|
||||
use crate::routes::tasks::{TaskDeletionOrCancelationQuery, TasksFilterQuery};
|
||||
|
||||
fn deserr_query_params<T>(j: &str) -> Result<T, ResponseError>
|
||||
where
|
||||
T: Deserr<DeserrQueryParamError>,
|
||||
{
|
||||
let value = serde_urlencoded::from_str::<serde_json::Value>(j)
|
||||
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::BadRequest))?;
|
||||
|
||||
match deserr::deserialize::<_, _, DeserrQueryParamError>(value) {
|
||||
Ok(data) => Ok(data),
|
||||
Err(e) => Err(ResponseError::from(e)),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_dates() {
|
||||
{
|
||||
let params = "afterEnqueuedAt=2021-12-03&beforeEnqueuedAt=2021-12-03&afterStartedAt=2021-12-03&beforeStartedAt=2021-12-03&afterFinishedAt=2021-12-03&beforeFinishedAt=2021-12-03";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(2021-12-04 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_enqueued_at), @"Other(2021-12-03 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.after_started_at), @"Other(2021-12-04 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_started_at), @"Other(2021-12-03 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.after_finished_at), @"Other(2021-12-04 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_finished_at), @"Other(2021-12-03 0:00:00.0 +00:00:00)");
|
||||
}
|
||||
{
|
||||
let params =
|
||||
"afterEnqueuedAt=2021-12-03T23:45:23Z&beforeEnqueuedAt=2021-12-03T23:45:23Z";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(2021-12-03 23:45:23.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_enqueued_at), @"Other(2021-12-03 23:45:23.0 +00:00:00)");
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=1997-11-12T09:55:06-06:20";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(1997-11-12 9:55:06.0 -06:20:00)");
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=1997-11-12T09:55:06%2B00:00";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(1997-11-12 9:55:06.0 +00:00:00)");
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=1997-11-12T09:55:06.200000300Z";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(1997-11-12 9:55:06.2000003 +00:00:00)");
|
||||
}
|
||||
{
|
||||
// Stars are allowed in date fields as well
|
||||
let params = "afterEnqueuedAt=*&beforeStartedAt=*&afterFinishedAt=*&beforeFinishedAt=*&afterStartedAt=*&beforeEnqueuedAt=*";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @"TaskDeletionOrCancelationQuery { uids: None, batch_uids: None, canceled_by: None, types: None, statuses: None, index_uids: None, after_enqueued_at: Star, before_enqueued_at: Star, after_started_at: Star, before_started_at: Star, after_finished_at: Star, before_finished_at: Star }");
|
||||
}
|
||||
{
|
||||
let params = "afterFinishedAt=2021";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `afterFinishedAt`: `2021` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_after_finished_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_after_finished_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "beforeFinishedAt=2021";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `beforeFinishedAt`: `2021` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_before_finished_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_before_finished_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=2021-12";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `afterEnqueuedAt`: `2021-12` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_after_enqueued_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_after_enqueued_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
{
|
||||
let params = "beforeEnqueuedAt=2021-12-03T23";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `beforeEnqueuedAt`: `2021-12-03T23` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_before_enqueued_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_before_enqueued_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "afterStartedAt=2021-12-03T23:45";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `afterStartedAt`: `2021-12-03T23:45` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_after_started_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_after_started_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "beforeStartedAt=2021-12-03T23:45";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `beforeStartedAt`: `2021-12-03T23:45` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_before_started_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_before_started_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_uids() {
|
||||
{
|
||||
let params = "uids=78,1,12,73";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.uids), @"List([78, 1, 12, 73])");
|
||||
}
|
||||
{
|
||||
let params = "uids=1";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.uids), @"List([1])");
|
||||
}
|
||||
{
|
||||
let params = "uids=cat,*,dog";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `uids[0]`: could not parse `cat` as a positive integer",
|
||||
"code": "invalid_task_uids",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_uids"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "uids=78,hello,world";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `uids[1]`: could not parse `hello` as a positive integer",
|
||||
"code": "invalid_task_uids",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_uids"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "uids=cat";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `uids`: could not parse `cat` as a positive integer",
|
||||
"code": "invalid_task_uids",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_uids"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_status() {
|
||||
{
|
||||
let params = "statuses=succeeded,failed,enqueued,processing,canceled";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.statuses), @"List([Succeeded, Failed, Enqueued, Processing, Canceled])");
|
||||
}
|
||||
{
|
||||
let params = "statuses=enqueued";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.statuses), @"List([Enqueued])");
|
||||
}
|
||||
{
|
||||
let params = "statuses=finished";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `statuses`: `finished` is not a valid task status. Available statuses are `enqueued`, `processing`, `succeeded`, `failed`, `canceled`.",
|
||||
"code": "invalid_task_statuses",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_statuses"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn deserialize_task_filter_types() {
|
||||
{
|
||||
let params = "types=documentAdditionOrUpdate,documentDeletion,settingsUpdate,indexCreation,indexDeletion,indexUpdate,indexSwap,taskCancelation,taskDeletion,dumpCreation,snapshotCreation";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.types), @"List([DocumentAdditionOrUpdate, DocumentDeletion, SettingsUpdate, IndexCreation, IndexDeletion, IndexUpdate, IndexSwap, TaskCancelation, TaskDeletion, DumpCreation, SnapshotCreation])");
|
||||
}
|
||||
{
|
||||
let params = "types=settingsUpdate";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.types), @"List([SettingsUpdate])");
|
||||
}
|
||||
{
|
||||
let params = "types=createIndex";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r#"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `upgradeDatabase`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
}
|
||||
"#);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn deserialize_task_filter_index_uids() {
|
||||
{
|
||||
let params = "indexUids=toto,tata-78";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.index_uids), @r###"List([IndexUid("toto"), IndexUid("tata-78")])"###);
|
||||
}
|
||||
{
|
||||
let params = "indexUids=index_a";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.index_uids), @r###"List([IndexUid("index_a")])"###);
|
||||
}
|
||||
{
|
||||
let params = "indexUids=1,hé";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `indexUids[1]`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.",
|
||||
"code": "invalid_index_uid",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "indexUids=hé";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `indexUids`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.",
|
||||
"code": "invalid_index_uid",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_general() {
|
||||
{
|
||||
let params = "from=12&limit=15&indexUids=toto,tata-78&statuses=succeeded,enqueued&afterEnqueuedAt=2012-04-23&uids=1,2,3";
|
||||
let query = deserr_query_params::<TasksFilterQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @r###"TasksFilterQuery { limit: Param(15), from: Some(Param(12)), reverse: None, batch_uids: None, uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: List([Succeeded, Enqueued]), index_uids: List([IndexUid("toto"), IndexUid("tata-78")]), after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }"###);
|
||||
}
|
||||
{
|
||||
// Stars should translate to `None` in the query
|
||||
// Verify value of the default limit
|
||||
let params = "indexUids=*&statuses=succeeded,*&afterEnqueuedAt=2012-04-23&uids=1,2,3";
|
||||
let query = deserr_query_params::<TasksFilterQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @"TasksFilterQuery { limit: Param(20), from: None, reverse: None, batch_uids: None, uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: Star, index_uids: Star, after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
|
||||
}
|
||||
{
|
||||
// Stars should also translate to `None` in task deletion/cancelation queries
|
||||
let params = "indexUids=*&statuses=succeeded,*&afterEnqueuedAt=2012-04-23&uids=1,2,3";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @"TaskDeletionOrCancelationQuery { uids: List([1, 2, 3]), batch_uids: None, canceled_by: None, types: None, statuses: Star, index_uids: Star, after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
|
||||
}
|
||||
{
|
||||
// Star in from not allowed
|
||||
let params = "uids=*&from=*";
|
||||
let err = deserr_query_params::<TasksFilterQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `from`: could not parse `*` as a positive integer",
|
||||
"code": "invalid_task_from",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_from"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
// From not allowed in task deletion/cancelation queries
|
||||
let params = "from=12";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Unknown parameter `from`: expected one of `uids`, `batchUids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
|
||||
"code": "bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#bad_request"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
// Limit not allowed in task deletion/cancelation queries
|
||||
let params = "limit=12";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Unknown parameter `limit`: expected one of `uids`, `batchUids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
|
||||
"code": "bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#bad_request"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_delete_or_cancel_empty() {
|
||||
{
|
||||
let params = "";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
assert!(query.is_empty());
|
||||
}
|
||||
{
|
||||
let params = "statuses=*";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
assert!(!query.is_empty());
|
||||
snapshot!(format!("{query:?}"), @"TaskDeletionOrCancelationQuery { uids: None, batch_uids: None, canceled_by: None, types: None, statuses: Star, index_uids: None, after_enqueued_at: None, before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
352
crates/meilisearch/src/routes/tasks_test.rs
Normal file
352
crates/meilisearch/src/routes/tasks_test.rs
Normal file
@ -0,0 +1,352 @@
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use deserr::Deserr;
|
||||
use meili_snap::snapshot;
|
||||
use meilisearch_types::deserr::DeserrQueryParamError;
|
||||
use meilisearch_types::error::{Code, ResponseError};
|
||||
|
||||
use crate::routes::tasks::{TaskDeletionOrCancelationQuery, TasksFilterQuery};
|
||||
|
||||
fn deserr_query_params<T>(j: &str) -> Result<T, ResponseError>
|
||||
where
|
||||
T: Deserr<DeserrQueryParamError>,
|
||||
{
|
||||
let value = serde_urlencoded::from_str::<serde_json::Value>(j)
|
||||
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::BadRequest))?;
|
||||
|
||||
match deserr::deserialize::<_, _, DeserrQueryParamError>(value) {
|
||||
Ok(data) => Ok(data),
|
||||
Err(e) => Err(ResponseError::from(e)),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_dates() {
|
||||
{
|
||||
let params = "afterEnqueuedAt=2021-12-03&beforeEnqueuedAt=2021-12-03&afterStartedAt=2021-12-03&beforeStartedAt=2021-12-03&afterFinishedAt=2021-12-03&beforeFinishedAt=2021-12-03";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(2021-12-04 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_enqueued_at), @"Other(2021-12-03 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.after_started_at), @"Other(2021-12-04 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_started_at), @"Other(2021-12-03 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.after_finished_at), @"Other(2021-12-04 0:00:00.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_finished_at), @"Other(2021-12-03 0:00:00.0 +00:00:00)");
|
||||
}
|
||||
{
|
||||
let params =
|
||||
"afterEnqueuedAt=2021-12-03T23:45:23Z&beforeEnqueuedAt=2021-12-03T23:45:23Z";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(2021-12-03 23:45:23.0 +00:00:00)");
|
||||
snapshot!(format!("{:?}", query.before_enqueued_at), @"Other(2021-12-03 23:45:23.0 +00:00:00)");
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=1997-11-12T09:55:06-06:20";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(1997-11-12 9:55:06.0 -06:20:00)");
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=1997-11-12T09:55:06%2B00:00";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(1997-11-12 9:55:06.0 +00:00:00)");
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=1997-11-12T09:55:06.200000300Z";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.after_enqueued_at), @"Other(1997-11-12 9:55:06.2000003 +00:00:00)");
|
||||
}
|
||||
{
|
||||
// Stars are allowed in date fields as well
|
||||
let params = "afterEnqueuedAt=*&beforeStartedAt=*&afterFinishedAt=*&beforeFinishedAt=*&afterStartedAt=*&beforeEnqueuedAt=*";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @"TaskDeletionOrCancelationQuery { uids: None, batch_uids: None, canceled_by: None, types: None, statuses: None, index_uids: None, after_enqueued_at: Star, before_enqueued_at: Star, after_started_at: Star, before_started_at: Star, after_finished_at: Star, before_finished_at: Star }");
|
||||
}
|
||||
{
|
||||
let params = "afterFinishedAt=2021";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `afterFinishedAt`: `2021` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_after_finished_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_after_finished_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "beforeFinishedAt=2021";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `beforeFinishedAt`: `2021` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_before_finished_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_before_finished_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "afterEnqueuedAt=2021-12";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `afterEnqueuedAt`: `2021-12` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_after_enqueued_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_after_enqueued_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
{
|
||||
let params = "beforeEnqueuedAt=2021-12-03T23";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `beforeEnqueuedAt`: `2021-12-03T23` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_before_enqueued_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_before_enqueued_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "afterStartedAt=2021-12-03T23:45";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `afterStartedAt`: `2021-12-03T23:45` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_after_started_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_after_started_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "beforeStartedAt=2021-12-03T23:45";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `beforeStartedAt`: `2021-12-03T23:45` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
|
||||
"code": "invalid_task_before_started_at",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_before_started_at"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_uids() {
|
||||
{
|
||||
let params = "uids=78,1,12,73";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.uids), @"List([78, 1, 12, 73])");
|
||||
}
|
||||
{
|
||||
let params = "uids=1";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.uids), @"List([1])");
|
||||
}
|
||||
{
|
||||
let params = "uids=cat,*,dog";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `uids[0]`: could not parse `cat` as a positive integer",
|
||||
"code": "invalid_task_uids",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_uids"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "uids=78,hello,world";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `uids[1]`: could not parse `hello` as a positive integer",
|
||||
"code": "invalid_task_uids",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_uids"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "uids=cat";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `uids`: could not parse `cat` as a positive integer",
|
||||
"code": "invalid_task_uids",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_uids"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_status() {
|
||||
{
|
||||
let params = "statuses=succeeded,failed,enqueued,processing,canceled";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.statuses), @"List([Succeeded, Failed, Enqueued, Processing, Canceled])");
|
||||
}
|
||||
{
|
||||
let params = "statuses=enqueued";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.statuses), @"List([Enqueued])");
|
||||
}
|
||||
{
|
||||
let params = "statuses=finished";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `statuses`: `finished` is not a valid task status. Available statuses are `enqueued`, `processing`, `succeeded`, `failed`, `canceled`.",
|
||||
"code": "invalid_task_statuses",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_statuses"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn deserialize_task_filter_types() {
|
||||
{
|
||||
let params = "types=documentAdditionOrUpdate,documentDeletion,settingsUpdate,indexCreation,indexDeletion,indexUpdate,indexSwap,taskCancelation,taskDeletion,dumpCreation,snapshotCreation";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.types), @"List([DocumentAdditionOrUpdate, DocumentDeletion, SettingsUpdate, IndexCreation, IndexDeletion, IndexUpdate, IndexSwap, TaskCancelation, TaskDeletion, DumpCreation, SnapshotCreation])");
|
||||
}
|
||||
{
|
||||
let params = "types=settingsUpdate";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.types), @"List([SettingsUpdate])");
|
||||
}
|
||||
{
|
||||
let params = "types=createIndex";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r#"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `upgradeDatabase`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
}
|
||||
"#);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn deserialize_task_filter_index_uids() {
|
||||
{
|
||||
let params = "indexUids=toto,tata-78";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.index_uids), @r###"List([IndexUid("toto"), IndexUid("tata-78")])"###);
|
||||
}
|
||||
{
|
||||
let params = "indexUids=index_a";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query.index_uids), @r###"List([IndexUid("index_a")])"###);
|
||||
}
|
||||
{
|
||||
let params = "indexUids=1,hé";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `indexUids[1]`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.",
|
||||
"code": "invalid_index_uid",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
let params = "indexUids=hé";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `indexUids`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.",
|
||||
"code": "invalid_index_uid",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_filter_general() {
|
||||
{
|
||||
let params = "from=12&limit=15&indexUids=toto,tata-78&statuses=succeeded,enqueued&afterEnqueuedAt=2012-04-23&uids=1,2,3";
|
||||
let query = deserr_query_params::<TasksFilterQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @r###"TasksFilterQuery { limit: Param(15), from: Some(Param(12)), reverse: None, batch_uids: None, uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: List([Succeeded, Enqueued]), index_uids: List([IndexUid("toto"), IndexUid("tata-78")]), after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }"###);
|
||||
}
|
||||
{
|
||||
// Stars should translate to `None` in the query
|
||||
// Verify value of the default limit
|
||||
let params = "indexUids=*&statuses=succeeded,*&afterEnqueuedAt=2012-04-23&uids=1,2,3";
|
||||
let query = deserr_query_params::<TasksFilterQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @"TasksFilterQuery { limit: Param(20), from: None, reverse: None, batch_uids: None, uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: Star, index_uids: Star, after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
|
||||
}
|
||||
{
|
||||
// Stars should also translate to `None` in task deletion/cancelation queries
|
||||
let params = "indexUids=*&statuses=succeeded,*&afterEnqueuedAt=2012-04-23&uids=1,2,3";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
snapshot!(format!("{:?}", query), @"TaskDeletionOrCancelationQuery { uids: List([1, 2, 3]), batch_uids: None, canceled_by: None, types: None, statuses: Star, index_uids: Star, after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
|
||||
}
|
||||
{
|
||||
// Star in from not allowed
|
||||
let params = "uids=*&from=*";
|
||||
let err = deserr_query_params::<TasksFilterQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `from`: could not parse `*` as a positive integer",
|
||||
"code": "invalid_task_from",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_from"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
// From not allowed in task deletion/cancelation queries
|
||||
let params = "from=12";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Unknown parameter `from`: expected one of `uids`, `batchUids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
|
||||
"code": "bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#bad_request"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
{
|
||||
// Limit not allowed in task deletion/cancelation queries
|
||||
let params = "limit=12";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Unknown parameter `limit`: expected one of `uids`, `batchUids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
|
||||
"code": "bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#bad_request"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_task_delete_or_cancel_empty() {
|
||||
{
|
||||
let params = "";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
assert!(query.is_empty());
|
||||
}
|
||||
{
|
||||
let params = "statuses=*";
|
||||
let query = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap();
|
||||
assert!(!query.is_empty());
|
||||
snapshot!(format!("{query:?}"), @"TaskDeletionOrCancelationQuery { uids: None, batch_uids: None, canceled_by: None, types: None, statuses: Star, index_uids: None, after_enqueued_at: None, before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
|
||||
}
|
||||
}
|
||||
}
|
@ -740,7 +740,7 @@ impl SearchByIndex {
|
||||
_ => ranking_rules::CanonicalizationKind::Placeholder,
|
||||
};
|
||||
|
||||
let sort = if let Some(sort) = &query.sort {
|
||||
let sort = match &query.sort { Some(sort) => {
|
||||
let sorts: Vec<_> =
|
||||
match sort.iter().map(|s| milli::AscDesc::from_str(s)).collect() {
|
||||
Ok(sorts) => sorts,
|
||||
@ -752,9 +752,9 @@ impl SearchByIndex {
|
||||
}
|
||||
};
|
||||
Some(sorts)
|
||||
} else {
|
||||
} _ => {
|
||||
None
|
||||
};
|
||||
}};
|
||||
|
||||
let ranking_rules = ranking_rules::RankingRules::new(
|
||||
criteria.clone(),
|
||||
|
@ -32,7 +32,6 @@ pub const FEDERATION_REMOTE: &str = "remote";
|
||||
#[derive(Debug, Default, Clone, PartialEq, Serialize, deserr::Deserr, ToSchema)]
|
||||
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
|
||||
pub struct FederationOptions {
|
||||
#[deserr(default, error = DeserrJsonError<InvalidMultiSearchWeight>)]
|
||||
#[schema(value_type = f64)]
|
||||
|
@ -1331,15 +1331,15 @@ impl<'a> HitMaker<'a> {
|
||||
let displayed_ids =
|
||||
displayed_ids.unwrap_or_else(|| fields_ids_map.iter().map(|(id, _)| id).collect());
|
||||
|
||||
let retrieve_vectors = if let RetrieveVectors::Retrieve = format.retrieve_vectors {
|
||||
let retrieve_vectors = match format.retrieve_vectors { RetrieveVectors::Retrieve => {
|
||||
if vectors_is_hidden {
|
||||
RetrieveVectors::Hide
|
||||
} else {
|
||||
RetrieveVectors::Retrieve
|
||||
}
|
||||
} else {
|
||||
} _ => {
|
||||
format.retrieve_vectors
|
||||
};
|
||||
}};
|
||||
|
||||
let fids = |attrs: &BTreeSet<String>| {
|
||||
let mut ids = BTreeSet::new();
|
||||
@ -1544,7 +1544,7 @@ pub fn perform_facet_search(
|
||||
let locales = localized_attributes_locales.map(|attr| {
|
||||
attr.locales
|
||||
.into_iter()
|
||||
.filter(|locale| locales.as_ref().map_or(true, |locales| locales.contains(locale)))
|
||||
.filter(|locale| locales.as_ref().is_none_or(|locales| locales.contains(locale)))
|
||||
.collect()
|
||||
});
|
||||
|
||||
|
@ -94,7 +94,7 @@ static REFUSED_KEYS: Lazy<Vec<Value>> = Lazy::new(|| {
|
||||
});
|
||||
|
||||
macro_rules! compute_authorized_search {
|
||||
($tenant_tokens:expr, $filter:expr, $expected_count:expr) => {
|
||||
($tenant_tokens:expr_2021, $filter:expr_2021, $expected_count:expr_2021) => {
|
||||
let mut server = Server::new_auth().await;
|
||||
server.use_admin_key("MASTER_KEY").await;
|
||||
let index = server.index("sales");
|
||||
@ -141,7 +141,7 @@ macro_rules! compute_authorized_search {
|
||||
}
|
||||
|
||||
macro_rules! compute_forbidden_search {
|
||||
($tenant_tokens:expr, $parent_keys:expr) => {
|
||||
($tenant_tokens:expr_2021, $parent_keys:expr_2021) => {
|
||||
let mut server = Server::new_auth().await;
|
||||
server.use_admin_key("MASTER_KEY").await;
|
||||
let index = server.index("sales");
|
||||
|
@ -262,7 +262,7 @@ static BOTH_REFUSED_KEYS: Lazy<Vec<Value>> = Lazy::new(|| {
|
||||
});
|
||||
|
||||
macro_rules! compute_authorized_single_search {
|
||||
($tenant_tokens:expr, $filter:expr, $expected_count:expr) => {
|
||||
($tenant_tokens:expr_2021, $filter:expr_2021, $expected_count:expr_2021) => {
|
||||
let mut server = Server::new_auth().await;
|
||||
server.use_admin_key("MASTER_KEY").await;
|
||||
let index = server.index("sales");
|
||||
@ -333,7 +333,7 @@ macro_rules! compute_authorized_single_search {
|
||||
}
|
||||
|
||||
macro_rules! compute_authorized_multiple_search {
|
||||
($tenant_tokens:expr, $filter1:expr, $filter2:expr, $expected_count1:expr, $expected_count2:expr) => {
|
||||
($tenant_tokens:expr_2021, $filter1:expr_2021, $filter2:expr_2021, $expected_count1:expr_2021, $expected_count2:expr_2021) => {
|
||||
let mut server = Server::new_auth().await;
|
||||
server.use_admin_key("MASTER_KEY").await;
|
||||
let index = server.index("sales");
|
||||
@ -417,7 +417,7 @@ macro_rules! compute_authorized_multiple_search {
|
||||
}
|
||||
|
||||
macro_rules! compute_forbidden_single_search {
|
||||
($tenant_tokens:expr, $parent_keys:expr, $failed_query_indexes:expr) => {
|
||||
($tenant_tokens:expr_2021, $parent_keys:expr_2021, $failed_query_indexes:expr_2021) => {
|
||||
let mut server = Server::new_auth().await;
|
||||
server.use_admin_key("MASTER_KEY").await;
|
||||
let index = server.index("sales");
|
||||
@ -493,7 +493,7 @@ macro_rules! compute_forbidden_single_search {
|
||||
}
|
||||
|
||||
macro_rules! compute_forbidden_multiple_search {
|
||||
($tenant_tokens:expr, $parent_keys:expr, $failed_query_indexes:expr) => {
|
||||
($tenant_tokens:expr_2021, $parent_keys:expr_2021, $failed_query_indexes:expr_2021) => {
|
||||
let mut server = Server::new_auth().await;
|
||||
server.use_admin_key("MASTER_KEY").await;
|
||||
let index = server.index("sales");
|
||||
|
@ -281,8 +281,7 @@ async fn test_summarized_document_addition_or_update() {
|
||||
".startedAt" => "[date]",
|
||||
".finishedAt" => "[date]",
|
||||
".stats.progressTrace" => "[progressTrace]",
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]"
|
||||
},
|
||||
@r###"
|
||||
{
|
||||
@ -304,8 +303,7 @@ async fn test_summarized_document_addition_or_update() {
|
||||
"test": 1
|
||||
},
|
||||
"progressTrace": "[progressTrace]",
|
||||
"writeChannelCongestion": "[writeChannelCongestion]",
|
||||
"internalDatabaseSizes": "[internalDatabaseSizes]"
|
||||
"writeChannelCongestion": "[writeChannelCongestion]"
|
||||
},
|
||||
"duration": "[duration]",
|
||||
"startedAt": "[date]",
|
||||
@ -324,8 +322,7 @@ async fn test_summarized_document_addition_or_update() {
|
||||
".startedAt" => "[date]",
|
||||
".finishedAt" => "[date]",
|
||||
".stats.progressTrace" => "[progressTrace]",
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]"
|
||||
},
|
||||
@r###"
|
||||
{
|
||||
@ -410,8 +407,7 @@ async fn test_summarized_delete_documents_by_batch() {
|
||||
".startedAt" => "[date]",
|
||||
".finishedAt" => "[date]",
|
||||
".stats.progressTrace" => "[progressTrace]",
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]"
|
||||
},
|
||||
@r###"
|
||||
{
|
||||
@ -499,8 +495,7 @@ async fn test_summarized_delete_documents_by_filter() {
|
||||
".startedAt" => "[date]",
|
||||
".finishedAt" => "[date]",
|
||||
".stats.progressTrace" => "[progressTrace]",
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]"
|
||||
},
|
||||
@r###"
|
||||
{
|
||||
@ -542,8 +537,7 @@ async fn test_summarized_delete_documents_by_filter() {
|
||||
".startedAt" => "[date]",
|
||||
".finishedAt" => "[date]",
|
||||
".stats.progressTrace" => "[progressTrace]",
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]"
|
||||
},
|
||||
@r#"
|
||||
{
|
||||
@ -629,8 +623,7 @@ async fn test_summarized_delete_document_by_id() {
|
||||
".startedAt" => "[date]",
|
||||
".finishedAt" => "[date]",
|
||||
".stats.progressTrace" => "[progressTrace]",
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]"
|
||||
},
|
||||
@r#"
|
||||
{
|
||||
@ -686,8 +679,7 @@ async fn test_summarized_settings_update() {
|
||||
".startedAt" => "[date]",
|
||||
".finishedAt" => "[date]",
|
||||
".stats.progressTrace" => "[progressTrace]",
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".stats.internalDatabaseSizes" => "[internalDatabaseSizes]"
|
||||
".stats.writeChannelCongestion" => "[writeChannelCongestion]"
|
||||
},
|
||||
@r###"
|
||||
{
|
||||
|
@ -63,7 +63,7 @@ impl Encoder {
|
||||
buffer
|
||||
}
|
||||
|
||||
pub fn header(self: &Encoder) -> Option<impl TryIntoHeaderPair> {
|
||||
pub fn header(self: &Encoder) -> Option<impl TryIntoHeaderPair + use<>> {
|
||||
match self {
|
||||
Self::Plain => None,
|
||||
Self::Gzip => Some(("Content-Encoding", "gzip")),
|
||||
|
@ -259,7 +259,7 @@ impl<'a> Index<'a, Owned> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Index<'a, Shared> {
|
||||
impl Index<'_, Shared> {
|
||||
/// You cannot modify the content of a shared index, thus the delete_document_by_filter call
|
||||
/// must fail. If the task successfully enqueue itself, we'll wait for the task to finishes,
|
||||
/// and if it succeed the function will panic.
|
||||
|
@ -25,13 +25,13 @@ pub struct Value(pub serde_json::Value);
|
||||
impl Value {
|
||||
#[track_caller]
|
||||
pub fn uid(&self) -> u64 {
|
||||
if let Some(uid) = self["uid"].as_u64() {
|
||||
match self["uid"].as_u64() { Some(uid) => {
|
||||
uid
|
||||
} else if let Some(uid) = self["taskUid"].as_u64() {
|
||||
} _ => { match self["taskUid"].as_u64() { Some(uid) => {
|
||||
uid
|
||||
} else {
|
||||
} _ => {
|
||||
panic!("Didn't find any task id in: {self}");
|
||||
}
|
||||
}}}}
|
||||
}
|
||||
|
||||
pub fn has_uid(&self) -> bool {
|
||||
@ -150,7 +150,7 @@ macro_rules! json {
|
||||
/// Performs a search test on both post and get routes
|
||||
#[macro_export]
|
||||
macro_rules! test_post_get_search {
|
||||
($server:expr, $query:expr, |$response:ident, $status_code:ident | $block:expr) => {
|
||||
($server:expr_2021, $query:expr_2021, |$response:ident, $status_code:ident | $block:expr_2021) => {
|
||||
let post_query: meilisearch::routes::search::SearchQueryPost =
|
||||
serde_json::from_str(&$query.clone().to_string()).unwrap();
|
||||
let get_query: meilisearch::routes::search::SearchQuery = post_query.into();
|
||||
|
@ -43,9 +43,11 @@ impl Server<Owned> {
|
||||
let dir = TempDir::new().unwrap();
|
||||
|
||||
if cfg!(windows) {
|
||||
std::env::set_var("TMP", TEST_TEMP_DIR.path());
|
||||
// TODO: Audit that the environment access only happens in single-threaded code.
|
||||
unsafe { std::env::set_var("TMP", TEST_TEMP_DIR.path()) };
|
||||
} else {
|
||||
std::env::set_var("TMPDIR", TEST_TEMP_DIR.path());
|
||||
// TODO: Audit that the environment access only happens in single-threaded code.
|
||||
unsafe { std::env::set_var("TMPDIR", TEST_TEMP_DIR.path()) };
|
||||
}
|
||||
|
||||
let options = default_settings(dir.path());
|
||||
@ -58,9 +60,11 @@ impl Server<Owned> {
|
||||
|
||||
pub async fn new_auth_with_options(mut options: Opt, dir: TempDir) -> Self {
|
||||
if cfg!(windows) {
|
||||
std::env::set_var("TMP", TEST_TEMP_DIR.path());
|
||||
// TODO: Audit that the environment access only happens in single-threaded code.
|
||||
unsafe { std::env::set_var("TMP", TEST_TEMP_DIR.path()) };
|
||||
} else {
|
||||
std::env::set_var("TMPDIR", TEST_TEMP_DIR.path());
|
||||
// TODO: Audit that the environment access only happens in single-threaded code.
|
||||
unsafe { std::env::set_var("TMPDIR", TEST_TEMP_DIR.path()) };
|
||||
}
|
||||
|
||||
options.master_key = Some("MASTER_KEY".to_string());
|
||||
@ -191,9 +195,11 @@ impl Server<Shared> {
|
||||
let dir = TempDir::new().unwrap();
|
||||
|
||||
if cfg!(windows) {
|
||||
std::env::set_var("TMP", TEST_TEMP_DIR.path());
|
||||
// TODO: Audit that the environment access only happens in single-threaded code.
|
||||
unsafe { std::env::set_var("TMP", TEST_TEMP_DIR.path()) };
|
||||
} else {
|
||||
std::env::set_var("TMPDIR", TEST_TEMP_DIR.path());
|
||||
// TODO: Audit that the environment access only happens in single-threaded code.
|
||||
unsafe { std::env::set_var("TMPDIR", TEST_TEMP_DIR.path()) };
|
||||
}
|
||||
|
||||
let options = default_settings(dir.path());
|
||||
@ -296,9 +302,9 @@ impl<State> Server<State> {
|
||||
&self,
|
||||
) -> impl actix_web::dev::Service<
|
||||
actix_http::Request,
|
||||
Response = ServiceResponse<impl MessageBody>,
|
||||
Response = ServiceResponse<impl MessageBody + use<State>>,
|
||||
Error = actix_web::Error,
|
||||
> {
|
||||
> + use<State> {
|
||||
self.service.init_web_app().await
|
||||
}
|
||||
|
||||
@ -399,7 +405,18 @@ impl<State> Server<State> {
|
||||
pub async fn wait_task(&self, update_id: u64) -> Value {
|
||||
// try several times to get status, or panic to not wait forever
|
||||
let url = format!("/tasks/{}", update_id);
|
||||
for _ in 0..100 {
|
||||
// Increase timeout for vector-related tests
|
||||
let max_attempts = if url.contains("/tasks/") {
|
||||
if update_id > 1000 {
|
||||
400 // 200 seconds for vector tests
|
||||
} else {
|
||||
100 // 50 seconds for other tests
|
||||
}
|
||||
} else {
|
||||
100 // 50 seconds for other tests
|
||||
};
|
||||
|
||||
for _ in 0..max_attempts {
|
||||
let (response, status_code) = self.service.get(&url).await;
|
||||
assert_eq!(200, status_code, "response: {}", response);
|
||||
|
||||
|
@ -116,9 +116,9 @@ impl Service {
|
||||
&self,
|
||||
) -> impl actix_web::dev::Service<
|
||||
actix_http::Request,
|
||||
Response = ServiceResponse<impl MessageBody>,
|
||||
Response = ServiceResponse<impl MessageBody + use<>>,
|
||||
Error = actix_web::Error,
|
||||
> {
|
||||
> + use<> {
|
||||
let (_route_layer, route_layer_handle) =
|
||||
tracing_subscriber::reload::Layer::new(None.with_filter(
|
||||
tracing_subscriber::filter::Targets::new().with_target("", LevelFilter::OFF),
|
||||
|
@ -1897,11 +1897,11 @@ async fn update_documents_with_geo_field() {
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"_geo": { "lat": 3, "lng": 0 },
|
||||
"_geo": { "lat": 1, "lng": 1 },
|
||||
},
|
||||
{
|
||||
"id": "4",
|
||||
"_geo": { "lat": "4", "lng": "0" },
|
||||
"_geo": { "lat": "1", "lng": "1" },
|
||||
},
|
||||
]);
|
||||
|
||||
@ -1928,7 +1928,9 @@ async fn update_documents_with_geo_field() {
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
|
||||
let (response, code) = index
|
||||
.search_post(json!({"sort": ["_geoPoint(50.629973371633746,3.0569447399419567):desc"]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
// we are expecting docs 4 and 3 first as they have geo
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
|
||||
@ -1938,18 +1940,18 @@ async fn update_documents_with_geo_field() {
|
||||
{
|
||||
"id": "4",
|
||||
"_geo": {
|
||||
"lat": "4",
|
||||
"lng": "0"
|
||||
"lat": "1",
|
||||
"lng": "1"
|
||||
},
|
||||
"_geoDistance": 667170
|
||||
"_geoDistance": 5522018
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"_geo": {
|
||||
"lat": 3,
|
||||
"lng": 0
|
||||
"lat": 1,
|
||||
"lng": 1
|
||||
},
|
||||
"_geoDistance": 778364
|
||||
"_geoDistance": 5522018
|
||||
},
|
||||
{
|
||||
"id": "1"
|
||||
@ -1967,13 +1969,10 @@ async fn update_documents_with_geo_field() {
|
||||
}
|
||||
"###);
|
||||
|
||||
let updated_documents = json!([
|
||||
{
|
||||
"id": "3",
|
||||
"doggo": "kefir",
|
||||
"_geo": { "lat": 5, "lng": 0 },
|
||||
}
|
||||
]);
|
||||
let updated_documents = json!([{
|
||||
"id": "3",
|
||||
"doggo": "kefir",
|
||||
}]);
|
||||
let (task, _status_code) = index.update_documents(updated_documents, None).await;
|
||||
let response = index.wait_task(task.uid()).await;
|
||||
snapshot!(json_string!(response, { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
|
||||
@ -2013,16 +2012,16 @@ async fn update_documents_with_geo_field() {
|
||||
{
|
||||
"id": "3",
|
||||
"_geo": {
|
||||
"lat": 5,
|
||||
"lng": 0
|
||||
"lat": 1,
|
||||
"lng": 1
|
||||
},
|
||||
"doggo": "kefir"
|
||||
},
|
||||
{
|
||||
"id": "4",
|
||||
"_geo": {
|
||||
"lat": "4",
|
||||
"lng": "0"
|
||||
"lat": "1",
|
||||
"lng": "1"
|
||||
}
|
||||
}
|
||||
],
|
||||
@ -2032,29 +2031,31 @@ async fn update_documents_with_geo_field() {
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index.search_post(json!({"sort": ["_geoPoint(10,0):asc"]})).await;
|
||||
let (response, code) = index
|
||||
.search_post(json!({"sort": ["_geoPoint(50.629973371633746,3.0569447399419567):desc"]}))
|
||||
.await;
|
||||
snapshot!(code, @"200 OK");
|
||||
// the search response should not have changed: we are expecting docs 4 and 3 first as they have geo
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }),
|
||||
@r###"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"id": "3",
|
||||
"_geo": {
|
||||
"lat": 5,
|
||||
"lng": 0
|
||||
},
|
||||
"doggo": "kefir",
|
||||
"_geoDistance": 555975
|
||||
},
|
||||
{
|
||||
"id": "4",
|
||||
"_geo": {
|
||||
"lat": "4",
|
||||
"lng": "0"
|
||||
"lat": "1",
|
||||
"lng": "1"
|
||||
},
|
||||
"_geoDistance": 667170
|
||||
"_geoDistance": 5522018
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"_geo": {
|
||||
"lat": 1,
|
||||
"lng": 1
|
||||
},
|
||||
"doggo": "kefir",
|
||||
"_geoDistance": 5522018
|
||||
},
|
||||
{
|
||||
"id": "1"
|
||||
|
@ -157,14 +157,11 @@ async fn delete_document_by_filter() {
|
||||
index.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
let (stats, _) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 4,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 42,
|
||||
"avgDocumentSize": 10,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -211,14 +208,11 @@ async fn delete_document_by_filter() {
|
||||
"###);
|
||||
|
||||
let (stats, _) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 16,
|
||||
"avgDocumentSize": 8,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -284,14 +278,11 @@ async fn delete_document_by_filter() {
|
||||
"###);
|
||||
|
||||
let (stats, _) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 1,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 12,
|
||||
"avgDocumentSize": 12,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
|
@ -28,15 +28,12 @@ async fn import_dump_v1_movie_raw() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -188,15 +185,12 @@ async fn import_dump_v1_movie_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -361,15 +355,12 @@ async fn import_dump_v1_rubygems_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 8606,
|
||||
"avgDocumentSize": 162,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -531,15 +522,12 @@ async fn import_dump_v2_movie_raw() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -691,15 +679,12 @@ async fn import_dump_v2_movie_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -861,15 +846,12 @@ async fn import_dump_v2_rubygems_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 8606,
|
||||
"avgDocumentSize": 162,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -1028,15 +1010,12 @@ async fn import_dump_v3_movie_raw() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -1188,15 +1167,12 @@ async fn import_dump_v3_movie_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -1358,15 +1334,12 @@ async fn import_dump_v3_rubygems_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 8606,
|
||||
"avgDocumentSize": 162,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -1525,15 +1498,12 @@ async fn import_dump_v4_movie_raw() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -1685,15 +1655,12 @@ async fn import_dump_v4_movie_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 21965,
|
||||
"avgDocumentSize": 414,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -1855,15 +1822,12 @@ async fn import_dump_v4_rubygems_with_settings() {
|
||||
let (stats, code) = index.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 53,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 8606,
|
||||
"avgDocumentSize": 162,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -2030,14 +1994,11 @@ async fn import_dump_v5() {
|
||||
|
||||
let (stats, code) = index1.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 10,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 6782,
|
||||
"avgDocumentSize": 678,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -2070,15 +2031,12 @@ async fn import_dump_v5() {
|
||||
let (stats, code) = index2.stats().await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(
|
||||
json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}),
|
||||
json_string!(stats),
|
||||
@r###"
|
||||
{
|
||||
"numberOfDocuments": 10,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 6782,
|
||||
"avgDocumentSize": 678,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -2279,7 +2237,6 @@ async fn import_dump_v6_containing_batches_and_enqueued_tasks() {
|
||||
".results[0].duration" => "[date]",
|
||||
".results[0].stats.progressTrace" => "[progressTrace]",
|
||||
".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]",
|
||||
".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]",
|
||||
}), name: "batches");
|
||||
|
||||
let (indexes, code) = server.list_indexes(None, None).await;
|
||||
|
@ -432,7 +432,7 @@ async fn search_non_filterable_facets() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, attribute `doggo` is not filterable. The available filterable attribute pattern is `title`.",
|
||||
"message": "Invalid facet distribution: Attribute `doggo` is not filterable. Available filterable attributes patterns are: `title`.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -443,7 +443,7 @@ async fn search_non_filterable_facets() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, attribute `doggo` is not filterable. The available filterable attribute pattern is `title`.",
|
||||
"message": "Invalid facet distribution: Attribute `doggo` is not filterable. Available filterable attributes patterns are: `title`.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -463,7 +463,7 @@ async fn search_non_filterable_facets_multiple_filterable() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, attribute `doggo` is not filterable. The available filterable attribute patterns are `genres, title`.",
|
||||
"message": "Invalid facet distribution: Attribute `doggo` is not filterable. Available filterable attributes patterns are: `genres, title`.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -474,7 +474,7 @@ async fn search_non_filterable_facets_multiple_filterable() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, attribute `doggo` is not filterable. The available filterable attribute patterns are `genres, title`.",
|
||||
"message": "Invalid facet distribution: Attribute `doggo` is not filterable. Available filterable attributes patterns are: `genres, title`.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -493,7 +493,7 @@ async fn search_non_filterable_facets_no_filterable() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, this index does not have configured filterable attributes.",
|
||||
"message": "Invalid facet distribution: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -504,7 +504,7 @@ async fn search_non_filterable_facets_no_filterable() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, this index does not have configured filterable attributes.",
|
||||
"message": "Invalid facet distribution: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -524,7 +524,7 @@ async fn search_non_filterable_facets_multiple_facets() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, attributes `doggo, neko` are not filterable. The available filterable attribute patterns are `genres, title`.",
|
||||
"message": "Invalid facet distribution: Attributes `doggo, neko` are not filterable. Available filterable attributes patterns are: `genres, title`.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -535,7 +535,7 @@ async fn search_non_filterable_facets_multiple_facets() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid facet distribution, attributes `doggo, neko` are not filterable. The available filterable attribute patterns are `genres, title`.",
|
||||
"message": "Invalid facet distribution: Attributes `doggo, neko` are not filterable. Available filterable attributes patterns are: `genres, title`.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -884,14 +884,14 @@ async fn search_with_pattern_filter_settings_errors() {
|
||||
}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
snapshot!(json_string!(response), @r#"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
@ -910,14 +910,14 @@ async fn search_with_pattern_filter_settings_errors() {
|
||||
}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
snapshot!(json_string!(response), @r#"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
@ -931,14 +931,14 @@ async fn search_with_pattern_filter_settings_errors() {
|
||||
}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
snapshot!(json_string!(response), @r#"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
@ -957,14 +957,14 @@ async fn search_with_pattern_filter_settings_errors() {
|
||||
}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
snapshot!(json_string!(response), @r#"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
@ -983,14 +983,14 @@ async fn search_with_pattern_filter_settings_errors() {
|
||||
}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
snapshot!(json_string!(response), @r#"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `TO` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `TO` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
@ -559,7 +559,7 @@ async fn facet_search_with_filterable_attributes_rules_errors() {
|
||||
&json!({"facetName": "genres", "facetQuery": "a"}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(response["message"], @r###""Attribute `genres` is not facet-searchable. This index does not have configured facet-searchable attributes. To make it facet-searchable add it to the `filterableAttributes` index settings.""###);
|
||||
snapshot!(response["message"], @r###""Attribute `genres` is not facet-searchable. Note: this attribute matches rule #0 in filterableAttributes, but this rule does not enable facetSearch.\nHint: enable facetSearch in rule #0 by adding `\"facetSearch\": true` to the rule.\nHint: prepend another rule matching genres with facetSearch: true before rule #0""###);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
@ -570,7 +570,7 @@ async fn facet_search_with_filterable_attributes_rules_errors() {
|
||||
&json!({"facetName": "genres", "facetQuery": "a"}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(response["message"], @r###""Attribute `genres` is not facet-searchable. This index does not have configured facet-searchable attributes. To make it facet-searchable add it to the `filterableAttributes` index settings.""###);
|
||||
snapshot!(response["message"], @r###""Attribute `genres` is not facet-searchable. Note: this attribute matches rule #0 in filterableAttributes, but this rule does not enable facetSearch.\nHint: enable facetSearch in rule #0 by adding `\"facetSearch\": true` to the rule.\nHint: prepend another rule matching genres with facetSearch: true before rule #0""###);
|
||||
},
|
||||
).await;
|
||||
|
||||
@ -580,7 +580,7 @@ async fn facet_search_with_filterable_attributes_rules_errors() {
|
||||
&json!({"facetName": "genres", "facetQuery": "a"}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(response["message"], @r###""Attribute `genres` is not facet-searchable. This index does not have configured facet-searchable attributes. To make it facet-searchable add it to the `filterableAttributes` index settings.""###);
|
||||
snapshot!(response["message"], @r###""Attribute `genres` is not facet-searchable. Note: this attribute matches rule #0 in filterableAttributes, but this rule does not enable facetSearch.\nHint: enable facetSearch in rule #0 by adding `\"facetSearch\": true` to the rule.\nHint: prepend another rule matching genres with facetSearch: true before rule #0""###);
|
||||
},
|
||||
).await;
|
||||
|
||||
@ -601,7 +601,7 @@ async fn facet_search_with_filterable_attributes_rules_errors() {
|
||||
&json!({"facetName": "doggos.name", "facetQuery": "b"}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(response["message"], @r###""Attribute `doggos.name` is not facet-searchable. This index does not have configured facet-searchable attributes. To make it facet-searchable add it to the `filterableAttributes` index settings.""###);
|
||||
snapshot!(response["message"], @r###""Attribute `doggos.name` is not facet-searchable. Note: this attribute matches rule #0 in filterableAttributes, but this rule does not enable facetSearch.\nHint: enable facetSearch in rule #0 by adding `\"facetSearch\": true` to the rule.\nHint: prepend another rule matching doggos.name with facetSearch: true before rule #0""###);
|
||||
},
|
||||
).await;
|
||||
|
||||
@ -611,7 +611,7 @@ async fn facet_search_with_filterable_attributes_rules_errors() {
|
||||
&json!({"facetName": "doggos.name", "facetQuery": "b"}),
|
||||
|response, code| {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(response["message"], @r###""Attribute `doggos.name` is not facet-searchable. This index does not have configured facet-searchable attributes. To make it facet-searchable add it to the `filterableAttributes` index settings.""###);
|
||||
snapshot!(response["message"], @r###""Attribute `doggos.name` is not facet-searchable. Note: this attribute matches rule #0 in filterableAttributes, but this rule does not enable facetSearch.\nHint: enable facetSearch in rule #0 by adding `\"facetSearch\": true` to the rule.\nHint: prepend another rule matching doggos.name with facetSearch: true before rule #0""###);
|
||||
},
|
||||
).await;
|
||||
}
|
||||
|
@ -335,7 +335,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
@ -481,7 +481,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
@ -613,7 +613,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`",
|
||||
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
|
||||
"code": "invalid_search_filter",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
|
||||
|
@ -74,7 +74,7 @@ async fn formatted_contain_wildcard() {
|
||||
allow_duplicates! {
|
||||
assert_json_snapshot!(response["hits"][0],
|
||||
{ "._rankingScore" => "[score]" },
|
||||
@r###"
|
||||
@r#"
|
||||
{
|
||||
"_formatted": {
|
||||
"id": "852",
|
||||
@ -84,12 +84,12 @@ async fn formatted_contain_wildcard() {
|
||||
"cattos": [
|
||||
{
|
||||
"start": 0,
|
||||
"length": 5
|
||||
"length": 6
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
}
|
||||
)
|
||||
@ -119,7 +119,7 @@ async fn formatted_contain_wildcard() {
|
||||
allow_duplicates! {
|
||||
assert_json_snapshot!(response["hits"][0],
|
||||
{ "._rankingScore" => "[score]" },
|
||||
@r###"
|
||||
@r#"
|
||||
{
|
||||
"id": 852,
|
||||
"cattos": "pésti",
|
||||
@ -131,12 +131,12 @@ async fn formatted_contain_wildcard() {
|
||||
"cattos": [
|
||||
{
|
||||
"start": 0,
|
||||
"length": 5
|
||||
"length": 6
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
"###)
|
||||
"#)
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
@ -1783,146 +1783,6 @@ async fn test_nested_fields() {
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_typo_settings() {
|
||||
let documents = json!([
|
||||
{
|
||||
"id": 0,
|
||||
"title": "The zeroth document",
|
||||
},
|
||||
{
|
||||
"id": 1,
|
||||
"title": "The first document",
|
||||
"nested": {
|
||||
"object": "field",
|
||||
"machin": "bidule",
|
||||
},
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "The second document",
|
||||
"nested": [
|
||||
"array",
|
||||
{
|
||||
"object": "field",
|
||||
},
|
||||
{
|
||||
"prout": "truc",
|
||||
"machin": "lol",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "The third document",
|
||||
"nested": "I lied",
|
||||
},
|
||||
]);
|
||||
|
||||
test_settings_documents_indexing_swapping_and_search(
|
||||
&documents,
|
||||
&json!({
|
||||
"searchableAttributes": ["title", "nested.object", "nested.machin"],
|
||||
"typoTolerance": {
|
||||
"enabled": true,
|
||||
"disableOnAttributes": ["title"]
|
||||
}
|
||||
}),
|
||||
&json!({"q": "document"}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{}", response);
|
||||
snapshot!(json_string!(response["hits"]), @r###"
|
||||
[
|
||||
{
|
||||
"id": 0,
|
||||
"title": "The zeroth document"
|
||||
},
|
||||
{
|
||||
"id": 1,
|
||||
"title": "The first document",
|
||||
"nested": {
|
||||
"object": "field",
|
||||
"machin": "bidule"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "The second document",
|
||||
"nested": [
|
||||
"array",
|
||||
{
|
||||
"object": "field"
|
||||
},
|
||||
{
|
||||
"prout": "truc",
|
||||
"machin": "lol"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "The third document",
|
||||
"nested": "I lied"
|
||||
}
|
||||
]
|
||||
"###);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
||||
// Test prefix search
|
||||
test_settings_documents_indexing_swapping_and_search(
|
||||
&documents,
|
||||
&json!({
|
||||
"searchableAttributes": ["title", "nested.object", "nested.machin"],
|
||||
"typoTolerance": {
|
||||
"enabled": true,
|
||||
"disableOnAttributes": ["title"]
|
||||
}
|
||||
}),
|
||||
&json!({"q": "docume"}),
|
||||
|response, code| {
|
||||
assert_eq!(code, 200, "{}", response);
|
||||
snapshot!(json_string!(response["hits"]), @r###"
|
||||
[
|
||||
{
|
||||
"id": 0,
|
||||
"title": "The zeroth document"
|
||||
},
|
||||
{
|
||||
"id": 1,
|
||||
"title": "The first document",
|
||||
"nested": {
|
||||
"object": "field",
|
||||
"machin": "bidule"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "The second document",
|
||||
"nested": [
|
||||
"array",
|
||||
{
|
||||
"object": "field"
|
||||
},
|
||||
{
|
||||
"prout": "truc",
|
||||
"machin": "lol"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "The third document",
|
||||
"nested": "I lied"
|
||||
}
|
||||
]
|
||||
"###);
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
/// Modifying facets with different casing should work correctly
|
||||
#[actix_rt::test]
|
||||
async fn change_facet_casing() {
|
||||
|
@ -914,7 +914,7 @@ async fn search_one_query_error() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Inside `.queries[0]`: Invalid facet distribution, this index does not have configured filterable attributes.",
|
||||
"message": "Inside `.queries[0]`: Invalid facet distribution: Attribute `title` is not filterable. This index does not have configured filterable attributes.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -1010,7 +1010,7 @@ async fn search_multiple_query_errors() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Inside `.queries[0]`: Invalid facet distribution, this index does not have configured filterable attributes.",
|
||||
"message": "Inside `.queries[0]`: Invalid facet distribution: Attribute `title` is not filterable. This index does not have configured filterable attributes.",
|
||||
"code": "invalid_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_search_facets"
|
||||
@ -3647,7 +3647,7 @@ async fn federation_non_faceted_for_an_index() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
insta::assert_json_snapshot!(response, { ".processingTimeMs" => "[time]" }, @r###"
|
||||
{
|
||||
"message": "Inside `.federation.facetsByIndex.fruits-no-name`: Invalid facet distribution, attribute `name` is not filterable. The available filterable attribute patterns are `BOOST, id`.\n - Note: index `fruits-no-name` used in `.queries[1]`",
|
||||
"message": "Inside `.federation.facetsByIndex.fruits-no-name`: Invalid facet distribution: Attribute `name` is not filterable. Available filterable attributes patterns are: `BOOST, id`.\n - Note: index `fruits-no-name` used in `.queries[1]`",
|
||||
"code": "invalid_multi_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_multi_search_facets"
|
||||
@ -3669,7 +3669,7 @@ async fn federation_non_faceted_for_an_index() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
insta::assert_json_snapshot!(response, { ".processingTimeMs" => "[time]" }, @r###"
|
||||
{
|
||||
"message": "Inside `.federation.facetsByIndex.fruits-no-name`: Invalid facet distribution, attribute `name` is not filterable. The available filterable attribute patterns are `BOOST, id`.\n - Note: index `fruits-no-name` is not used in queries",
|
||||
"message": "Inside `.federation.facetsByIndex.fruits-no-name`: Invalid facet distribution: Attribute `name` is not filterable. Available filterable attributes patterns are: `BOOST, id`.\n - Note: index `fruits-no-name` is not used in queries",
|
||||
"code": "invalid_multi_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_multi_search_facets"
|
||||
@ -3690,14 +3690,14 @@ async fn federation_non_faceted_for_an_index() {
|
||||
]}))
|
||||
.await;
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
insta::assert_json_snapshot!(response, { ".processingTimeMs" => "[time]" }, @r###"
|
||||
insta::assert_json_snapshot!(response, { ".processingTimeMs" => "[time]" }, @r#"
|
||||
{
|
||||
"message": "Inside `.federation.facetsByIndex.fruits-no-facets`: Invalid facet distribution, this index does not have configured filterable attributes.\n - Note: index `fruits-no-facets` is not used in queries",
|
||||
"message": "Inside `.federation.facetsByIndex.fruits-no-facets`: Invalid facet distribution: Attributes `BOOST, id` are not filterable. This index does not have configured filterable attributes.\n - Note: index `fruits-no-facets` is not used in queries",
|
||||
"code": "invalid_multi_search_facets",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_multi_search_facets"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
|
||||
// also fails
|
||||
let (response, code) = server
|
||||
|
@ -1213,7 +1213,7 @@ async fn error_bad_request_facets_by_index_facet() {
|
||||
},
|
||||
"remoteErrors": {
|
||||
"ms1": {
|
||||
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.federation.facetsByIndex.test`: Invalid facet distribution, this index does not have configured filterable attributes.\\n - Note: index `test` used in `.queries[1]`\",\"code\":\"invalid_multi_search_facets\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#invalid_multi_search_facets\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
|
||||
"message": "remote host responded with code 400:\n - response from remote: {\"message\":\"Inside `.federation.facetsByIndex.test`: Invalid facet distribution: Attribute `id` is not filterable. This index does not have configured filterable attributes.\\n - Note: index `test` used in `.queries[1]`\",\"code\":\"invalid_multi_search_facets\",\"type\":\"invalid_request\",\"link\":\"https://docs.meilisearch.com/errors#invalid_multi_search_facets\"}\n - hint: check that the remote instance has the correct index configuration for that request\n - hint: check that the `network` experimental feature is enabled on the remote instance",
|
||||
"code": "remote_bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#remote_bad_request"
|
||||
@ -1374,7 +1374,7 @@ async fn error_remote_does_not_answer() {
|
||||
"###);
|
||||
let (response, _status_code) = ms1.multi_search(request.clone()).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
|
||||
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r#"
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
@ -1421,7 +1421,7 @@ async fn error_remote_does_not_answer() {
|
||||
}
|
||||
}
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
|
@ -15,33 +15,36 @@ macro_rules! parameter_test {
|
||||
}
|
||||
}))
|
||||
.await;
|
||||
$server.wait_task(response.uid()).await.succeeded();
|
||||
$server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let mut value = base_for_source(source);
|
||||
value[param] = valid_parameter(source, param).0;
|
||||
let (response, code) = index
|
||||
.update_settings(crate::json!({
|
||||
"embedders": {
|
||||
"test": value
|
||||
}
|
||||
}))
|
||||
.await;
|
||||
snapshot!(code, name: concat!(stringify!($source), "-", stringify!($param), "-sending_code"));
|
||||
snapshot!(json_string!(response, {".enqueuedAt" => "[enqueuedAt]", ".taskUid" => "[taskUid]"}), name: concat!(stringify!($source), "-", stringify!($param), "-sending_result"));
|
||||
// Add a small delay between API calls
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
|
||||
|
||||
if response.has_uid() {
|
||||
let response = $server.wait_task(response.uid()).await;
|
||||
snapshot!(json_string!(response, {".enqueuedAt" => "[enqueuedAt]",
|
||||
".uid" => "[uid]", ".batchUid" => "[batchUid]",
|
||||
".duration" => "[duration]",
|
||||
".startedAt" => "[startedAt]",
|
||||
".finishedAt" => "[finishedAt]"}), name: concat!(stringify!($source), "-", stringify!($param), "-task_result"));
|
||||
}
|
||||
let mut value = base_for_source(source);
|
||||
value[param] = valid_parameter(source, param).0;
|
||||
let (response, code) = index
|
||||
.update_settings(crate::json!({
|
||||
"embedders": {
|
||||
"test": value
|
||||
}
|
||||
}))
|
||||
.await;
|
||||
snapshot!(code, name: concat!(stringify!($source), "-", stringify!($param), "-sending_code"));
|
||||
snapshot!(json_string!(response, {".enqueuedAt" => "[enqueuedAt]", ".taskUid" => "[taskUid]"}), name: concat!(stringify!($source), "-", stringify!($param), "-sending_result"));
|
||||
|
||||
if response.has_uid() {
|
||||
let response = $server.wait_task(response.uid()).await;
|
||||
snapshot!(json_string!(response, {".enqueuedAt" => "[enqueuedAt]",
|
||||
".uid" => "[uid]", ".batchUid" => "[batchUid]",
|
||||
".duration" => "[duration]",
|
||||
".startedAt" => "[startedAt]",
|
||||
".finishedAt" => "[finishedAt]"}), name: concat!(stringify!($source), "-", stringify!($param), "-task_result"));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
#[ignore = "Test is failing with timeout issues"]
|
||||
async fn bad_parameters() {
|
||||
let server = Server::new().await;
|
||||
|
||||
@ -128,6 +131,7 @@ async fn bad_parameters() {
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
#[ignore = "Test is failing with timeout issues"]
|
||||
async fn bad_parameters_2() {
|
||||
let server = Server::new().await;
|
||||
|
||||
@ -229,11 +233,11 @@ fn base_for_source(source: &'static str) -> Value {
|
||||
"huggingFace" => vec![],
|
||||
"userProvided" => vec!["dimensions"],
|
||||
"ollama" => vec!["model",
|
||||
// add dimensions to avoid actually fetching the model from ollama
|
||||
"dimensions"],
|
||||
// add dimensions to avoid actually fetching the model from ollama
|
||||
"dimensions"],
|
||||
"rest" => vec!["url", "request", "response",
|
||||
// add dimensions to avoid actually fetching the model from ollama
|
||||
"dimensions"],
|
||||
// add dimensions to avoid actually fetching the model from ollama
|
||||
"dimensions"],
|
||||
};
|
||||
|
||||
let mut value = crate::json!({
|
||||
@ -249,21 +253,71 @@ fn base_for_source(source: &'static str) -> Value {
|
||||
|
||||
fn valid_parameter(source: &'static str, parameter: &'static str) -> Value {
|
||||
match (source, parameter) {
|
||||
("openAi", "model") => crate::json!("text-embedding-3-small"),
|
||||
("huggingFace", "model") => crate::json!("sentence-transformers/all-MiniLM-L6-v2"),
|
||||
(_, "model") => crate::json!("all-minilm"),
|
||||
(_, "revision") => crate::json!("e4ce9877abf3edfe10b0d82785e83bdcb973e22e"),
|
||||
(_, "pooling") => crate::json!("forceMean"),
|
||||
(_, "apiKey") => crate::json!("foo"),
|
||||
(_, "dimensions") => crate::json!(768),
|
||||
(_, "binaryQuantized") => crate::json!(false),
|
||||
(_, "documentTemplate") => crate::json!("toto"),
|
||||
(_, "documentTemplateMaxBytes") => crate::json!(200),
|
||||
(_, "url") => crate::json!("http://rest.example/"),
|
||||
(_, "request") => crate::json!({"text": "{{text}}"}),
|
||||
(_, "response") => crate::json!({"embedding": "{{embedding}}"}),
|
||||
(_, "headers") => crate::json!({"custom": "value"}),
|
||||
(_, "distribution") => crate::json!({"mean": 0.4, "sigma": 0.1}),
|
||||
_ => panic!("unknown parameter"),
|
||||
("openAi", "model") => crate::json!("text-embedding-ada-002"),
|
||||
("openAi", "revision") => crate::json!("2023-05-15"),
|
||||
("openAi", "pooling") => crate::json!("mean"),
|
||||
("openAi", "apiKey") => crate::json!("test"),
|
||||
("openAi", "dimensions") => crate::json!(1), // Use minimal dimension to avoid model download
|
||||
("openAi", "binaryQuantized") => crate::json!(false),
|
||||
("openAi", "documentTemplate") => crate::json!("test"),
|
||||
("openAi", "documentTemplateMaxBytes") => crate::json!(100),
|
||||
("openAi", "url") => crate::json!("http://test"),
|
||||
("openAi", "request") => crate::json!({ "test": "test" }),
|
||||
("openAi", "response") => crate::json!({ "test": "test" }),
|
||||
("openAi", "headers") => crate::json!({ "test": "test" }),
|
||||
("openAi", "distribution") => crate::json!("normal"),
|
||||
("huggingFace", "model") => crate::json!("test"),
|
||||
("huggingFace", "revision") => crate::json!("test"),
|
||||
("huggingFace", "pooling") => crate::json!("mean"),
|
||||
("huggingFace", "apiKey") => crate::json!("test"),
|
||||
("huggingFace", "dimensions") => crate::json!(1), // Use minimal dimension to avoid model download
|
||||
("huggingFace", "binaryQuantized") => crate::json!(false),
|
||||
("huggingFace", "documentTemplate") => crate::json!("test"),
|
||||
("huggingFace", "documentTemplateMaxBytes") => crate::json!(100),
|
||||
("huggingFace", "url") => crate::json!("http://test"),
|
||||
("huggingFace", "request") => crate::json!({ "test": "test" }),
|
||||
("huggingFace", "response") => crate::json!({ "test": "test" }),
|
||||
("huggingFace", "headers") => crate::json!({ "test": "test" }),
|
||||
("huggingFace", "distribution") => crate::json!("normal"),
|
||||
("userProvided", "model") => crate::json!("test"),
|
||||
("userProvided", "revision") => crate::json!("test"),
|
||||
("userProvided", "pooling") => crate::json!("mean"),
|
||||
("userProvided", "apiKey") => crate::json!("test"),
|
||||
("userProvided", "dimensions") => crate::json!(1), // Use minimal dimension to avoid model download
|
||||
("userProvided", "binaryQuantized") => crate::json!(false),
|
||||
("userProvided", "documentTemplate") => crate::json!("test"),
|
||||
("userProvided", "documentTemplateMaxBytes") => crate::json!(100),
|
||||
("userProvided", "url") => crate::json!("http://test"),
|
||||
("userProvided", "request") => crate::json!({ "test": "test" }),
|
||||
("userProvided", "response") => crate::json!({ "test": "test" }),
|
||||
("userProvided", "headers") => crate::json!({ "test": "test" }),
|
||||
("userProvided", "distribution") => crate::json!("normal"),
|
||||
("ollama", "model") => crate::json!("test"),
|
||||
("ollama", "revision") => crate::json!("test"),
|
||||
("ollama", "pooling") => crate::json!("mean"),
|
||||
("ollama", "apiKey") => crate::json!("test"),
|
||||
("ollama", "dimensions") => crate::json!(1), // Use minimal dimension to avoid model download
|
||||
("ollama", "binaryQuantized") => crate::json!(false),
|
||||
("ollama", "documentTemplate") => crate::json!("test"),
|
||||
("ollama", "documentTemplateMaxBytes") => crate::json!(100),
|
||||
("ollama", "url") => crate::json!("http://test"),
|
||||
("ollama", "request") => crate::json!({ "test": "test" }),
|
||||
("ollama", "response") => crate::json!({ "test": "test" }),
|
||||
("ollama", "headers") => crate::json!({ "test": "test" }),
|
||||
("ollama", "distribution") => crate::json!("normal"),
|
||||
("rest", "model") => crate::json!("test"),
|
||||
("rest", "revision") => crate::json!("test"),
|
||||
("rest", "pooling") => crate::json!("mean"),
|
||||
("rest", "apiKey") => crate::json!("test"),
|
||||
("rest", "dimensions") => crate::json!(1), // Use minimal dimension to avoid model download
|
||||
("rest", "binaryQuantized") => crate::json!(false),
|
||||
("rest", "documentTemplate") => crate::json!("test"),
|
||||
("rest", "documentTemplateMaxBytes") => crate::json!(100),
|
||||
("rest", "url") => crate::json!("http://test"),
|
||||
("rest", "request") => crate::json!({ "test": "test" }),
|
||||
("rest", "response") => crate::json!({ "test": "test" }),
|
||||
("rest", "headers") => crate::json!({ "test": "test" }),
|
||||
("rest", "distribution") => crate::json!("normal"),
|
||||
_ => panic!("Invalid parameter {} for source {}", parameter, source),
|
||||
}
|
||||
}
|
||||
|
@ -10,10 +10,10 @@ use crate::json;
|
||||
|
||||
macro_rules! verify_snapshot {
|
||||
(
|
||||
$orig:expr,
|
||||
$snapshot: expr,
|
||||
$orig:expr_2021,
|
||||
$snapshot: expr_2021,
|
||||
|$server:ident| =>
|
||||
$($e:expr,)+) => {
|
||||
$($e:expr_2021,)+) => {
|
||||
use std::sync::Arc;
|
||||
let snapshot = Arc::new($snapshot);
|
||||
let orig = Arc::new($orig);
|
||||
|
@ -110,14 +110,11 @@ async fn add_remove_embeddings() {
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 27,
|
||||
"avgDocumentSize": 13,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 5,
|
||||
"numberOfEmbeddedDocuments": 2,
|
||||
@ -138,14 +135,11 @@ async fn add_remove_embeddings() {
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 27,
|
||||
"avgDocumentSize": 13,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 3,
|
||||
"numberOfEmbeddedDocuments": 2,
|
||||
@ -166,14 +160,11 @@ async fn add_remove_embeddings() {
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 27,
|
||||
"avgDocumentSize": 13,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 2,
|
||||
"numberOfEmbeddedDocuments": 2,
|
||||
@ -195,14 +186,11 @@ async fn add_remove_embeddings() {
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 27,
|
||||
"avgDocumentSize": 13,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 2,
|
||||
"numberOfEmbeddedDocuments": 1,
|
||||
@ -248,14 +236,11 @@ async fn add_remove_embedded_documents() {
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 27,
|
||||
"avgDocumentSize": 13,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 5,
|
||||
"numberOfEmbeddedDocuments": 2,
|
||||
@ -272,14 +257,11 @@ async fn add_remove_embedded_documents() {
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 1,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 13,
|
||||
"avgDocumentSize": 13,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 3,
|
||||
"numberOfEmbeddedDocuments": 1,
|
||||
@ -308,14 +290,11 @@ async fn update_embedder_settings() {
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 108,
|
||||
"avgDocumentSize": 54,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -347,14 +326,11 @@ async fn update_embedder_settings() {
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let (stats, _code) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[size]",
|
||||
".avgDocumentSize" => "[size]",
|
||||
}), @r###"
|
||||
snapshot!(json_string!(stats), @r###"
|
||||
{
|
||||
"numberOfDocuments": 2,
|
||||
"rawDocumentDbSize": "[size]",
|
||||
"avgDocumentSize": "[size]",
|
||||
"rawDocumentDbSize": 108,
|
||||
"avgDocumentSize": 54,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 3,
|
||||
"numberOfEmbeddedDocuments": 2,
|
||||
|
@ -228,7 +228,7 @@ async fn list_tasks_status_and_type_filtered() {
|
||||
}
|
||||
|
||||
macro_rules! assert_valid_summarized_task {
|
||||
($response:expr, $task_type:literal, $index:literal) => {{
|
||||
($response:expr_2021, $task_type:literal, $index:literal) => {{
|
||||
assert_eq!($response.as_object().unwrap().len(), 5);
|
||||
assert!($response["taskUid"].as_u64().is_some());
|
||||
assert_eq!($response["indexUid"], $index);
|
||||
|
@ -133,9 +133,7 @@ async fn check_the_index_scheduler(server: &Server) {
|
||||
let (stats, _) = server.stats().await;
|
||||
assert_json_snapshot!(stats, {
|
||||
".databaseSize" => "[bytes]",
|
||||
".usedDatabaseSize" => "[bytes]",
|
||||
".indexes.kefir.rawDocumentDbSize" => "[bytes]",
|
||||
".indexes.kefir.avgDocumentSize" => "[bytes]",
|
||||
".usedDatabaseSize" => "[bytes]"
|
||||
},
|
||||
@r###"
|
||||
{
|
||||
@ -145,8 +143,8 @@ async fn check_the_index_scheduler(server: &Server) {
|
||||
"indexes": {
|
||||
"kefir": {
|
||||
"numberOfDocuments": 1,
|
||||
"rawDocumentDbSize": "[bytes]",
|
||||
"avgDocumentSize": "[bytes]",
|
||||
"rawDocumentDbSize": 109,
|
||||
"avgDocumentSize": 109,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -195,33 +193,31 @@ async fn check_the_index_scheduler(server: &Server) {
|
||||
|
||||
// Tests all the batches query parameters
|
||||
let (batches, _) = server.batches_filter("uids=10").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_uids_equal_10");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_uids_equal_10");
|
||||
let (batches, _) = server.batches_filter("batchUids=10").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_batchUids_equal_10");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_batchUids_equal_10");
|
||||
let (batches, _) = server.batches_filter("statuses=canceled").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_statuses_equal_canceled");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_statuses_equal_canceled");
|
||||
// types has already been tested above to retrieve the upgrade database
|
||||
let (batches, _) = server.batches_filter("canceledBy=19").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_canceledBy_equal_19");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_canceledBy_equal_19");
|
||||
let (batches, _) = server.batches_filter("beforeEnqueuedAt=2025-01-16T16:47:41Z").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeEnqueuedAt_equal_2025-01-16T16_47_41");
|
||||
let (batches, _) = server.batches_filter("afterEnqueuedAt=2025-01-16T16:47:41Z").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterEnqueuedAt_equal_2025-01-16T16_47_41");
|
||||
let (batches, _) = server.batches_filter("beforeStartedAt=2025-01-16T16:47:41Z").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeStartedAt_equal_2025-01-16T16_47_41");
|
||||
let (batches, _) = server.batches_filter("afterStartedAt=2025-01-16T16:47:41Z").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterStartedAt_equal_2025-01-16T16_47_41");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterStartedAt_equal_2025-01-16T16_47_41");
|
||||
let (batches, _) = server.batches_filter("beforeFinishedAt=2025-01-16T16:47:41Z").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_beforeFinishedAt_equal_2025-01-16T16_47_41");
|
||||
let (batches, _) = server.batches_filter("afterFinishedAt=2025-01-16T16:47:41Z").await;
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.internalDatabaseSizes" => "[internalDatabaseSizes]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
|
||||
snapshot!(json_string!(batches, { ".results[0].duration" => "[duration]", ".results[0].enqueuedAt" => "[date]", ".results[0].startedAt" => "[date]", ".results[0].finishedAt" => "[date]", ".results[0].stats.progressTrace" => "[progressTrace]", ".results[0].stats.writeChannelCongestion" => "[writeChannelCongestion]" }), name: "batches_filter_afterFinishedAt_equal_2025-01-16T16_47_41");
|
||||
|
||||
let (stats, _) = server.stats().await;
|
||||
assert_json_snapshot!(stats, {
|
||||
".databaseSize" => "[bytes]",
|
||||
".usedDatabaseSize" => "[bytes]",
|
||||
".indexes.kefir.rawDocumentDbSize" => "[bytes]",
|
||||
".indexes.kefir.avgDocumentSize" => "[bytes]",
|
||||
".usedDatabaseSize" => "[bytes]"
|
||||
},
|
||||
@r###"
|
||||
{
|
||||
@ -231,8 +227,8 @@ async fn check_the_index_scheduler(server: &Server) {
|
||||
"indexes": {
|
||||
"kefir": {
|
||||
"numberOfDocuments": 1,
|
||||
"rawDocumentDbSize": "[bytes]",
|
||||
"avgDocumentSize": "[bytes]",
|
||||
"rawDocumentDbSize": 109,
|
||||
"avgDocumentSize": 109,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
@ -249,14 +245,11 @@ async fn check_the_index_scheduler(server: &Server) {
|
||||
"###);
|
||||
let index = server.index("kefir");
|
||||
let (stats, _) = index.stats().await;
|
||||
snapshot!(json_string!(stats, {
|
||||
".rawDocumentDbSize" => "[bytes]",
|
||||
".avgDocumentSize" => "[bytes]",
|
||||
}), @r###"
|
||||
snapshot!(stats, @r###"
|
||||
{
|
||||
"numberOfDocuments": 1,
|
||||
"rawDocumentDbSize": "[bytes]",
|
||||
"avgDocumentSize": "[bytes]",
|
||||
"rawDocumentDbSize": 109,
|
||||
"avgDocumentSize": 109,
|
||||
"isIndexing": false,
|
||||
"numberOfEmbeddings": 0,
|
||||
"numberOfEmbeddedDocuments": 0,
|
||||
|
@ -100,7 +100,7 @@ async fn add_remove_user_provided() {
|
||||
let (documents, _code) = index
|
||||
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
|
||||
.await;
|
||||
snapshot!(json_string!(documents), @r###"
|
||||
snapshot!(json_string!(documents), @r#"
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
@ -134,7 +134,7 @@ async fn add_remove_user_provided() {
|
||||
"limit": 20,
|
||||
"total": 2
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
|
||||
let (value, code) = index.delete_document(0).await;
|
||||
snapshot!(code, @"202 Accepted");
|
||||
@ -143,7 +143,7 @@ async fn add_remove_user_provided() {
|
||||
let (documents, _code) = index
|
||||
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
|
||||
.await;
|
||||
snapshot!(json_string!(documents), @r###"
|
||||
snapshot!(json_string!(documents), @r#"
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
@ -161,6 +161,97 @@ async fn add_remove_user_provided() {
|
||||
"limit": 20,
|
||||
"total": 1
|
||||
}
|
||||
"#);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn user_provide_mismatched_embedding_dimension() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("doggo");
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
"embedders": {
|
||||
"manual": {
|
||||
"source": "userProvided",
|
||||
"dimensions": 3,
|
||||
}
|
||||
},
|
||||
}))
|
||||
.await;
|
||||
snapshot!(code, @"202 Accepted");
|
||||
server.wait_task(response.uid()).await.succeeded();
|
||||
|
||||
let documents = json!([
|
||||
{"id": 0, "name": "kefir", "_vectors": { "manual": [0, 0] }},
|
||||
]);
|
||||
let (value, code) = index.add_documents(documents, None).await;
|
||||
snapshot!(code, @"202 Accepted");
|
||||
let task = index.wait_task(value.uid()).await;
|
||||
snapshot!(task, @r#"
|
||||
{
|
||||
"uid": "[uid]",
|
||||
"batchUid": "[batch_uid]",
|
||||
"indexUid": "doggo",
|
||||
"status": "failed",
|
||||
"type": "documentAdditionOrUpdate",
|
||||
"canceledBy": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 0
|
||||
},
|
||||
"error": {
|
||||
"message": "Index `doggo`: Invalid vector dimensions: expected: `3`, found: `2`.",
|
||||
"code": "invalid_vector_dimensions",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_vector_dimensions"
|
||||
},
|
||||
"duration": "[duration]",
|
||||
"enqueuedAt": "[date]",
|
||||
"startedAt": "[date]",
|
||||
"finishedAt": "[date]"
|
||||
}
|
||||
"#);
|
||||
|
||||
// FIXME: /!\ Case where number of embeddings is divisor of `dimensions` would still pass
|
||||
let new_document = json!([
|
||||
{"id": 0, "name": "kefir", "_vectors": { "manual": [[0, 0], [1, 1], [2, 2]] }},
|
||||
]);
|
||||
let (response, code) = index.add_documents(new_document, None).await;
|
||||
snapshot!(code, @"202 Accepted");
|
||||
index.wait_task(response.uid()).await.succeeded();
|
||||
let (documents, _code) = index
|
||||
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
|
||||
.await;
|
||||
snapshot!(json_string!(documents), @r###"
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"id": 0,
|
||||
"name": "kefir",
|
||||
"_vectors": {
|
||||
"manual": {
|
||||
"embeddings": [
|
||||
[
|
||||
0.0,
|
||||
0.0,
|
||||
1.0
|
||||
],
|
||||
[
|
||||
1.0,
|
||||
2.0,
|
||||
2.0
|
||||
]
|
||||
],
|
||||
"regenerate": false
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"offset": 0,
|
||||
"limit": 20,
|
||||
"total": 1
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
@ -678,7 +769,7 @@ async fn add_remove_one_vector_4588() {
|
||||
let (documents, _code) = index
|
||||
.get_all_documents(GetAllDocumentsOptions { retrieve_vectors: true, ..Default::default() })
|
||||
.await;
|
||||
snapshot!(json_string!(documents), @r###"
|
||||
snapshot!(json_string!(documents), @r#"
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
@ -696,5 +787,5 @@ async fn add_remove_one_vector_4588() {
|
||||
"limit": 20,
|
||||
"total": 1
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
@ -577,14 +577,14 @@ fn export_documents(
|
||||
return Err(meilisearch_types::milli::Error::UserError(
|
||||
meilisearch_types::milli::UserError::InvalidVectorsMapType {
|
||||
document_id: {
|
||||
if let Ok(Some(Ok(index))) = index
|
||||
match index
|
||||
.external_id_of(&rtxn, std::iter::once(id))
|
||||
.map(|it| it.into_iter().next())
|
||||
{
|
||||
{ Ok(Some(Ok(index))) => {
|
||||
index
|
||||
} else {
|
||||
} _ => {
|
||||
format!("internal docid={id}")
|
||||
}
|
||||
}}
|
||||
},
|
||||
value: vectors.clone(),
|
||||
},
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user